<?xml version="1.0" encoding="UTF-8"?>
  <!-- NOTE(review): declared counts (tests="1" failures="1") do not match the document's
       content — this file contains far more than one <testcase>, and the visible cases are
       all <skipped/> with no <failure> elements. Counts should be reconciled when these
       per-node reports are aggregated; attribute values left untouched pending confirmation. -->
  <testsuite name="Kubernetes e2e suite" tests="1" failures="1" errors="0" time="360.124">
      <testcase name="[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (xfs)][Slow] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-apps] ReplicationController should serve a basic image on each replica with a public image  [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source in parallel [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (ext4)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI mock volume CSI online volume expansion should expand volume without restarting pod if attach=off, nodeExpansion=on" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (xfs)][Slow] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-scheduling] GPUDevicePluginAcrossRecreate [Feature:Recreate] run Nvidia GPU Device Plugin tests with a recreation" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support restarting containers using file as subpath [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with mount options" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] should perform rolling updates and roll backs of template modifications [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned Snapshot (delete policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller  should check snapshot fields, check restore correctly works after modifying source data, check deletion" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] Projected downwardAPI should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-network] Services should serve a basic endpoint from pods  [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (ntfs)][sig-windows] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-api-machinery] ResourceQuota should create a ResourceQuota and ensure its status is promptly calculated. [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] PersistentVolumes [Feature:vsphere][Feature:ReclaimPolicy] [sig-storage] persistentvolumereclaim:vsphere [Feature:vsphere] should delete persistent volume when reclaimPolicy set to delete and associated claim is deleted" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand Verify if offline PVC expansion works" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-network] EndpointSlice should have Endpoints and EndpointSlices pointing to API Server" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ext3)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[k8s.io] Security Context when creating containers with AllowPrivilegeEscalation should allow privilege escalation when true [LinuxOnly] [NodeConformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source in parallel [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support non-existent path" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] Downward API volume should provide container&#39;s memory request [NodeConformance] [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] ConfigMap should be consumable from pods in volume [NodeConformance] [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should enforce multiple ingress policies with ingress allow-all policy taking precedence [Feature:NetworkPolicy]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] Volume Provisioning On Clustered Datastore [Feature:vsphere] verify dynamic provision with spbm policy on clustered datastore" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (ext4)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand should resize volume when PVC is edited while pod is using it" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (ntfs)][sig-windows] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if subpath with backstepping is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Inline-volume (ext3)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota&#39;s priority class scope (cpu, memory quota set) against a pod with same priority class." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)][Slow] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[k8s.io] [sig-cloud-provider-gcp] Ports Security Check [Feature:KubeletSecurity] should not be able to proxy to the readonly kubelet port 10255 using proxy subresource" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[k8s.io] [sig-cloud-provider-gcp] [sig-auth] ServiceAccount admission controller migration [Feature:BoundServiceAccountTokenVolume] master upgrade should maintain a functioning cluster" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)][sig-windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-network] IngressClass [Feature:Ingress] should prevent Ingress creation if more than 1 IngressClass marked as default [Serial]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support existing single file [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support existing directories when readOnly specified in the volumeSource" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support existing directory" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] EmptyDir volumes volume on default medium should have the correct mode [LinuxOnly] [NodeConformance] [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] Projected configMap should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] ConfigMap should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-autoscaling] Cluster size autoscaling [Slow] should add node to the particular mig [Feature:ClusterSizeAutoscalingScaleUp]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-apps] ReplicaSet should serve a basic image on each replica with a public image  [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI mock volume CSI workload information using mock driver should not be passed when podInfoOnMount=nil" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should allow egress access to server in CIDR block [Feature:NetworkPolicy]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-instrumentation] Events API should delete a collection of events [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-api-machinery] Generated clientset should create v1beta1 cronJobs, delete cronJobs, watch cronJobs" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if subpath file is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support file as subpath [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (ntfs)][sig-windows] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-network] SCTP [Feature:SCTP] [LinuxOnly] should create a ClusterIP Service with SCTP ports" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] Volume Provisioning On Clustered Datastore [Feature:vsphere] verify static provisioning on clustered datastore" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (ntfs)][sig-windows] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] Zone Support [Feature:vsphere] Verify PVC creation with incompatible storagePolicy and zone combination specified in storage class fails" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-apps] Daemon set [Serial] should run and stop complex daemon [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[k8s.io] [sig-cloud-provider-gcp] Upgrade [Feature:Upgrade] node upgrade should maintain a functioning cluster [Feature:NodeUpgrade]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support restarting containers using directory as subpath [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] Storage Policy Based Volume Provisioning [Feature:vsphere] verify VSAN storage capability with valid diskStripes and objectSpaceReservation values is honored for dynamically provisioned pvc using storageclass" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-api-machinery] ResourceQuota [Feature:ScopeSelectors] should verify ResourceQuota with best effort scope using scope-selectors." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if subpath file is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support existing directory" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source in parallel [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] PersistentVolumes [Feature:vsphere][Feature:ReclaimPolicy] [sig-storage] persistentvolumereclaim:vsphere [Feature:vsphere] should not detach and unmount PV when associated pvc with delete as reclaimPolicy is deleted when it is in use by the pod" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] [Feature:Flexvolumes] Detaching volumes should not work when mount is in progress [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-api-machinery] Aggregator Should be able to support the 1.17 Sample API Server using the current Aggregator [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with mount options" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] Projected downwardAPI should update annotations on modification [NodeConformance] [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-network] Services should be able to create an internal type load balancer [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-auth] ServiceAccounts should support OIDC discovery of service account issuer [Feature:ServiceAccountIssuerDiscovery]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support existing directory" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support file as subpath [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[k8s.io] Security Context When creating a pod with readOnlyRootFilesystem should run the container with readonly rootfs when readOnlyRootFilesystem=true [LinuxOnly] [NodeConformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (default fs)] subPath should support existing directory" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should support a &#39;default-deny-ingress&#39; policy [Feature:NetworkPolicy]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-scheduling] SchedulerPredicates [Serial] validates local ephemeral storage resource limits of pods that are allowed to run [Feature:LocalStorageCapacityIsolation]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-cli] Kubectl client Simple pod should support exec through an HTTP proxy" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-windows] [Feature:Windows] Memory Limits [Serial] [Slow] Allocatable node memory should be equal to a calculated allocatable memory value" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Inline-volume (xfs)][Slow] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] PersistentVolumes-local  [Volume type: dir-link] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[k8s.io] [sig-cloud-provider-gcp] Reboot [Disruptive] [Feature:Reboot] each node by ordering unclean reboot and ensure they function upon restart" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI mock volume CSI workload information using mock driver should be passed when podInfoOnMount=true" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support non-existent path" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should allow egress access on one named port [Feature:NetworkPolicy]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support existing directory" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-cli] Kubectl client Kubectl copy should copy a file from a running Pod" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support restarting containers using file as subpath [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-network] Networking IPerf IPv4 [Experimental] [Feature:Networking-IPv4] [Slow] [Feature:Networking-Performance] should transfer ~ 1GB onto the service endpoint 1 servers (maximum of 1 clients)" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] Zone Support [Feature:vsphere] Verify PVC creation with compatible policy and datastore without any zones specified in the storage class fails (No shared datastores exist among all the nodes)" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if subpath with backstepping is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support restarting containers using directory as subpath [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[k8s.io] [sig-node] crictl should be able to run crictl on the node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition getting/updating/patching custom resource definition status sub-resource works  [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (xfs)][Slow] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source in parallel [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [sig-autoscaling] [Serial] [Slow] ReplicationController Should scale from 5 pods to 3 pods and from 3 to 1 and verify decision stability" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-network] Loadbalancing: L7 GCE [Slow] [Feature:NEG] should conform to Ingress spec" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[k8s.io] PrivilegedPod [NodeConformance] should enable privileged commands [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source in parallel [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-network] Services should be able to switch session affinity for LoadBalancer service with ESIPP off [Slow] [DisabledForLargeClusters] [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] HostPathType Character Device [Slow] Should be able to mount character device &#39;achardev&#39; successfully when HostPathType is HostPathCharDev" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] PersistentVolumes-local  [Volume type: tmpfs] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] Projected downwardAPI should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should honor timeout [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)][Slow] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[k8s.io] Probing container should be restarted with a docker exec liveness probe with timeout " classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] Downward API volume should provide podname as non-root with fsgroup and defaultMode [LinuxOnly] [NodeFeature:FSGroup]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Inline-volume (xfs)][Slow] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[k8s.io] Cluster size autoscaler scalability [Slow] CA ignores unschedulable pods while scheduling schedulable pods [Feature:ClusterAutoscalerScalability6]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] EmptyDir volumes when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup] files with FSGroup ownership should support (root,0644,tmpfs)" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] Zone Support [Feature:vsphere] Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and storage policy specified in storage class" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[k8s.io] [sig-cloud-provider] [Feature:CloudProvider][Disruptive] Nodes should be deleted on API server if it doesn&#39;t exist in the cloud provider" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Inline-volume (ntfs)][sig-windows] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] PersistentVolumes-local  StatefulSet with pod affinity [Slow] should use volumes on one node when pod has affinity" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-network] Services should work after restarting apiserver [Disruptive]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (xfs)][Slow] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] Pod Disks should be able to delete a non-existent PD without error" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-cli] Kubectl client Proxy server should support --unix-socket=/path  [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart http hook properly [NodeConformance] [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] PersistentVolumes-local  [Volume type: tmpfs] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] Projected downwardAPI should update labels on modification [NodeConformance] [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (default fs)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (xfs)][Slow] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-apps] DaemonRestart [Disruptive] Controller Manager should not create/delete replicas across restart" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a replication controller. [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[k8s.io] Security Context When creating a container with runAsNonRoot should not run without a specified user ID" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-scheduling] Multi-AZ Clusters should spread the pods of a service across zones" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-network] Services should be able to switch session affinity for LoadBalancer service with ESIPP on [Slow] [DisabledForLargeClusters] [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[k8s.io] GKE local SSD [Feature:GKELocalSSD] should write and read from node local SSD [Feature:GKELocalSSD]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] Zone Support [Feature:vsphere] Verify PVC creation with incompatible zone along with compatible storagePolicy and datastore combination specified in storage class fails" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] PersistentVolumes-local  [Volume type: blockfswithoutformat] One pod requesting one prebound PVC should be able to mount volume and write from pod1" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-auth] ServiceAccounts should mount an API token into pods  [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-ui] Kubernetes Dashboard [Feature:Dashboard] should check that the kubernetes-dashboard instance is alive" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] EmptyDir volumes should support (non-root,0777,default) [LinuxOnly] [NodeConformance] [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-network] Conntrack should be able to preserve UDP traffic when server pod cycles for a NodePort service" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate configmap [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support file as subpath [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] Secrets should be immutable if `immutable` field is set" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-instrumentation] MetricsGrabber should grab all metrics from API server." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-cli] Kubectl client Simple pod should support port-forward" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[k8s.io] Security Context When creating a pod with readOnlyRootFilesystem should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance] [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support existing directories when readOnly specified in the volumeSource" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[k8s.io] Container Runtime blackbox test on terminated container should report termination message [LinuxOnly] if TerminationMessagePath is set [NodeConformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) [Feature:GenericEphemeralVolume] (late-binding)] ephemeral should create read/write inline ephemeral volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-scheduling] SchedulerPreemption [Serial] validates lower priority pod preemption by critical pod [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-apps] Deployment RecreateDeployment should delete old pods and create new ones [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (xfs)][Slow] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-network] Services should test the lifecycle of an Endpoint [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] Zone Support [Feature:vsphere] Verify a pod is created and attached to a dynamically created PV, based on the allowed zones specified in storage class when the datastore under the zone is present in another datacenter" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-instrumentation] MetricsGrabber should grab all metrics from a Scheduler." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support restarting containers using file as subpath [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] GenericPersistentVolume[Disruptive] When kubelet restarts Should test that a volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support restarting containers using directory as subpath [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (ntfs)][sig-windows] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (xfs)][Slow] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support restarting containers using file as subpath [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support non-existent path" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support existing directories when readOnly specified in the volumeSource" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support non-existent path" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ext3)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-apps] ReplicationController should release no longer matching pods [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if subpath file is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)][Slow] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if subpath file is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] PVC Protection Verify that PVC in active use by a pod is not removed immediately" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Inline-volume (ext3)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support existing directories when readOnly specified in the volumeSource" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Inline-volume (xfs)][Slow] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-auth] PodSecurityPolicy [Feature:PodSecurityPolicy] should allow pods under the privileged policy.PodSecurityPolicy" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] HostPathType Socket [Slow] Should fail on mounting socket &#39;asocket&#39; when HostPathType is HostPathDirectory" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (xfs)][Slow] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[k8s.io] Pods should contain environment variables for services [NodeConformance] [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] HostPathType File [Slow] Should fail on mounting file &#39;afile&#39; when HostPathType is HostPathSocket" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-autoscaling] DNS horizontal autoscaling [Serial] [Slow] kube-dns-autoscaler should scale kube-dns pods when cluster size changed" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Inline-volume (ext4)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand Verify if offline PVC expansion works" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-api-machinery] Watchers should receive events on concurrent watches in same order [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-network] Networking Granular Checks: Pods should function for node-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand should resize volume when PVC is edited while pod is using it" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[k8s.io] Lease lease API should be available [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-api-machinery] Discovery Custom resource should have storage version hash" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-cluster-lifecycle] [Feature:BootstrapTokens] should resign the bootstrap tokens when the clusterInfo ConfigMap updated [Serial][Disruptive]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[k8s.io] Pods should support remote command execution over websockets [NodeConformance] [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support existing directory" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support existing directory" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[k8s.io] Cluster size autoscaler scalability [Slow] should scale down underutilized nodes [Feature:ClusterAutoscalerScalability4]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Inline-volume (ext3)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support readOnly directory specified in the volumeMount" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Inline-volume (default fs)] subPath should support existing directory" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support existing directory" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (ext4)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support non-existent path" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-api-machinery] Events should delete a collection of events [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should allow ingress access on one named port [Feature:NetworkPolicy]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (ntfs)][sig-windows] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] HostPathType Socket [Slow] Should fail on mounting socket &#39;asocket&#39; when HostPathType is HostPathFile" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-cli] Kubectl client Kubectl cluster-info dump should check if cluster-info dump succeeds" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] Projected configMap Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if subpath file is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) [Feature:GenericEphemeralVolume] (late-binding)] ephemeral should support two pods which share the same volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-api-machinery] Secrets should fail to create secret due to empty secret key [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota&#39;s priority class scope (quota set to pod count: 1) against 2 pods with same priority class." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support restarting containers using directory as subpath [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[k8s.io] [Feature:Example] [k8s.io] Secret should create a pod that reads a secret" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-cli] Kubectl client Kubectl version should check is all data is printed  [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [sig-autoscaling] [Serial] [Slow] ReplicaSet Should scale from 1 pod to 3 pods and from 3 to 5" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-autoscaling] Cluster size autoscaling [Slow] should disable node pool autoscaling [Feature:ClusterSizeAutoscalingScaleUp]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[k8s.io] [sig-node] NoExecuteTaintManager Single Pod [Serial] evicts pods from tainted nodes" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Inline-volume (ext3)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[k8s.io] NodeLease when the NodeLease feature is enabled the kubelet should create and update a lease in the kube-node-lease namespace" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (ntfs)][sig-windows] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support non-existent path" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] PersistentVolumes-local  [Volume type: dir-link-bindmounted] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (xfs)][Slow] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)][Slow] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] Projected secret should be consumable from pods in volume [NodeConformance] [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [sig-autoscaling] [Serial] [Slow] ReplicaSet Should scale from 5 pods to 3 pods and from 3 to 1" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Inline-volume (xfs)][Slow] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] Verify Volume Attach Through vpxd Restart [Feature:vsphere][Serial][Disruptive] verify volume remains attached through vpxd restart" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-scheduling] SchedulerPredicates [Serial] validates that there is no conflict between pods with same hostPort but different hostIP and protocol [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-network] [Feature:PerformanceDNS][Serial] Should answer DNS query for maximum number of services per cluster" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-cli] Kubectl client Simple pod should support exec" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[k8s.io] [sig-cloud-provider-gcp] Restart [Disruptive] should restart all nodes and ensure all nodes and pods recover" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[k8s.io] [sig-cloud-provider-gcp] Nodes [Disruptive] Resize [Slow] should be able to add nodes" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a persistent volume claim. [sig-storage]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[k8s.io] Container Runtime blackbox test on terminated container should report termination message [LinuxOnly] as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should verify container cannot write to subpath readonly volumes [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] HostPathType Socket [Slow] Should fail on mounting socket &#39;asocket&#39; when HostPathType is HostPathBlockDev" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-network] Service endpoints latency should not be very high  [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Inline-volume (default fs)] subPath should support existing directory" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] HostPathType Character Device [Slow] Should be able to mount character device &#39;achardev&#39; successfully when HostPathType is HostPathUnset" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] [Serial] Volume metrics should create volume metrics in Volume Manager" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-node] Downward API should provide container&#39;s limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (ntfs)][sig-windows] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support existing directory" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate custom resource [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] PersistentVolumes-local  Pod with node different from PV&#39;s NodeAffinity should fail scheduling due to different NodeSelector" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] Downward API volume should set mode on item file [LinuxOnly] [NodeConformance] [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support existing directories when readOnly specified in the volumeSource" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (ntfs)][sig-windows] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support creating multiple subpath from same volumes [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] Pod Disks schedule pods each with a PD, delete pod and verify detach [Slow] for read-only PD with pod delete grace period of &#34;immediate (0s)&#34;" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-network] IngressClass [Feature:Ingress] should not set default value if no default IngressClass [Serial]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand should resize volume when PVC is edited while pod is using it" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should enforce policy based on Ports [Feature:NetworkPolicy]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-cli] Kubectl client Kubectl server-side dry-run should check if kubectl can dry-run update Pods [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-apps] DisruptionController evictions: maxUnavailable deny evictions, integer =&gt; should not allow an eviction [Serial]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] PersistentVolumes-local  [Volume type: dir-bindmounted] One pod requesting one prebound PVC should be able to mount volume and write from pod1" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] PersistentVolumes-local  [Volume type: gce-localssd-scsi-fs] [Serial] Set fsGroup for local volume should set fsGroup for one pod [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (ext4)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support file as subpath [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-network] DNS should provide DNS for the cluster  [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (xfs)][Slow] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext3)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-scheduling] SchedulerPredicates [Serial] validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support existing directory" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)][Slow] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[k8s.io] Sysctls [LinuxOnly] [NodeFeature:Sysctls] should not launch unsafe, but not explicitly enabled sysctls on the node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (ntfs)][sig-windows] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (xfs)][Slow] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] Flexvolumes should be mountable when attachable [Feature:Flexvolumes]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] Storage Policy Based Volume Provisioning [Feature:vsphere] verify VSAN storage capability with non-vsan datastore is not honored for dynamically provisioned pvc using storageclass" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-network] Networking should provide Internet connection for containers [Feature:Networking-IPv6][Experimental][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Generic Ephemeral-volume (default fs) [Feature:GenericEphemeralVolume] (late-binding)] ephemeral should create read-only inline ephemeral volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] Secrets should be consumable from pods in volume with mappings [NodeConformance] [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (ext3)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-scheduling] SchedulerPredicates [Serial] validates that taints-tolerations is respected if matching" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-windows] [Feature:Windows] SecurityContext RunAsUserName should be able create pods and run containers with a given username" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[k8s.io] [sig-node] Security Context should support container.SecurityContext.RunAsUser [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should verify container cannot write to subpath readonly volumes [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support non-existent path" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (xfs)][Slow] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (xfs)][Slow] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[k8s.io] [sig-cloud-provider-gcp] Reboot [Disruptive] [Feature:Reboot] each node by ordering clean reboot and ensure they function upon restart" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned Snapshot (retain policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller  should check snapshot fields, check restore correctly works after modifying source data, check deletion" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[k8s.io] Downward API [Serial] [Disruptive] [NodeFeature:EphemeralStorage] Downward API tests for local ephemeral storage should provide container&#39;s limits.ephemeral-storage and requests.ephemeral-storage as env vars" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support non-existent path" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] listing mutating webhooks should work [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (ntfs)][sig-windows] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] PersistentVolumes-local  StatefulSet with pod affinity [Slow] should use volumes spread across nodes when pod management is parallel and pod has anti-affinity" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[k8s.io] Docker Containers should use the image defaults if command and args are blank [NodeConformance] [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support existing directories when readOnly specified in the volumeSource" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] Volume Disk Format [Feature:vsphere] verify disk format type - thin is honored for dynamically provisioned pv using storageclass" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] HostPathType File [Slow] Should fail on mounting file &#39;afile&#39; when HostPathType is HostPathCharDev" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] Volume limits should verify that all nodes have volume limits" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (ext4)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] PersistentVolumes-local  [Volume type: tmpfs] Set fsGroup for local volume should set fsGroup for one pod [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext3)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] Projected secret Should fail non-optional pod creation due to secret object does not exist [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support file as subpath [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-cli] Kubectl client Kubectl diff should check if kubectl diff finds a difference for Deployments [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Inline-volume (default fs)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand should resize volume when PVC is edited while pod is using it" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] Storage Policy Based Volume Provisioning [Feature:vsphere] verify VSAN storage capability with valid objectSpaceReservation and iopsLimit values is honored for dynamically provisioned pvc using storageclass" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support file as subpath [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Inline-volume (xfs)][Slow] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] PersistentVolumes:vsphere [Feature:vsphere] should test that a file written to the vsphere volume mount before kubelet restart can be read after restart [Disruptive]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] Zone Support [Feature:vsphere] Verify a pod is created and attached to a dynamically created PV, based on multiple zones specified in storage class " classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] Secrets should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)][Slow] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] Projected configMap should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-api-machinery] Garbage collector should support orphan deletion of custom resources" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-network] Networking Granular Checks: Services should update nodePort: udp [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (xfs)][Slow] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[k8s.io] Kubelet when scheduling a read only busybox container should not write to root filesystem [LinuxOnly] [NodeConformance] [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] Projected configMap should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-windows] [Feature:Windows] Kubelet-Stats [Serial] Kubelet stats collection for Windows nodes when running 10 pods should return within 10 seconds" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support file as subpath [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Inline-volume (xfs)][Slow] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source in parallel [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-cli] Kubectl client Guestbook application should create and stop a working application  [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] PersistentVolumes-local  [Volume type: dir-link-bindmounted] Set fsGroup for local volume should set fsGroup for one pod [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] listing validating webhooks should work [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] should include custom resource definition resources in discovery documents [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] HostPathType Block Device [Slow] Should be able to mount block device &#39;ablkdev&#39; successfully when HostPathType is HostPathBlockDev" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] Projected secret should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-cli] Kubectl client Kubectl get componentstatuses should get componentstatuses" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-apps] Daemon set [Serial] should rollback without unnecessary restarts [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand Verify if offline PVC expansion works" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (xfs)][Slow] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand should resize volume when PVC is edited while pod is using it" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-cli] Kubectl Port forwarding With a server listening on localhost should support forwarding over websockets" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-network] Services should implement service.kubernetes.io/headless" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-scheduling] SchedulerPreemption [Serial] PodTopologySpread Preemption validates proper pods are preempted" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support creating multiple subpath from same volumes [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should be able to deny custom resource creation, update and deletion [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-network] Services should find a service from listing all namespaces [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with mount options" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support creating multiple subpath from same volumes [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] Zone Support [Feature:vsphere] Verify dynamically created pv with multiple zones specified in the storage class, shows both the zones on its labels" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI mock volume CSIStorageCapacity [Feature:CSIStorageCapacity] CSIStorageCapacity used, no capacity" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)][sig-windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (xfs)][Slow] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support readOnly directory specified in the volumeMount" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if subpath with backstepping is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source in parallel [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-api-machinery] Secrets should patch a secret [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] PersistentVolumes-local  [Volume type: blockfswithoutformat] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 that expects a client request should support a client that connects, sends NO DATA, and disconnects" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] Volume Provisioning On Clustered Datastore [Feature:vsphere] verify dynamic provision with default parameter on clustered datastore" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-scheduling] SchedulerPredicates [Serial] validates that taints-tolerations is respected if not matching" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] Zone Support [Feature:vsphere] Verify a pod is created and attached to a dynamically created PV, based on a VSAN capability, datastore and compatible zone specified in storage class" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand Verify if offline PVC expansion works" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-apps] DaemonRestart [Disruptive] Kube-proxy should recover after being killed accidentally" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (ext4)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (xfs)][Slow] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if subpath directory is outside the volume [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (ntfs)][sig-windows] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-cli] Kubectl client Kubectl describe should check if kubectl describe prints relevant information for cronjob" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[k8s.io] [sig-node] Security Context should support pod.Spec.SecurityContext.RunAsUser [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-api-machinery] client-go should negotiate watch and report errors with accept &#34;application/json&#34;" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (xfs)][Slow] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] PersistentVolumes-local  [Volume type: block] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI mock volume storage capacity exhausted, immediate binding" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-apps] Deployment deployment should delete old replica sets [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)][Slow] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI mock volume CSI Volume expansion should expand volume without restarting pod if nodeExpansion=off" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-cli] Kubectl client Kubectl patch should add annotations for pods in rc  [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-autoscaling] [HPA] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) should scale down with Custom Metric of type Pod from Stackdriver [Feature:CustomMetricsAutoscaling]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive][Slow][LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-apps] ReplicationController [Flaky] should test the lifecycle of a ReplicationController" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota&#39;s priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpExists)." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[k8s.io] Sysctls [LinuxOnly] [NodeFeature:Sysctls] should support unsafe sysctls which are actually whitelisted" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Inline-volume (ext3)] volumes should store data" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support existing directory" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [sig-autoscaling] ReplicationController light Should scale from 2 pods to 1 pod [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-network] Ingress API should support creating Ingress API operations [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns." classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for CRD preserving unknown fields at the schema root [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Inline-volume (ntfs)][sig-windows] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-scheduling] SchedulerPredicates [Serial] validates that NodeSelector is respected if not matching  [Conformance]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)][Slow] volumes should allow exec of files on the volume" classname="Kubernetes e2e suite" time="0">
          <skipped></skipped>
      </testcase>
      <testcase name="[sig-apps] Deployment iterative rollouts should eventually progress" classname="Kubernetes e2e suite" time="357.789199839">
          <failure type="Failure">/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:121&#xA;May  7 23:26:51.226: Unexpected error:&#xA;    &lt;*errors.errorString | 0xc0042d7aa0&gt;: {&#xA;        s: &#34;error waiting for deployment \&#34;webserver\&#34; status to match expectation: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:\&#34;Available\&#34;, Status:\&#34;False\&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:\&#34;MinimumReplicasUnavailable\&#34;, Message:\&#34;Deployment does not have minimum availability.\&#34;}, v1.DeploymentCondition{Type:\&#34;Progressing\&#34;, Status:\&#34;False\&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:\&#34;ProgressDeadlineExceeded\&#34;, Message:\&#34;ReplicaSet \\\&#34;webserver-7ccc6798d4\\\&#34; has timed out progressing.\&#34;}}, CollisionCount:(*int32)(nil)}&#34;,&#xA;    }&#xA;    error waiting for deployment &#34;webserver&#34; status to match expectation: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, 
Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;occurred&#xA;/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:648</failure>
          <system-out>[BeforeEach] [sig-apps] Deployment&#xA;  /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:174&#xA;STEP: Creating a kubernetes client&#xA;May  7 23:20:53.803: INFO: &gt;&gt;&gt; kubeConfig: /root/.kube/config&#xA;STEP: Building a namespace api object, basename deployment&#xA;May  7 23:20:53.823: INFO: Found PodSecurityPolicies; testing pod creation to see if PodSecurityPolicy is enabled&#xA;May  7 23:20:53.825: INFO: Error creating dryrun pod; assuming PodSecurityPolicy is disabled: admission webhook &#34;cmk.intel.com&#34; does not support dry run&#xA;STEP: Waiting for a default service account to be provisioned in namespace&#xA;[BeforeEach] [sig-apps] Deployment&#xA;  /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:78&#xA;[It] iterative rollouts should eventually progress&#xA;  /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:121&#xA;May  7 23:20:53.827: INFO: Creating deployment &#34;webserver&#34;&#xA;May  7 23:20:53.832: INFO: 00: rolling back a rollout for deployment &#34;webserver&#34;&#xA;May  7 23:20:53.838: INFO: Updating deployment webserver&#xA;May  7 23:20:54.361: INFO: 01: scaling deployment &#34;webserver&#34;&#xA;May  7 23:20:54.366: INFO: Updating deployment webserver&#xA;May  7 23:20:54.400: INFO: 02: triggering a new rollout for deployment &#34;webserver&#34;&#xA;May  7 23:20:54.402: INFO: 02: scaling up&#xA;May  7 23:20:54.406: INFO: Updating deployment webserver&#xA;May  7 23:20:54.406: INFO: 03: triggering a new rollout for deployment &#34;webserver&#34;&#xA;May  7 23:20:56.416: INFO: 03: scaling up&#xA;May  7 23:20:56.422: INFO: Updating deployment webserver&#xA;May  7 23:20:58.550: INFO: 04: rolling back a rollout for deployment &#34;webserver&#34;&#xA;May  7 23:20:58.558: INFO: Updating deployment webserver&#xA;May  7 23:20:58.558: INFO: 05: 
arbitrarily deleting one or more deployment pods for deployment &#34;webserver&#34;&#xA;May  7 23:20:58.562: INFO: 05: deleting deployment pod &#34;webserver-57c6549b9c-vb96n&#34;&#xA;May  7 23:20:58.569: INFO: 05: deleting deployment pod &#34;webserver-7748f58bfd-mpgjm&#34;&#xA;May  7 23:20:58.576: INFO: 05: deleting deployment pod &#34;webserver-dd94f59b7-vk5s8&#34;&#xA;May  7 23:20:58.584: INFO: 05: deleting deployment pod &#34;webserver-dd94f59b7-xz9dr&#34;&#xA;May  7 23:20:58.595: INFO: 06: resuming deployment &#34;webserver&#34;&#xA;May  7 23:20:58.611: INFO: 06: scaling down&#xA;May  7 23:21:00.617: INFO: 06: scaling down&#xA;May  7 23:21:00.621: INFO: Updating deployment webserver&#xA;May  7 23:21:02.145: INFO: 07: arbitrarily deleting one or more deployment pods for deployment &#34;webserver&#34;&#xA;May  7 23:21:02.148: INFO: 07: deleting deployment pod &#34;webserver-57c6549b9c-gf7pj&#34;&#xA;May  7 23:21:02.155: INFO: 07: deleting deployment pod &#34;webserver-57c6549b9c-rrmtl&#34;&#xA;May  7 23:21:02.163: INFO: 07: deleting deployment pod &#34;webserver-57c6549b9c-tdsx6&#34;&#xA;May  7 23:21:02.169: INFO: 07: deleting deployment pod &#34;webserver-57c6549b9c-whlkd&#34;&#xA;May  7 23:21:02.176: INFO: 07: deleting deployment pod &#34;webserver-7748f58bfd-ln95f&#34;&#xA;May  7 23:21:02.182: INFO: 07: deleting deployment pod &#34;webserver-7748f58bfd-prl8t&#34;&#xA;May  7 23:21:06.715: INFO: 08: scaling deployment &#34;webserver&#34;&#xA;May  7 23:21:06.717: INFO: 08: scaling down&#xA;May  7 23:21:06.723: INFO: Updating deployment webserver&#xA;May  7 23:21:06.723: INFO: 09: resuming deployment &#34;webserver&#34;&#xA;May  7 23:21:06.725: INFO: 09: scaling down&#xA;May  7 23:21:06.728: INFO: Updating deployment webserver&#xA;May  7 23:21:08.796: INFO: 10: resuming deployment &#34;webserver&#34;&#xA;May  7 23:21:08.798: INFO: 10: scaling down&#xA;May  7 23:21:08.802: INFO: Updating deployment webserver&#xA;May  7 23:21:08.802: INFO: 11: scaling deployment 
&#34;webserver&#34;&#xA;May  7 23:21:08.805: INFO: 11: scaling up&#xA;May  7 23:21:08.807: INFO: Updating deployment webserver&#xA;May  7 23:21:14.630: INFO: 12: arbitrarily deleting one or more deployment pods for deployment &#34;webserver&#34;&#xA;May  7 23:21:14.634: INFO: 12: deleting deployment pod &#34;webserver-57c6549b9c-mr92t&#34;&#xA;May  7 23:21:14.644: INFO: 12: deleting deployment pod &#34;webserver-7748f58bfd-wdhrv&#34;&#xA;May  7 23:21:14.651: INFO: 12: deleting deployment pod &#34;webserver-7748f58bfd-z7lxx&#34;&#xA;May  7 23:21:17.963: INFO: 13: scaling deployment &#34;webserver&#34;&#xA;May  7 23:21:17.966: INFO: 13: scaling down&#xA;May  7 23:21:17.971: INFO: Updating deployment webserver&#xA;May  7 23:21:17.971: INFO: 14: resuming deployment &#34;webserver&#34;&#xA;May  7 23:21:17.974: INFO: 14: scaling up&#xA;May  7 23:21:17.977: INFO: Updating deployment webserver&#xA;May  7 23:21:25.876: INFO: 15: triggering a new rollout for deployment &#34;webserver&#34;&#xA;May  7 23:21:25.878: INFO: 15: scaling down&#xA;May  7 23:21:25.883: INFO: Updating deployment webserver&#xA;May  7 23:21:33.692: INFO: 16: arbitrarily deleting one or more deployment pods for deployment &#34;webserver&#34;&#xA;May  7 23:21:33.695: INFO: 16: deleting deployment pod &#34;webserver-57c6549b9c-sg587&#34;&#xA;May  7 23:21:33.701: INFO: 16: deleting deployment pod &#34;webserver-57c6549b9c-v975h&#34;&#xA;May  7 23:21:33.709: INFO: 16: deleting deployment pod &#34;webserver-69b69768db-xfl87&#34;&#xA;May  7 23:21:33.714: INFO: 16: deleting deployment pod &#34;webserver-7748f58bfd-t7cxv&#34;&#xA;May  7 23:21:43.803: INFO: 17: triggering a new rollout for deployment &#34;webserver&#34;&#xA;May  7 23:21:43.805: INFO: 17: scaling up&#xA;May  7 23:21:43.809: INFO: Updating deployment webserver&#xA;May  7 23:21:43.809: INFO: 18: resuming deployment &#34;webserver&#34;&#xA;May  7 23:21:43.811: INFO: 18: scaling down&#xA;May  7 23:21:43.814: INFO: Updating deployment webserver&#xA;May 
 7 23:21:49.203: INFO: 19: scaling deployment &#34;webserver&#34;&#xA;May  7 23:21:49.205: INFO: 19: scaling up&#xA;May  7 23:21:49.208: INFO: Updating deployment webserver&#xA;May  7 23:21:49.211: INFO: Waiting for deployment &#34;webserver&#34; to be observed by the controller&#xA;May  7 23:21:51.216: INFO: Waiting for deployment &#34;webserver&#34; status&#xA;May  7 23:21:51.219: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;True&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026509, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ReplicaSetUpdated&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; is progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:21:53.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum 
availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;True&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026509, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ReplicaSetUpdated&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; is progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:21:55.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;True&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026509, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ReplicaSetUpdated&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; is progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:21:57.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, 
Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;True&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026509, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ReplicaSetUpdated&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; is progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:21:59.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;True&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026509, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ReplicaSetUpdated&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; is progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:22:01.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, 
LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;True&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026509, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ReplicaSetUpdated&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; is progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:22:03.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;True&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026509, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ReplicaSetUpdated&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; is progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:22:05.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, 
LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;True&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026509, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ReplicaSetUpdated&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; is progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:22:07.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;True&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026509, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ReplicaSetUpdated&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; is progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:22:09.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, 
Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;True&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026509, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ReplicaSetUpdated&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; is progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:22:11.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;True&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026509, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ReplicaSetUpdated&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; is progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:22:13.223: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, 
UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;True&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026509, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ReplicaSetUpdated&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; is progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:22:15.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;True&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026509, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ReplicaSetUpdated&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; is progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:22:17.221: INFO: 
deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;True&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026509, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ReplicaSetUpdated&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; is progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:22:19.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;True&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026509, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ReplicaSetUpdated&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; is 
progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:22:21.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:22:23.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, 
Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:22:25.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:22:27.223: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, 
loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:22:29.223: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:22:31.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, 
v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:22:33.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:22:35.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, 
Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:22:37.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:22:39.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, 
loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:22:41.223: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:22:43.223: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, 
Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:22:45.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:22:47.223: INFO: deployment status: 
v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:22:49.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has 
timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:22:51.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:22:53.223: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, 
loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:22:55.223: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:22:57.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, 
ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:22:59.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:23:01.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, 
v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:23:03.223: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:23:05.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, 
Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:23:07.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:23:09.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, 
loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:23:11.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:23:13.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, 
Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:23:15.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:23:17.223: INFO: deployment status: 
v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:23:19.224: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has 
timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:23:21.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:23:23.223: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, 
loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:23:25.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:23:27.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, 
ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:23:29.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:23:31.223: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, 
v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:23:33.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:23:35.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, 
Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:23:37.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:23:39.223: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, 
loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:23:41.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:23:43.223: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, 
Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:23:45.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:23:47.223: INFO: deployment status: 
v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:23:49.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has 
timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:23:51.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:23:53.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, 
loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:23:55.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:23:57.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, 
ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:23:59.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:24:01.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, 
v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:24:03.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:24:05.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, 
Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:24:07.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:24:09.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, 
loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:24:11.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:24:13.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, 
Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:24:15.223: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:24:17.221: INFO: deployment status: 
v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:24:19.224: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has 
timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:24:21.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:24:23.223: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, 
loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:24:25.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:24:27.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, 
ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:24:29.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:24:31.223: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, 
v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:24:33.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:24:35.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, 
Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:24:37.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:24:39.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, 
loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:24:41.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:24:43.224: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, 
Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:24:45.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:24:47.221: INFO: deployment status: 
v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:24:49.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has 
timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:24:51.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:24:53.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, 
loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:24:55.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:24:57.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, 
ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:24:59.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:25:01.224: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, 
v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:25:03.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:25:05.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, 
Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:25:07.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:25:09.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, 
loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:25:11.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:25:13.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, 
Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:25:15.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:25:17.221: INFO: deployment status: 
v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:25:19.223: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has 
timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:25:21.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:25:23.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, 
loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:25:25.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:25:27.223: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, 
ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:25:29.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:25:31.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, 
v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:25:33.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:25:35.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, 
Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:25:37.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:25:39.224: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, 
loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:25:41.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:25:43.227: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, 
Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:25:45.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:25:47.222: INFO: deployment status: 
v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:25:49.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has 
timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:25:51.225: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:25:53.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, 
loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:25:55.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:25:57.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, 
ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:25:59.224: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:26:01.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, 
v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:26:03.224: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:26:05.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, 
Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:26:07.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:26:09.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, 
loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:26:11.223: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:26:13.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, 
Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:26:15.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:26:17.221: INFO: deployment status: 
v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:26:19.225: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has 
timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:26:21.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:26:23.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, 
loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:26:25.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:26:27.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, 
ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:26:29.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:26:31.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, 
v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:26:33.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:26:35.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, 
Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:26:37.221: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:26:39.223: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, 
loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:26:41.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:26:43.223: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, 
Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:26:45.223: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:26:47.223: INFO: deployment status: 
v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:26:49.222: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has 
timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:26:51.223: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:26:51.225: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, 
loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;May  7 23:26:51.226: FAIL: Unexpected error:&#xA;    &lt;*errors.errorString | 0xc0042d7aa0&gt;: {&#xA;        s: &#34;error waiting for deployment \&#34;webserver\&#34; status to match expectation: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:\&#34;Available\&#34;, Status:\&#34;False\&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, Reason:\&#34;MinimumReplicasUnavailable\&#34;, Message:\&#34;Deployment does not have minimum availability.\&#34;}, v1.DeploymentCondition{Type:\&#34;Progressing\&#34;, Status:\&#34;False\&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:\&#34;ProgressDeadlineExceeded\&#34;, Message:\&#34;ReplicaSet \\\&#34;webserver-7ccc6798d4\\\&#34; has timed out progressing.\&#34;}}, CollisionCount:(*int32)(nil)}&#34;,&#xA;    }&#xA;    error waiting for deployment &#34;webserver&#34; status to match expectation: deployment status: v1.DeploymentStatus{ObservedGeneration:18, Replicas:7, UpdatedReplicas:3, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:7, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:&#34;Available&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026453, 
loc:(*time.Location)(0x770c940)}}, Reason:&#34;MinimumReplicasUnavailable&#34;, Message:&#34;Deployment does not have minimum availability.&#34;}, v1.DeploymentCondition{Type:&#34;Progressing&#34;, Status:&#34;False&#34;, LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63756026540, loc:(*time.Location)(0x770c940)}}, Reason:&#34;ProgressDeadlineExceeded&#34;, Message:&#34;ReplicaSet \&#34;webserver-7ccc6798d4\&#34; has timed out progressing.&#34;}}, CollisionCount:(*int32)(nil)}&#xA;occurred&#xA;&#xA;Full Stack Trace&#xA;k8s.io/kubernetes/test/e2e/apps.testIterativeDeployments(0xc0003e7ce0)&#xA;&#x9;/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:648 +0x184f&#xA;k8s.io/kubernetes/test/e2e/apps.glob..func4.8()&#xA;&#x9;/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:122 +0x2a&#xA;k8s.io/kubernetes/test/e2e.RunE2ETests(0xc000601e00)&#xA;&#x9;_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/e2e.go:130 +0x345&#xA;k8s.io/kubernetes/test/e2e.TestE2E(0xc000601e00)&#xA;&#x9;_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/e2e_test.go:145 +0x2b&#xA;testing.tRunner(0xc000601e00, 0x4de37a0)&#xA;&#x9;/usr/local/go/src/testing/testing.go:1123 +0xef&#xA;created by testing.(*T).Run&#xA;&#x9;/usr/local/go/src/testing/testing.go:1168 +0x2b3&#xA;[AfterEach] [sig-apps] Deployment&#xA;  /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:72&#xA;May  7 23:26:51.231: INFO: Deployment &#34;webserver&#34;:&#xA;&amp;Deployment{ObjectMeta:{webserver  deployment-6704 /apis/apps/v1/namespaces/deployment-6704/deployments/webserver 29d41602-f88c-4fc8-be92-91f01e7f8b87 72306 18 2021-05-07 23:20:53 +0000 UTC &lt;nil&gt; &lt;nil&gt; map[name:httpd] map[deployment.kubernetes.io/revision:6] [] []  
[]},Spec:DeploymentSpec{Replicas:*5,Selector:&amp;v1.LabelSelector{MatchLabels:map[string]string{name: httpd,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{      0 0001-01-01 00:00:00 +0000 UTC &lt;nil&gt; &lt;nil&gt; map[name:httpd] map[] [] []  []} {[] [] [{httpd docker.io/library/httpd:2.4.38-alpine [] []  [] [] [{A 2 nil} {A 15 nil} {A 17 nil}] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc002c2d628 &lt;nil&gt; ClusterFirst map[]   &lt;nil&gt;  false false false &lt;nil&gt; &amp;PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} []   nil default-scheduler [] []  &lt;nil&gt; nil [] &lt;nil&gt; &lt;nil&gt; &lt;nil&gt; map[] [] &lt;nil&gt;}},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&amp;RollingUpdateDeployment{MaxUnavailable:25%!,(MISSING)MaxSurge:25%!,(MISSING)},},MinReadySeconds:0,RevisionHistoryLimit:*2,Paused:false,ProgressDeadlineSeconds:*30,},Status:DeploymentStatus{ObservedGeneration:18,Replicas:7,UpdatedReplicas:3,AvailableReplicas:0,UnavailableReplicas:7,Conditions:[]DeploymentCondition{DeploymentCondition{Type:Available,Status:False,Reason:MinimumReplicasUnavailable,Message:Deployment does not have minimum availability.,LastUpdateTime:2021-05-07 23:20:53 +0000 UTC,LastTransitionTime:2021-05-07 23:20:53 +0000 UTC,},DeploymentCondition{Type:Progressing,Status:False,Reason:ProgressDeadlineExceeded,Message:ReplicaSet &#34;webserver-7ccc6798d4&#34; has timed out progressing.,LastUpdateTime:2021-05-07 23:22:20 +0000 UTC,LastTransitionTime:2021-05-07 23:22:20 +0000 
UTC,},},ReadyReplicas:0,CollisionCount:nil,},}&#xA;&#xA;May  7 23:26:51.234: INFO: New ReplicaSet &#34;webserver-7ccc6798d4&#34; of Deployment &#34;webserver&#34;:&#xA;&amp;ReplicaSet{ObjectMeta:{webserver-7ccc6798d4  deployment-6704 /apis/apps/v1/namespaces/deployment-6704/replicasets/webserver-7ccc6798d4 69666fda-c7b8-4c65-9b49-4ef44d9a3f33 72006 4 2021-05-07 23:21:43 +0000 UTC &lt;nil&gt; &lt;nil&gt; map[name:httpd pod-template-hash:7ccc6798d4] map[deployment.kubernetes.io/desired-replicas:5 deployment.kubernetes.io/max-replicas:7 deployment.kubernetes.io/revision:6] [{apps/v1 Deployment webserver 29d41602-f88c-4fc8-be92-91f01e7f8b87 0xc002c2dd70 0xc002c2dd71}] []  []},Spec:ReplicaSetSpec{Replicas:*3,Selector:&amp;v1.LabelSelector{MatchLabels:map[string]string{name: httpd,pod-template-hash: 7ccc6798d4,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{      0 0001-01-01 00:00:00 +0000 UTC &lt;nil&gt; &lt;nil&gt; map[name:httpd pod-template-hash:7ccc6798d4] map[] [] []  []} {[] [] [{httpd docker.io/library/httpd:2.4.38-alpine [] []  [] [] [{A 2 nil} {A 15 nil} {A 17 nil}] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc002c2ddd0 &lt;nil&gt; ClusterFirst map[]   &lt;nil&gt;  false false false &lt;nil&gt; &amp;PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} []   nil default-scheduler [] []  &lt;nil&gt; nil [] &lt;nil&gt; &lt;nil&gt; &lt;nil&gt; map[] [] 
&lt;nil&gt;}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:3,FullyLabeledReplicas:3,ObservedGeneration:4,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},}&#xA;May  7 23:26:51.234: INFO: All old ReplicaSets of Deployment &#34;webserver&#34;:&#xA;May  7 23:26:51.234: INFO: &amp;ReplicaSet{ObjectMeta:{webserver-dd94f59b7  deployment-6704 /apis/apps/v1/namespaces/deployment-6704/replicasets/webserver-dd94f59b7 828fe518-fd38-4e1b-9c8f-8dab11211b55 71635 8 2021-05-07 23:20:53 +0000 UTC &lt;nil&gt; &lt;nil&gt; map[name:httpd pod-template-hash:dd94f59b7] map[deployment.kubernetes.io/desired-replicas:4 deployment.kubernetes.io/max-replicas:5 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment webserver 29d41602-f88c-4fc8-be92-91f01e7f8b87 0xc002c2de40 0xc002c2de41}] []  [{kube-controller-manager Update apps/v1 2021-05-07 23:21:25 +0000 UTC FieldsV1 {&#34;f:metadata&#34;:{&#34;f:annotations&#34;:{&#34;.&#34;:{},&#34;f:deployment.kubernetes.io/desired-replicas&#34;:{},&#34;f:deployment.kubernetes.io/max-replicas&#34;:{},&#34;f:deployment.kubernetes.io/revision&#34;:{}},&#34;f:labels&#34;:{&#34;.&#34;:{},&#34;f:name&#34;:{},&#34;f:pod-template-hash&#34;:{}},&#34;f:ownerReferences&#34;:{&#34;.&#34;:{},&#34;k:{\&#34;uid\&#34;:\&#34;29d41602-f88c-4fc8-be92-91f01e7f8b87\&#34;}&#34;:{&#34;.&#34;:{},&#34;f:apiVersion&#34;:{},&#34;f:blockOwnerDeletion&#34;:{},&#34;f:controller&#34;:{},&#34;f:kind&#34;:{},&#34;f:name&#34;:{},&#34;f:uid&#34;:{}}}},&#34;f:spec&#34;:{&#34;f:replicas&#34;:{},&#34;f:selector&#34;:{&#34;f:matchLabels&#34;:{&#34;.&#34;:{},&#34;f:name&#34;:{},&#34;f:pod-template-hash&#34;:{}}},&#34;f:template&#34;:{&#34;f:metadata&#34;:{&#34;f:labels&#34;:{&#34;.&#34;:{},&#34;f:name&#34;:{},&#34;f:pod-template-hash&#34;:{}}},&#34;f:spec&#34;:{&#34;f:containers&#34;:{&#34;k:{\&#34;name\&#34;:\&#34;httpd\&#34;}&#34;:{&#34;.&#34;:{},&#34;f:image&#34;:{},&#34;f:imagePullPolicy&#34;:{},&#34;f:name&#34;:{},&#34;f:resources&#34;:{},&#34;f:securi
tyContext&#34;:{},&#34;f:terminationMessagePath&#34;:{},&#34;f:terminationMessagePolicy&#34;:{}}},&#34;f:dnsPolicy&#34;:{},&#34;f:restartPolicy&#34;:{},&#34;f:schedulerName&#34;:{},&#34;f:securityContext&#34;:{},&#34;f:terminationGracePeriodSeconds&#34;:{}}}},&#34;f:status&#34;:{&#34;f:observedGeneration&#34;:{},&#34;f:replicas&#34;:{}}}}]},Spec:ReplicaSetSpec{Replicas:*0,Selector:&amp;v1.LabelSelector{MatchLabels:map[string]string{name: httpd,pod-template-hash: dd94f59b7,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{      0 0001-01-01 00:00:00 +0000 UTC &lt;nil&gt; &lt;nil&gt; map[name:httpd pod-template-hash:dd94f59b7] map[] [] []  []} {[] [] [{httpd docker.io/library/httpd:2.4.38-alpine [] []  [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc002c2dea8 &lt;nil&gt; ClusterFirst map[]   &lt;nil&gt;  false false false &lt;nil&gt; &amp;PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} []   nil default-scheduler [] []  &lt;nil&gt; nil [] &lt;nil&gt; &lt;nil&gt; &lt;nil&gt; map[] [] &lt;nil&gt;}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:8,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},}&#xA;May  7 23:26:51.235: INFO: &amp;ReplicaSet{ObjectMeta:{webserver-7748f58bfd  deployment-6704 /apis/apps/v1/namespaces/deployment-6704/replicasets/webserver-7748f58bfd 0d4ec377-105c-4bcb-ad7b-5a83b4509d4a 71922 13 2021-05-07 23:20:54 +0000 UTC &lt;nil&gt; &lt;nil&gt; map[name:httpd pod-template-hash:7748f58bfd] 
map[deployment.kubernetes.io/desired-replicas:4 deployment.kubernetes.io/max-replicas:5 deployment.kubernetes.io/revision:4 deployment.kubernetes.io/revision-history:2] [{apps/v1 Deployment webserver 29d41602-f88c-4fc8-be92-91f01e7f8b87 0xc002c2dca0 0xc002c2dca1}] []  [{kube-controller-manager Update apps/v1 2021-05-07 23:21:43 +0000 UTC FieldsV1 {&#34;f:metadata&#34;:{&#34;f:annotations&#34;:{&#34;.&#34;:{},&#34;f:deployment.kubernetes.io/desired-replicas&#34;:{},&#34;f:deployment.kubernetes.io/max-replicas&#34;:{},&#34;f:deployment.kubernetes.io/revision&#34;:{},&#34;f:deployment.kubernetes.io/revision-history&#34;:{}},&#34;f:labels&#34;:{&#34;.&#34;:{},&#34;f:name&#34;:{},&#34;f:pod-template-hash&#34;:{}},&#34;f:ownerReferences&#34;:{&#34;.&#34;:{},&#34;k:{\&#34;uid\&#34;:\&#34;29d41602-f88c-4fc8-be92-91f01e7f8b87\&#34;}&#34;:{&#34;.&#34;:{},&#34;f:apiVersion&#34;:{},&#34;f:blockOwnerDeletion&#34;:{},&#34;f:controller&#34;:{},&#34;f:kind&#34;:{},&#34;f:name&#34;:{},&#34;f:uid&#34;:{}}}},&#34;f:spec&#34;:{&#34;f:replicas&#34;:{},&#34;f:selector&#34;:{&#34;f:matchLabels&#34;:{&#34;.&#34;:{},&#34;f:name&#34;:{},&#34;f:pod-template-hash&#34;:{}}},&#34;f:template&#34;:{&#34;f:metadata&#34;:{&#34;f:labels&#34;:{&#34;.&#34;:{},&#34;f:name&#34;:{},&#34;f:pod-template-hash&#34;:{}}},&#34;f:spec&#34;:{&#34;f:containers&#34;:{&#34;k:{\&#34;name\&#34;:\&#34;httpd\&#34;}&#34;:{&#34;.&#34;:{},&#34;f:env&#34;:{&#34;.&#34;:{},&#34;k:{\&#34;name\&#34;:\&#34;A\&#34;}&#34;:{&#34;.&#34;:{},&#34;f:name&#34;:{},&#34;f:value&#34;:{}}},&#34;f:image&#34;:{},&#34;f:imagePullPolicy&#34;:{},&#34;f:name&#34;:{},&#34;f:resources&#34;:{},&#34;f:securityContext&#34;:{},&#34;f:terminationMessagePath&#34;:{},&#34;f:terminationMessagePolicy&#34;:{}}},&#34;f:dnsPolicy&#34;:{},&#34;f:restartPolicy&#34;:{},&#34;f:schedulerName&#34;:{},&#34;f:securityContext&#34;:{},&#34;f:terminationGracePeriodSeconds&#34;:{}}}},&#34;f:status&#34;:{&#34;f:observedGeneration&#34;:{},&#34;f:replicas&#34;:{}}}}]},Spec:R
eplicaSetSpec{Replicas:*0,Selector:&amp;v1.LabelSelector{MatchLabels:map[string]string{name: httpd,pod-template-hash: 7748f58bfd,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{      0 0001-01-01 00:00:00 +0000 UTC &lt;nil&gt; &lt;nil&gt; map[name:httpd pod-template-hash:7748f58bfd] map[] [] []  []} {[] [] [{httpd docker.io/library/httpd:2.4.38-alpine [] []  [] [] [{A 2 nil}] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc002c2dd08 &lt;nil&gt; ClusterFirst map[]   &lt;nil&gt;  false false false &lt;nil&gt; &amp;PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} []   nil default-scheduler [] []  &lt;nil&gt; nil [] &lt;nil&gt; &lt;nil&gt; &lt;nil&gt; map[] [] &lt;nil&gt;}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:13,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},}&#xA;May  7 23:26:51.235: INFO: &amp;ReplicaSet{ObjectMeta:{webserver-57c6549b9c  deployment-6704 /apis/apps/v1/namespaces/deployment-6704/replicasets/webserver-57c6549b9c 9b7ade00-eb0c-416e-b532-18f2042907fe 71993 11 2021-05-07 23:20:56 +0000 UTC &lt;nil&gt; &lt;nil&gt; map[name:httpd pod-template-hash:57c6549b9c] map[deployment.kubernetes.io/desired-replicas:5 deployment.kubernetes.io/max-replicas:7 deployment.kubernetes.io/revision:3] [{apps/v1 Deployment webserver 29d41602-f88c-4fc8-be92-91f01e7f8b87 0xc002c2db20 0xc002c2db21}] []  []},Spec:ReplicaSetSpec{Replicas:*1,Selector:&amp;v1.LabelSelector{MatchLabels:map[string]string{name: httpd,pod-template-hash: 
57c6549b9c,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{      0 0001-01-01 00:00:00 +0000 UTC &lt;nil&gt; &lt;nil&gt; map[name:httpd pod-template-hash:57c6549b9c] map[] [] []  []} {[] [] [{httpd docker.io/library/httpd:2.4.38-alpine [] []  [] [] [{A 2 nil} {A 3 nil}] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc002c2db78 &lt;nil&gt; ClusterFirst map[]   &lt;nil&gt;  false false false &lt;nil&gt; &amp;PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} []   nil default-scheduler [] []  &lt;nil&gt; nil [] &lt;nil&gt; &lt;nil&gt; &lt;nil&gt; map[] [] &lt;nil&gt;}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:11,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},}&#xA;May  7 23:26:51.235: INFO: &amp;ReplicaSet{ObjectMeta:{webserver-69b69768db  deployment-6704 /apis/apps/v1/namespaces/deployment-6704/replicasets/webserver-69b69768db 43954527-ed30-4257-be3c-f4e49b04a2f4 72007 6 2021-05-07 23:21:25 +0000 UTC &lt;nil&gt; &lt;nil&gt; map[name:httpd pod-template-hash:69b69768db] map[deployment.kubernetes.io/desired-replicas:5 deployment.kubernetes.io/max-replicas:7 deployment.kubernetes.io/revision:5] [{apps/v1 Deployment webserver 29d41602-f88c-4fc8-be92-91f01e7f8b87 0xc002c2dbe0 0xc002c2dbe1}] []  []},Spec:ReplicaSetSpec{Replicas:*3,Selector:&amp;v1.LabelSelector{MatchLabels:map[string]string{name: httpd,pod-template-hash: 69b69768db,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{      0 0001-01-01 00:00:00 +0000 UTC &lt;nil&gt; &lt;nil&gt; 
map[name:httpd pod-template-hash:69b69768db] map[] [] []  []} {[] [] [{httpd docker.io/library/httpd:2.4.38-alpine [] []  [] [] [{A 2 nil} {A 15 nil}] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc002c2dc38 &lt;nil&gt; ClusterFirst map[]   &lt;nil&gt;  false false false &lt;nil&gt; &amp;PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} []   nil default-scheduler [] []  &lt;nil&gt; nil [] &lt;nil&gt; &lt;nil&gt; &lt;nil&gt; map[] [] &lt;nil&gt;}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:3,FullyLabeledReplicas:3,ObservedGeneration:6,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},}&#xA;May  7 23:26:51.239: INFO: Pod &#34;webserver-57c6549b9c-n675z&#34; is not available:&#xA;&amp;Pod{ObjectMeta:{webserver-57c6549b9c-n675z webserver-57c6549b9c- deployment-6704 /api/v1/namespaces/deployment-6704/pods/webserver-57c6549b9c-n675z c9dbd3e4-a606-4b67-9cde-034d267fe481 73400 0 2021-05-07 23:21:33 +0000 UTC &lt;nil&gt; &lt;nil&gt; map[name:httpd pod-template-hash:57c6549b9c] map[k8s.v1.cni.cncf.io/network-status:[{&#xA;    &#34;name&#34;: &#34;default-cni-network&#34;,&#xA;    &#34;interface&#34;: &#34;eth0&#34;,&#xA;    &#34;ips&#34;: [&#xA;        &#34;10.244.3.17&#34;&#xA;    ],&#xA;    &#34;mac&#34;: &#34;ae:2a:0d:60:63:af&#34;,&#xA;    &#34;default&#34;: true,&#xA;    &#34;dns&#34;: {}&#xA;}] k8s.v1.cni.cncf.io/networks-status:[{&#xA;    &#34;name&#34;: &#34;default-cni-network&#34;,&#xA;    &#34;interface&#34;: &#34;eth0&#34;,&#xA;    &#34;ips&#34;: [&#xA;        &#34;10.244.3.17&#34;&#xA;    ],&#xA;    
&#34;mac&#34;: &#34;ae:2a:0d:60:63:af&#34;,&#xA;    &#34;default&#34;: true,&#xA;    &#34;dns&#34;: {}&#xA;}] kubernetes.io/psp:collectd] [{apps/v1 ReplicaSet webserver-57c6549b9c 9b7ade00-eb0c-416e-b532-18f2042907fe 0xc0003a716f 0xc0003a7180}] []  []},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-457cw,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&amp;SecretVolumeSource{SecretName:default-token-457cw,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:A,Value:2,ValueFrom:nil,},EnvVar{Name:A,Value:3,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-457cw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&amp;SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},
ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&amp;PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-05-07 23:21:33 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-05-07 23:21:33 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-05-07 23:21:33 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-05-07 23:21:33 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:10.10.190.207,PodIP:10.244.3.17,StartTime:2021-05-07 23:21:33 
+0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&amp;ContainerStateWaiting{Reason:ImagePullBackOff,Message:Back-off pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:docker.io/library/httpd:2.4.38-alpine,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.244.3.17,},},EphemeralContainerStatuses:[]ContainerStatus{},},}&#xA;May  7 23:26:51.239: INFO: Pod &#34;webserver-69b69768db-6q8t9&#34; is not available:&#xA;&amp;Pod{ObjectMeta:{webserver-69b69768db-6q8t9 webserver-69b69768db- deployment-6704 /api/v1/namespaces/deployment-6704/pods/webserver-69b69768db-6q8t9 11a31634-f986-4e5d-afa4-07bad43ac6cd 73406 0 2021-05-07 23:21:25 +0000 UTC &lt;nil&gt; &lt;nil&gt; map[name:httpd pod-template-hash:69b69768db] map[k8s.v1.cni.cncf.io/network-status:[{&#xA;    &#34;name&#34;: &#34;default-cni-network&#34;,&#xA;    &#34;interface&#34;: &#34;eth0&#34;,&#xA;    &#34;ips&#34;: [&#xA;        &#34;10.244.4.48&#34;&#xA;    ],&#xA;    &#34;mac&#34;: &#34;2e:f6:df:21:14:36&#34;,&#xA;    &#34;default&#34;: true,&#xA;    &#34;dns&#34;: {}&#xA;}] k8s.v1.cni.cncf.io/networks-status:[{&#xA;    &#34;name&#34;: &#34;default-cni-network&#34;,&#xA;    &#34;interface&#34;: &#34;eth0&#34;,&#xA;    &#34;ips&#34;: [&#xA;        &#34;10.244.4.48&#34;&#xA;    ],&#xA;    &#34;mac&#34;: &#34;2e:f6:df:21:14:36&#34;,&#xA;    &#34;default&#34;: true,&#xA;    &#34;dns&#34;: {}&#xA;}] kubernetes.io/psp:collectd] [{apps/v1 ReplicaSet webserver-69b69768db 43954527-ed30-4257-be3c-f4e49b04a2f4 0xc0003a74cf 0xc0003a74e0}] []  
[]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-457cw,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&amp;SecretVolumeSource{SecretName:default-token-457cw,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:A,Value:2,ValueFrom:nil,},EnvVar{Name:A,Value:15,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-457cw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&amp;SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&amp;PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,Run
AsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-05-07 23:21:25 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-05-07 23:21:25 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-05-07 23:21:25 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-05-07 23:21:25 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:10.10.190.208,PodIP:10.244.4.48,StartTime:2021-05-07 23:21:25 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&amp;ContainerStateWaiting{Reason:ImagePullBackOff,Message:Back-off pulling image 
&#34;docker.io/library/httpd:2.4.38-alpine&#34;,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:docker.io/library/httpd:2.4.38-alpine,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.244.4.48,},},EphemeralContainerStatuses:[]ContainerStatus{},},}&#xA;May  7 23:26:51.240: INFO: Pod &#34;webserver-69b69768db-9p47q&#34; is not available:&#xA;&amp;Pod{ObjectMeta:{webserver-69b69768db-9p47q webserver-69b69768db- deployment-6704 /api/v1/namespaces/deployment-6704/pods/webserver-69b69768db-9p47q faffc008-7485-4705-b1aa-3ad7c4d93655 73322 0 2021-05-07 23:21:33 +0000 UTC &lt;nil&gt; &lt;nil&gt; map[name:httpd pod-template-hash:69b69768db] map[k8s.v1.cni.cncf.io/network-status:[{&#xA;    &#34;name&#34;: &#34;default-cni-network&#34;,&#xA;    &#34;interface&#34;: &#34;eth0&#34;,&#xA;    &#34;ips&#34;: [&#xA;        &#34;10.244.3.19&#34;&#xA;    ],&#xA;    &#34;mac&#34;: &#34;0a:3b:2d:47:ca:bc&#34;,&#xA;    &#34;default&#34;: true,&#xA;    &#34;dns&#34;: {}&#xA;}] k8s.v1.cni.cncf.io/networks-status:[{&#xA;    &#34;name&#34;: &#34;default-cni-network&#34;,&#xA;    &#34;interface&#34;: &#34;eth0&#34;,&#xA;    &#34;ips&#34;: [&#xA;        &#34;10.244.3.19&#34;&#xA;    ],&#xA;    &#34;mac&#34;: &#34;0a:3b:2d:47:ca:bc&#34;,&#xA;    &#34;default&#34;: true,&#xA;    &#34;dns&#34;: {}&#xA;}] kubernetes.io/psp:collectd] [{apps/v1 ReplicaSet webserver-69b69768db 43954527-ed30-4257-be3c-f4e49b04a2f4 0xc0003a765f 0xc0003a7670}] []  
[]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-457cw,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&amp;SecretVolumeSource{SecretName:default-token-457cw,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:A,Value:2,ValueFrom:nil,},EnvVar{Name:A,Value:15,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-457cw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&amp;SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&amp;PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,Run
AsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-05-07 23:21:33 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-05-07 23:21:33 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-05-07 23:21:33 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-05-07 23:21:33 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:10.10.190.207,PodIP:10.244.3.19,StartTime:2021-05-07 23:21:33 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&amp;ContainerStateWaiting{Reason:ImagePullBackOff,Message:Back-off pulling image 
&#34;docker.io/library/httpd:2.4.38-alpine&#34;,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:docker.io/library/httpd:2.4.38-alpine,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.244.3.19,},},EphemeralContainerStatuses:[]ContainerStatus{},},}&#xA;May  7 23:26:51.240: INFO: Pod &#34;webserver-69b69768db-wmwtc&#34; is not available:&#xA;&amp;Pod{ObjectMeta:{webserver-69b69768db-wmwtc webserver-69b69768db- deployment-6704 /api/v1/namespaces/deployment-6704/pods/webserver-69b69768db-wmwtc c68aa970-530d-4c9c-8dd6-917de7df928a 73412 0 2021-05-07 23:21:49 +0000 UTC &lt;nil&gt; &lt;nil&gt; map[name:httpd pod-template-hash:69b69768db] map[k8s.v1.cni.cncf.io/network-status:[{&#xA;    &#34;name&#34;: &#34;default-cni-network&#34;,&#xA;    &#34;interface&#34;: &#34;eth0&#34;,&#xA;    &#34;ips&#34;: [&#xA;        &#34;10.244.4.56&#34;&#xA;    ],&#xA;    &#34;mac&#34;: &#34;ae:25:17:c2:68:31&#34;,&#xA;    &#34;default&#34;: true,&#xA;    &#34;dns&#34;: {}&#xA;}] k8s.v1.cni.cncf.io/networks-status:[{&#xA;    &#34;name&#34;: &#34;default-cni-network&#34;,&#xA;    &#34;interface&#34;: &#34;eth0&#34;,&#xA;    &#34;ips&#34;: [&#xA;        &#34;10.244.4.56&#34;&#xA;    ],&#xA;    &#34;mac&#34;: &#34;ae:25:17:c2:68:31&#34;,&#xA;    &#34;default&#34;: true,&#xA;    &#34;dns&#34;: {}&#xA;}] kubernetes.io/psp:collectd] [{apps/v1 ReplicaSet webserver-69b69768db 43954527-ed30-4257-be3c-f4e49b04a2f4 0xc0003a77ef 0xc0003a7810}] []  
[]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-457cw,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&amp;SecretVolumeSource{SecretName:default-token-457cw,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:A,Value:2,ValueFrom:nil,},EnvVar{Name:A,Value:15,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-457cw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&amp;SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&amp;PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,Run
AsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-05-07 23:21:49 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-05-07 23:21:49 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-05-07 23:21:49 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-05-07 23:21:49 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:10.10.190.208,PodIP:10.244.4.56,StartTime:2021-05-07 23:21:49 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&amp;ContainerStateWaiting{Reason:ImagePullBackOff,Message:Back-off pulling image 
&#34;docker.io/library/httpd:2.4.38-alpine&#34;,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:docker.io/library/httpd:2.4.38-alpine,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.244.4.56,},},EphemeralContainerStatuses:[]ContainerStatus{},},}&#xA;May  7 23:26:51.240: INFO: Pod &#34;webserver-7ccc6798d4-gcnhb&#34; is not available:&#xA;&amp;Pod{ObjectMeta:{webserver-7ccc6798d4-gcnhb webserver-7ccc6798d4- deployment-6704 /api/v1/namespaces/deployment-6704/pods/webserver-7ccc6798d4-gcnhb c1d18565-17dd-44bd-9389-4679d951c26f 73487 0 2021-05-07 23:21:43 +0000 UTC &lt;nil&gt; &lt;nil&gt; map[name:httpd pod-template-hash:7ccc6798d4] map[k8s.v1.cni.cncf.io/network-status:[{&#xA;    &#34;name&#34;: &#34;default-cni-network&#34;,&#xA;    &#34;interface&#34;: &#34;eth0&#34;,&#xA;    &#34;ips&#34;: [&#xA;        &#34;10.244.3.20&#34;&#xA;    ],&#xA;    &#34;mac&#34;: &#34;8a:d5:b4:1a:17:21&#34;,&#xA;    &#34;default&#34;: true,&#xA;    &#34;dns&#34;: {}&#xA;}] k8s.v1.cni.cncf.io/networks-status:[{&#xA;    &#34;name&#34;: &#34;default-cni-network&#34;,&#xA;    &#34;interface&#34;: &#34;eth0&#34;,&#xA;    &#34;ips&#34;: [&#xA;        &#34;10.244.3.20&#34;&#xA;    ],&#xA;    &#34;mac&#34;: &#34;8a:d5:b4:1a:17:21&#34;,&#xA;    &#34;default&#34;: true,&#xA;    &#34;dns&#34;: {}&#xA;}] kubernetes.io/psp:collectd] [{apps/v1 ReplicaSet webserver-7ccc6798d4 69666fda-c7b8-4c65-9b49-4ef44d9a3f33 0xc0003a798f 0xc0003a79a0}] []  
[]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-457cw,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&amp;SecretVolumeSource{SecretName:default-token-457cw,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:A,Value:2,ValueFrom:nil,},EnvVar{Name:A,Value:15,ValueFrom:nil,},EnvVar{Name:A,Value:17,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-457cw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&amp;SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&amp;PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:ni
l,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-05-07 23:21:43 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-05-07 23:21:43 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-05-07 23:21:43 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-05-07 23:21:43 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:10.10.190.207,PodIP:10.244.3.20,StartTime:2021-05-07 23:21:43 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&amp;ContainerStateWaiting{Reason:ImagePullBackOff,Message:Back-off pulling image 
&#34;docker.io/library/httpd:2.4.38-alpine&#34;,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:docker.io/library/httpd:2.4.38-alpine,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.244.3.20,},},EphemeralContainerStatuses:[]ContainerStatus{},},}&#xA;May  7 23:26:51.240: INFO: Pod &#34;webserver-7ccc6798d4-vhdp2&#34; is not available:&#xA;&amp;Pod{ObjectMeta:{webserver-7ccc6798d4-vhdp2 webserver-7ccc6798d4- deployment-6704 /api/v1/namespaces/deployment-6704/pods/webserver-7ccc6798d4-vhdp2 ca976b85-bebd-4848-b021-a8550e97d6b0 73622 0 2021-05-07 23:21:49 +0000 UTC &lt;nil&gt; &lt;nil&gt; map[name:httpd pod-template-hash:7ccc6798d4] map[k8s.v1.cni.cncf.io/network-status:[{&#xA;    &#34;name&#34;: &#34;default-cni-network&#34;,&#xA;    &#34;interface&#34;: &#34;eth0&#34;,&#xA;    &#34;ips&#34;: [&#xA;        &#34;10.244.4.55&#34;&#xA;    ],&#xA;    &#34;mac&#34;: &#34;92:0c:7e:67:45:7c&#34;,&#xA;    &#34;default&#34;: true,&#xA;    &#34;dns&#34;: {}&#xA;}] k8s.v1.cni.cncf.io/networks-status:[{&#xA;    &#34;name&#34;: &#34;default-cni-network&#34;,&#xA;    &#34;interface&#34;: &#34;eth0&#34;,&#xA;    &#34;ips&#34;: [&#xA;        &#34;10.244.4.55&#34;&#xA;    ],&#xA;    &#34;mac&#34;: &#34;92:0c:7e:67:45:7c&#34;,&#xA;    &#34;default&#34;: true,&#xA;    &#34;dns&#34;: {}&#xA;}] kubernetes.io/psp:collectd] [{apps/v1 ReplicaSet webserver-7ccc6798d4 69666fda-c7b8-4c65-9b49-4ef44d9a3f33 0xc0003a7b1f 0xc0003a7b30}] []  
[]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-457cw,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&amp;SecretVolumeSource{SecretName:default-token-457cw,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:A,Value:2,ValueFrom:nil,},EnvVar{Name:A,Value:15,ValueFrom:nil,},EnvVar{Name:A,Value:17,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-457cw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&amp;SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&amp;PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:ni
l,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-05-07 23:21:49 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-05-07 23:21:49 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-05-07 23:21:49 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-05-07 23:21:49 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:10.10.190.208,PodIP:10.244.4.55,StartTime:2021-05-07 23:21:49 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&amp;ContainerStateWaiting{Reason:ImagePullBackOff,Message:Back-off pulling image 
&#34;docker.io/library/httpd:2.4.38-alpine&#34;,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:docker.io/library/httpd:2.4.38-alpine,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.244.4.55,},},EphemeralContainerStatuses:[]ContainerStatus{},},}&#xA;May  7 23:26:51.241: INFO: Pod &#34;webserver-7ccc6798d4-xsdt7&#34; is not available:&#xA;&amp;Pod{ObjectMeta:{webserver-7ccc6798d4-xsdt7 webserver-7ccc6798d4- deployment-6704 /api/v1/namespaces/deployment-6704/pods/webserver-7ccc6798d4-xsdt7 7dd92de9-d0fa-4b3c-a1e2-2abb46beaed2 73388 0 2021-05-07 23:21:43 +0000 UTC &lt;nil&gt; &lt;nil&gt; map[name:httpd pod-template-hash:7ccc6798d4] map[k8s.v1.cni.cncf.io/network-status:[{&#xA;    &#34;name&#34;: &#34;default-cni-network&#34;,&#xA;    &#34;interface&#34;: &#34;eth0&#34;,&#xA;    &#34;ips&#34;: [&#xA;        &#34;10.244.4.54&#34;&#xA;    ],&#xA;    &#34;mac&#34;: &#34;5e:8c:62:fb:7d:e3&#34;,&#xA;    &#34;default&#34;: true,&#xA;    &#34;dns&#34;: {}&#xA;}] k8s.v1.cni.cncf.io/networks-status:[{&#xA;    &#34;name&#34;: &#34;default-cni-network&#34;,&#xA;    &#34;interface&#34;: &#34;eth0&#34;,&#xA;    &#34;ips&#34;: [&#xA;        &#34;10.244.4.54&#34;&#xA;    ],&#xA;    &#34;mac&#34;: &#34;5e:8c:62:fb:7d:e3&#34;,&#xA;    &#34;default&#34;: true,&#xA;    &#34;dns&#34;: {}&#xA;}] kubernetes.io/psp:collectd] [{apps/v1 ReplicaSet webserver-7ccc6798d4 69666fda-c7b8-4c65-9b49-4ef44d9a3f33 0xc0003a7cbf 0xc0003a7cd0}] []  
[]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-457cw,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&amp;SecretVolumeSource{SecretName:default-token-457cw,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:A,Value:2,ValueFrom:nil,},EnvVar{Name:A,Value:15,ValueFrom:nil,},EnvVar{Name:A,Value:17,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-457cw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&amp;SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:node2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&amp;PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:ni
l,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-05-07 23:21:43 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-05-07 23:21:43 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-05-07 23:21:43 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-05-07 23:21:43 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:10.10.190.208,PodIP:10.244.4.54,StartTime:2021-05-07 23:21:43 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&amp;ContainerStateWaiting{Reason:ImagePullBackOff,Message:Back-off pulling image 
&#34;docker.io/library/httpd:2.4.38-alpine&#34;,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:docker.io/library/httpd:2.4.38-alpine,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.244.4.54,},},EphemeralContainerStatuses:[]ContainerStatus{},},}&#xA;[AfterEach] [sig-apps] Deployment&#xA;  /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:175&#xA;STEP: Collecting events from namespace &#34;deployment-6704&#34;.&#xA;STEP: Found 310 events.&#xA;May  7 23:26:51.257: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-57c6549b9c-25fqh: { } Scheduled: Successfully assigned deployment-6704/webserver-57c6549b9c-25fqh to node1&#xA;May  7 23:26:51.257: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-57c6549b9c-gf7pj: { } Scheduled: Successfully assigned deployment-6704/webserver-57c6549b9c-gf7pj to node1&#xA;May  7 23:26:51.257: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-57c6549b9c-mr92t: { } Scheduled: Successfully assigned deployment-6704/webserver-57c6549b9c-mr92t to node1&#xA;May  7 23:26:51.257: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-57c6549b9c-n675z: { } Scheduled: Successfully assigned deployment-6704/webserver-57c6549b9c-n675z to node1&#xA;May  7 23:26:51.257: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-57c6549b9c-pplg5: { } Scheduled: Successfully assigned deployment-6704/webserver-57c6549b9c-pplg5 to node1&#xA;May  7 23:26:51.257: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-57c6549b9c-rrmtl: { } Scheduled: Successfully assigned deployment-6704/webserver-57c6549b9c-rrmtl to node2&#xA;May  7 23:26:51.257: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-57c6549b9c-s8p7t: { } Scheduled: Successfully assigned 
deployment-6704/webserver-57c6549b9c-s8p7t to node1&#xA;May  7 23:26:51.257: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-57c6549b9c-sg587: { } Scheduled: Successfully assigned deployment-6704/webserver-57c6549b9c-sg587 to node2&#xA;May  7 23:26:51.257: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-57c6549b9c-tdsx6: { } Scheduled: Successfully assigned deployment-6704/webserver-57c6549b9c-tdsx6 to node1&#xA;May  7 23:26:51.257: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-57c6549b9c-v975h: { } Scheduled: Successfully assigned deployment-6704/webserver-57c6549b9c-v975h to node2&#xA;May  7 23:26:51.257: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-57c6549b9c-vb96n: { } Scheduled: Successfully assigned deployment-6704/webserver-57c6549b9c-vb96n to node1&#xA;May  7 23:26:51.257: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-57c6549b9c-wd4t8: { } Scheduled: Successfully assigned deployment-6704/webserver-57c6549b9c-wd4t8 to node2&#xA;May  7 23:26:51.257: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-57c6549b9c-whlkd: { } Scheduled: Successfully assigned deployment-6704/webserver-57c6549b9c-whlkd to node2&#xA;May  7 23:26:51.257: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-57c6549b9c-wsmp2: { } Scheduled: Successfully assigned deployment-6704/webserver-57c6549b9c-wsmp2 to node2&#xA;May  7 23:26:51.257: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-57c6549b9c-x5hqw: { } Scheduled: Successfully assigned deployment-6704/webserver-57c6549b9c-x5hqw to node2&#xA;May  7 23:26:51.257: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-69b69768db-6q8t9: { } Scheduled: Successfully assigned deployment-6704/webserver-69b69768db-6q8t9 to node2&#xA;May  7 23:26:51.257: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-69b69768db-9p47q: { } Scheduled: Successfully assigned deployment-6704/webserver-69b69768db-9p47q to node1&#xA;May  7 
23:26:51.257: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-69b69768db-kp8mk: { } Scheduled: Successfully assigned deployment-6704/webserver-69b69768db-kp8mk to node2&#xA;May  7 23:26:51.257: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-69b69768db-wmwtc: { } Scheduled: Successfully assigned deployment-6704/webserver-69b69768db-wmwtc to node2&#xA;May  7 23:26:51.257: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-69b69768db-xfl87: { } Scheduled: Successfully assigned deployment-6704/webserver-69b69768db-xfl87 to node1&#xA;May  7 23:26:51.257: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-7748f58bfd-7whgt: { } Scheduled: Successfully assigned deployment-6704/webserver-7748f58bfd-7whgt to node2&#xA;May  7 23:26:51.257: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-7748f58bfd-9xkg7: { } Scheduled: Successfully assigned deployment-6704/webserver-7748f58bfd-9xkg7 to node2&#xA;May  7 23:26:51.257: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-7748f58bfd-fsnl8: { } Scheduled: Successfully assigned deployment-6704/webserver-7748f58bfd-fsnl8 to node2&#xA;May  7 23:26:51.257: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-7748f58bfd-jpc89: { } Scheduled: Successfully assigned deployment-6704/webserver-7748f58bfd-jpc89 to node1&#xA;May  7 23:26:51.257: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-7748f58bfd-lcd48: { } Scheduled: Successfully assigned deployment-6704/webserver-7748f58bfd-lcd48 to node1&#xA;May  7 23:26:51.257: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-7748f58bfd-ln95f: { } Scheduled: Successfully assigned deployment-6704/webserver-7748f58bfd-ln95f to node2&#xA;May  7 23:26:51.257: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-7748f58bfd-mpgjm: { } Scheduled: Successfully assigned deployment-6704/webserver-7748f58bfd-mpgjm to node2&#xA;May  7 23:26:51.257: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for 
webserver-7748f58bfd-pcnfs: { } Scheduled: Successfully assigned deployment-6704/webserver-7748f58bfd-pcnfs to node2&#xA;May  7 23:26:51.258: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-7748f58bfd-prl8t: { } Scheduled: Successfully assigned deployment-6704/webserver-7748f58bfd-prl8t to node1&#xA;May  7 23:26:51.258: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-7748f58bfd-rn2mq: { } Scheduled: Successfully assigned deployment-6704/webserver-7748f58bfd-rn2mq to node1&#xA;May  7 23:26:51.258: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-7748f58bfd-t7cxv: { } Scheduled: Successfully assigned deployment-6704/webserver-7748f58bfd-t7cxv to node1&#xA;May  7 23:26:51.258: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-7748f58bfd-wdhrv: { } Scheduled: Successfully assigned deployment-6704/webserver-7748f58bfd-wdhrv to node1&#xA;May  7 23:26:51.258: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-7748f58bfd-z7lxx: { } Scheduled: Successfully assigned deployment-6704/webserver-7748f58bfd-z7lxx to node1&#xA;May  7 23:26:51.258: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-7ccc6798d4-gcnhb: { } Scheduled: Successfully assigned deployment-6704/webserver-7ccc6798d4-gcnhb to node1&#xA;May  7 23:26:51.258: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-7ccc6798d4-vhdp2: { } Scheduled: Successfully assigned deployment-6704/webserver-7ccc6798d4-vhdp2 to node2&#xA;May  7 23:26:51.258: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-7ccc6798d4-xsdt7: { } Scheduled: Successfully assigned deployment-6704/webserver-7ccc6798d4-xsdt7 to node2&#xA;May  7 23:26:51.258: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-dd94f59b7-2qcrb: { } Scheduled: Successfully assigned deployment-6704/webserver-dd94f59b7-2qcrb to node2&#xA;May  7 23:26:51.258: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-dd94f59b7-b5tkx: { } Scheduled: Successfully assigned 
deployment-6704/webserver-dd94f59b7-b5tkx to node2&#xA;May  7 23:26:51.258: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-dd94f59b7-fntfg: { } Scheduled: Successfully assigned deployment-6704/webserver-dd94f59b7-fntfg to node1&#xA;May  7 23:26:51.258: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-dd94f59b7-g9pw8: { } Scheduled: Successfully assigned deployment-6704/webserver-dd94f59b7-g9pw8 to node1&#xA;May  7 23:26:51.258: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-dd94f59b7-lkts7: { } Scheduled: Successfully assigned deployment-6704/webserver-dd94f59b7-lkts7 to node1&#xA;May  7 23:26:51.258: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-dd94f59b7-lxt56: { } Scheduled: Successfully assigned deployment-6704/webserver-dd94f59b7-lxt56 to node2&#xA;May  7 23:26:51.258: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-dd94f59b7-q99v6: { } Scheduled: Successfully assigned deployment-6704/webserver-dd94f59b7-q99v6 to node2&#xA;May  7 23:26:51.258: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-dd94f59b7-vk5s8: { } Scheduled: Successfully assigned deployment-6704/webserver-dd94f59b7-vk5s8 to node1&#xA;May  7 23:26:51.258: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-dd94f59b7-vvbb2: { } Scheduled: Successfully assigned deployment-6704/webserver-dd94f59b7-vvbb2 to node2&#xA;May  7 23:26:51.258: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for webserver-dd94f59b7-xz9dr: { } Scheduled: Successfully assigned deployment-6704/webserver-dd94f59b7-xz9dr to node1&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:53 +0000 UTC - event for webserver: {deployment-controller } ScalingReplicaSet: Scaled up replica set webserver-dd94f59b7 to 6&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:53 +0000 UTC - event for webserver: {deployment-controller } DeploymentRollbackRevisionNotFound: Unable to find last revision.&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:53 +0000 UTC 
- event for webserver-dd94f59b7: {replicaset-controller } SuccessfulCreate: Created pod: webserver-dd94f59b7-2qcrb&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:53 +0000 UTC - event for webserver-dd94f59b7: {replicaset-controller } SuccessfulCreate: Created pod: webserver-dd94f59b7-xz9dr&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:53 +0000 UTC - event for webserver-dd94f59b7: {replicaset-controller } SuccessfulCreate: Created pod: webserver-dd94f59b7-lkts7&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:53 +0000 UTC - event for webserver-dd94f59b7: {replicaset-controller } SuccessfulCreate: Created pod: webserver-dd94f59b7-b5tkx&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:53 +0000 UTC - event for webserver-dd94f59b7: {replicaset-controller } SuccessfulCreate: Created pod: webserver-dd94f59b7-vvbb2&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:53 +0000 UTC - event for webserver-dd94f59b7: {replicaset-controller } SuccessfulCreate: Created pod: webserver-dd94f59b7-vk5s8&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:54 +0000 UTC - event for webserver: {deployment-controller } ScalingReplicaSet: Scaled up replica set webserver-7748f58bfd to 2&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:54 +0000 UTC - event for webserver: {deployment-controller } ScalingReplicaSet: Scaled down replica set webserver-dd94f59b7 to 6&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:54 +0000 UTC - event for webserver: {deployment-controller } ScalingReplicaSet: Scaled up replica set webserver-7748f58bfd to 3&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:54 +0000 UTC - event for webserver: {deployment-controller } ScalingReplicaSet: Scaled up replica set webserver-dd94f59b7 to 7&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:54 +0000 UTC - event for webserver-7748f58bfd: {replicaset-controller } SuccessfulCreate: Created pod: webserver-7748f58bfd-prl8t&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:54 +0000 UTC - event for 
webserver-7748f58bfd: {replicaset-controller } SuccessfulCreate: Created pod: webserver-7748f58bfd-ln95f&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:54 +0000 UTC - event for webserver-7748f58bfd: {replicaset-controller } SuccessfulCreate: Created pod: webserver-7748f58bfd-mpgjm&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:54 +0000 UTC - event for webserver-dd94f59b7: {replicaset-controller } SuccessfulDelete: Deleted pod: webserver-dd94f59b7-fntfg&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:54 +0000 UTC - event for webserver-dd94f59b7: {replicaset-controller } SuccessfulCreate: Created pod: webserver-dd94f59b7-fntfg&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:56 +0000 UTC - event for webserver: {deployment-controller } ScalingReplicaSet: Scaled up replica set webserver-57c6549b9c to 4&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:56 +0000 UTC - event for webserver: {deployment-controller } ScalingReplicaSet: Scaled down replica set webserver-dd94f59b7 to 3&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:56 +0000 UTC - event for webserver-57c6549b9c: {replicaset-controller } SuccessfulCreate: Created pod: webserver-57c6549b9c-rrmtl&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:56 +0000 UTC - event for webserver-57c6549b9c: {replicaset-controller } SuccessfulCreate: Created pod: webserver-57c6549b9c-vb96n&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:56 +0000 UTC - event for webserver-57c6549b9c: {replicaset-controller } SuccessfulCreate: Created pod: webserver-57c6549b9c-whlkd&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:56 +0000 UTC - event for webserver-57c6549b9c: {replicaset-controller } SuccessfulCreate: Created pod: webserver-57c6549b9c-gf7pj&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:56 +0000 UTC - event for webserver-dd94f59b7: {replicaset-controller } SuccessfulDelete: Deleted pod: webserver-dd94f59b7-q99v6&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:56 +0000 UTC - event for 
webserver-dd94f59b7: {replicaset-controller } SuccessfulDelete: Deleted pod: webserver-dd94f59b7-b5tkx&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:56 +0000 UTC - event for webserver-dd94f59b7: {replicaset-controller } SuccessfulDelete: Deleted pod: webserver-dd94f59b7-vvbb2&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:56 +0000 UTC - event for webserver-dd94f59b7: {replicaset-controller } SuccessfulDelete: Deleted pod: webserver-dd94f59b7-2qcrb&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:56 +0000 UTC - event for webserver-dd94f59b7: {replicaset-controller } SuccessfulCreate: Created pod: webserver-dd94f59b7-q99v6&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:57 +0000 UTC - event for webserver-dd94f59b7-lkts7: {kubelet node1} Pulling: Pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:57 +0000 UTC - event for webserver-dd94f59b7-lkts7: {multus } AddedInterface: Add eth0 [10.244.3.246/24]&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:57 +0000 UTC - event for webserver-dd94f59b7-vk5s8: {kubelet node1} Pulling: Pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:57 +0000 UTC - event for webserver-dd94f59b7-vk5s8: {multus } AddedInterface: Add eth0 [10.244.3.247/24]&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:57 +0000 UTC - event for webserver-dd94f59b7-xz9dr: {multus } AddedInterface: Add eth0 [10.244.3.245/24]&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:57 +0000 UTC - event for webserver-dd94f59b7-xz9dr: {kubelet node1} Pulling: Pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:58 +0000 UTC - event for webserver: {deployment-controller } ScalingReplicaSet: Scaled up replica set webserver-7748f58bfd to 4&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:58 +0000 UTC - event for webserver: {deployment-controller } ScalingReplicaSet: Scaled down 
replica set webserver-dd94f59b7 to 2&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:58 +0000 UTC - event for webserver: {deployment-controller } DeploymentRollback: Rolled back deployment &#34;webserver&#34; to revision 2&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:58 +0000 UTC - event for webserver-57c6549b9c: {replicaset-controller } SuccessfulCreate: Created pod: webserver-57c6549b9c-tdsx6&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:58 +0000 UTC - event for webserver-7748f58bfd: {replicaset-controller } SuccessfulCreate: Created pod: webserver-7748f58bfd-z7lxx&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:58 +0000 UTC - event for webserver-7748f58bfd: {replicaset-controller } SuccessfulCreate: Created pod: webserver-7748f58bfd-pcnfs&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:58 +0000 UTC - event for webserver-dd94f59b7: {replicaset-controller } SuccessfulCreate: (combined from similar events): Created pod: webserver-dd94f59b7-g9pw8&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:58 +0000 UTC - event for webserver-dd94f59b7: {replicaset-controller } SuccessfulDelete: Deleted pod: webserver-dd94f59b7-lkts7&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:58 +0000 UTC - event for webserver-dd94f59b7: {replicaset-controller } SuccessfulCreate: Created pod: webserver-dd94f59b7-lxt56&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:58 +0000 UTC - event for webserver-dd94f59b7-2qcrb: {kubelet node2} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to start sandbox container for pod &#34;webserver-dd94f59b7-2qcrb&#34;: Error response from daemon: OCI runtime create failed: container_linux.go:349: starting container process caused &#34;process_linux.go:315: copying bootstrap data to pipe caused \&#34;write init-p: broken pipe\&#34;&#34;: unknown&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:58 +0000 UTC - event for webserver-dd94f59b7-xz9dr: {kubelet node1} Failed: Error: 
ErrImagePull&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:58 +0000 UTC - event for webserver-dd94f59b7-xz9dr: {kubelet node1} Failed: Failed to pull image &#34;docker.io/library/httpd:2.4.38-alpine&#34;: rpc error: code = Unknown desc = Error response from daemon: toomanyrequests: You have reached your pull rate limit. You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:59 +0000 UTC - event for webserver-dd94f59b7-lkts7: {kubelet node1} Failed: Error: ErrImagePull&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:20:59 +0000 UTC - event for webserver-dd94f59b7-lkts7: {kubelet node1} Failed: Failed to pull image &#34;docker.io/library/httpd:2.4.38-alpine&#34;: rpc error: code = Unknown desc = Error response from daemon: toomanyrequests: You have reached your pull rate limit. You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:21:00 +0000 UTC - event for webserver: {deployment-controller } ScalingReplicaSet: (combined from similar events): Scaled down replica set webserver-57c6549b9c to 2&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:21:00 +0000 UTC - event for webserver-7748f58bfd: {replicaset-controller } SuccessfulDelete: Deleted pod: webserver-7748f58bfd-pcnfs&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:21:00 +0000 UTC - event for webserver-7748f58bfd-prl8t: {kubelet node1} Pulling: Pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:21:00 +0000 UTC - event for webserver-7748f58bfd-prl8t: {multus } AddedInterface: Add eth0 [10.244.3.249/24]&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:21:00 +0000 UTC - event for webserver-dd94f59b7-vk5s8: {kubelet node1} Failed: Error: ErrImagePull&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:21:00 +0000 UTC - event for webserver-dd94f59b7-vk5s8: {kubelet 
node1} Failed: Failed to pull image &#34;docker.io/library/httpd:2.4.38-alpine&#34;: rpc error: code = Unknown desc = Error response from daemon: toomanyrequests: You have reached your pull rate limit. You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:21:01 +0000 UTC - event for webserver-7748f58bfd-ln95f: {multus } AddedInterface: Add eth0 [10.244.4.33/24]&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:21:01 +0000 UTC - event for webserver-7748f58bfd-ln95f: {kubelet node2} Pulling: Pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:21:01 +0000 UTC - event for webserver-7748f58bfd-prl8t: {kubelet node1} Failed: Error: ErrImagePull&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:21:01 +0000 UTC - event for webserver-7748f58bfd-prl8t: {kubelet node1} Failed: Failed to pull image &#34;docker.io/library/httpd:2.4.38-alpine&#34;: rpc error: code = Unknown desc = Error response from daemon: toomanyrequests: You have reached your pull rate limit. 
You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:21:02 +0000 UTC - event for webserver-57c6549b9c: {replicaset-controller } SuccessfulCreate: Created pod: webserver-57c6549b9c-s8p7t&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:21:02 +0000 UTC - event for webserver-57c6549b9c: {replicaset-controller } SuccessfulCreate: Created pod: webserver-57c6549b9c-pplg5&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:21:02 +0000 UTC - event for webserver-57c6549b9c: {replicaset-controller } SuccessfulCreate: Created pod: webserver-57c6549b9c-x5hqw&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:21:02 +0000 UTC - event for webserver-57c6549b9c: {replicaset-controller } SuccessfulCreate: Created pod: webserver-57c6549b9c-sg587&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:21:02 +0000 UTC - event for webserver-57c6549b9c-gf7pj: {multus } AddedInterface: Add eth0 [10.244.3.250/24]&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:21:02 +0000 UTC - event for webserver-57c6549b9c-gf7pj: {kubelet node1} Pulling: Pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:21:02 +0000 UTC - event for webserver-57c6549b9c-vb96n: {kubelet node1} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to start sandbox container for pod &#34;webserver-57c6549b9c-vb96n&#34;: Error response from daemon: OCI runtime create failed: container_linux.go:349: starting container process caused &#34;process_linux.go:365: sending config to init process caused \&#34;write init-p: broken pipe\&#34;&#34;: unknown&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:21:02 +0000 UTC - event for webserver-7748f58bfd: {replicaset-controller } SuccessfulCreate: Created pod: webserver-7748f58bfd-lcd48&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:21:02 +0000 UTC - event for webserver-7748f58bfd: {replicaset-controller } 
SuccessfulCreate: Created pod: webserver-7748f58bfd-9xkg7&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:21:02 +0000 UTC - event for webserver-7748f58bfd-mpgjm: {kubelet node2} Pulling: Pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:21:02 +0000 UTC - event for webserver-dd94f59b7-b5tkx: {kubelet node2} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to start sandbox container for pod &#34;webserver-dd94f59b7-b5tkx&#34;: Error response from daemon: OCI runtime create failed: container_linux.go:349: starting container process caused &#34;process_linux.go:449: container init caused \&#34;process_linux.go:432: running prestart hook 0 caused \\\&#34;error running hook: exit status 1, stdout: , stderr: time=\\\\\\\&#34;2021-05-07T23:20:59Z\\\\\\\&#34; level=fatal msg=\\\\\\\&#34;no such file or directory\\\\\\\&#34;\\\\n\\\&#34;\&#34;&#34;: unknown&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:21:02 +0000 UTC - event for webserver-dd94f59b7-vvbb2: {kubelet node2} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to start sandbox container for pod &#34;webserver-dd94f59b7-vvbb2&#34;: Error response from daemon: OCI runtime create failed: container_linux.go:349: starting container process caused &#34;process_linux.go:449: container init caused \&#34;\&#34;&#34;: unknown&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:21:03 +0000 UTC - event for webserver-57c6549b9c-gf7pj: {kubelet node1} Failed: Error: ErrImagePull&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:21:03 +0000 UTC - event for webserver-57c6549b9c-gf7pj: {kubelet node1} Failed: Failed to pull image &#34;docker.io/library/httpd:2.4.38-alpine&#34;: rpc error: code = Unknown desc = Error response from daemon: toomanyrequests: You have reached your pull rate limit. 
You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit&#xA;May  7 23:26:51.258: INFO: At 2021-05-07 23:21:03 +0000 UTC - event for webserver-57c6549b9c-rrmtl: {kubelet node2} Pulling: Pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:03 +0000 UTC - event for webserver-57c6549b9c-tdsx6: {kubelet node1} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to start sandbox container for pod &#34;webserver-57c6549b9c-tdsx6&#34;: Error response from daemon: OCI runtime create failed: container_linux.go:349: starting container process caused &#34;process_linux.go:449: container init caused \&#34;\&#34;&#34;: unknown&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:03 +0000 UTC - event for webserver-7748f58bfd-ln95f: {kubelet node2} Failed: Failed to pull image &#34;docker.io/library/httpd:2.4.38-alpine&#34;: rpc error: code = Unknown desc = Error response from daemon: toomanyrequests: You have reached your pull rate limit. 
You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:03 +0000 UTC - event for webserver-7748f58bfd-ln95f: {kubelet node2} Failed: Error: ErrImagePull&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:04 +0000 UTC - event for webserver-57c6549b9c-whlkd: {kubelet node2} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to set up sandbox container &#34;9575da493a5ea555497d8ae056ec4b753428aa31c10d1fa55204c66dc0e15a76&#34; network for pod &#34;webserver-57c6549b9c-whlkd&#34;: networkPlugin cni failed to set up pod &#34;webserver-57c6549b9c-whlkd_deployment-6704&#34; network: [deployment-6704/webserver-57c6549b9c-whlkd:default-cni-network]: error adding container to network &#34;default-cni-network&#34;: failed to open netns &#34;/proc/17295/ns/net&#34;: failed to Statfs &#34;/proc/17295/ns/net&#34;: no such file or directory&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:04 +0000 UTC - event for webserver-7748f58bfd-mpgjm: {kubelet node2} Failed: Error: ErrImagePull&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:04 +0000 UTC - event for webserver-7748f58bfd-mpgjm: {kubelet node2} Failed: Failed to pull image &#34;docker.io/library/httpd:2.4.38-alpine&#34;: rpc error: code = Unknown desc = Error response from daemon: toomanyrequests: You have reached your pull rate limit. 
You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:04 +0000 UTC - event for webserver-7748f58bfd-z7lxx: {kubelet node1} Pulling: Pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:04 +0000 UTC - event for webserver-7748f58bfd-z7lxx: {multus } AddedInterface: Add eth0 [10.244.3.251/24]&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:05 +0000 UTC - event for webserver-57c6549b9c-rrmtl: {kubelet node2} Failed: Error: ErrImagePull&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:05 +0000 UTC - event for webserver-57c6549b9c-rrmtl: {kubelet node2} Failed: Failed to pull image &#34;docker.io/library/httpd:2.4.38-alpine&#34;: rpc error: code = Unknown desc = Error response from daemon: toomanyrequests: You have reached your pull rate limit. You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:05 +0000 UTC - event for webserver-7748f58bfd-z7lxx: {kubelet node1} Failed: Failed to pull image &#34;docker.io/library/httpd:2.4.38-alpine&#34;: rpc error: code = Unknown desc = Error response from daemon: toomanyrequests: You have reached your pull rate limit. 
You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:05 +0000 UTC - event for webserver-7748f58bfd-z7lxx: {kubelet node1} Failed: Error: ErrImagePull&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:05 +0000 UTC - event for webserver-dd94f59b7-g9pw8: {kubelet node1} Pulling: Pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:05 +0000 UTC - event for webserver-dd94f59b7-g9pw8: {multus } AddedInterface: Add eth0 [10.244.3.252/24]&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:06 +0000 UTC - event for webserver-57c6549b9c: {replicaset-controller } SuccessfulDelete: Deleted pod: webserver-57c6549b9c-pplg5&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:06 +0000 UTC - event for webserver-7748f58bfd: {replicaset-controller } SuccessfulDelete: Deleted pod: webserver-7748f58bfd-lcd48&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:06 +0000 UTC - event for webserver-7748f58bfd: {replicaset-controller } SuccessfulCreate: Created pod: webserver-7748f58bfd-jpc89&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:06 +0000 UTC - event for webserver-dd94f59b7: {replicaset-controller } SuccessfulDelete: Deleted pod: webserver-dd94f59b7-lxt56&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:07 +0000 UTC - event for webserver-57c6549b9c-pplg5: {kubelet node1} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to set up sandbox container &#34;49fe91bf1332825ae0fcc7b3e3be4ae7c7ea6abacc9c4d18f2f9572579d49958&#34; network for pod &#34;webserver-57c6549b9c-pplg5&#34;: networkPlugin cni failed to set up pod &#34;webserver-57c6549b9c-pplg5_deployment-6704&#34; network: [deployment-6704/webserver-57c6549b9c-pplg5:default-cni-network]: error adding container to network &#34;default-cni-network&#34;: failed to open netns &#34;/proc/17164/ns/net&#34;: failed to Statfs 
&#34;/proc/17164/ns/net&#34;: no such file or directory&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:07 +0000 UTC - event for webserver-57c6549b9c-s8p7t: {kubelet node1} Pulling: Pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:07 +0000 UTC - event for webserver-57c6549b9c-s8p7t: {multus } AddedInterface: Add eth0 [10.244.3.253/24]&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:07 +0000 UTC - event for webserver-dd94f59b7-g9pw8: {kubelet node1} Failed: Failed to pull image &#34;docker.io/library/httpd:2.4.38-alpine&#34;: rpc error: code = Unknown desc = Error response from daemon: toomanyrequests: You have reached your pull rate limit. You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:07 +0000 UTC - event for webserver-dd94f59b7-g9pw8: {kubelet node1} Failed: Error: ErrImagePull&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:07 +0000 UTC - event for webserver-dd94f59b7-lxt56: {multus } AddedInterface: Add eth0 [10.244.4.36/24]&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:08 +0000 UTC - event for webserver-57c6549b9c: {replicaset-controller } SuccessfulCreate: (combined from similar events): Created pod: webserver-57c6549b9c-wd4t8&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:08 +0000 UTC - event for webserver-57c6549b9c: {replicaset-controller } SuccessfulDelete: Deleted pod: webserver-57c6549b9c-s8p7t&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:08 +0000 UTC - event for webserver-57c6549b9c-x5hqw: {kubelet node2} Pulling: Pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:08 +0000 UTC - event for webserver-57c6549b9c-x5hqw: {multus } AddedInterface: Add eth0 [10.244.4.37/24]&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:08 +0000 UTC - event for webserver-7748f58bfd: {replicaset-controller } 
SuccessfulDelete: Deleted pod: webserver-7748f58bfd-jpc89&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:08 +0000 UTC - event for webserver-7748f58bfd: {replicaset-controller } SuccessfulCreate: Created pod: webserver-7748f58bfd-wdhrv&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:08 +0000 UTC - event for webserver-7748f58bfd-9xkg7: {multus } AddedInterface: Add eth0 [10.244.4.38/24]&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:08 +0000 UTC - event for webserver-7748f58bfd-9xkg7: {kubelet node2} Pulling: Pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:08 +0000 UTC - event for webserver-7748f58bfd-lcd48: {kubelet node1} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to start sandbox container for pod &#34;webserver-7748f58bfd-lcd48&#34;: Error response from daemon: OCI runtime create failed: container_linux.go:349: starting container process caused &#34;process_linux.go:449: container init caused \&#34;\&#34;&#34;: unknown&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:08 +0000 UTC - event for webserver-7748f58bfd-z7lxx: {kubelet node1} Failed: Error: ImagePullBackOff&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:08 +0000 UTC - event for webserver-7748f58bfd-z7lxx: {kubelet node1} BackOff: Back-off pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:08 +0000 UTC - event for webserver-dd94f59b7-g9pw8: {kubelet node1} BackOff: Back-off pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:08 +0000 UTC - event for webserver-dd94f59b7-g9pw8: {kubelet node1} Failed: Error: ImagePullBackOff&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:08 +0000 UTC - event for webserver-dd94f59b7-lxt56: {kubelet node2} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to set up sandbox 
container &#34;6d8432235d35fc21909af60be75453a7057cd5f0c18ed05401cd10354dfe66a6&#34; network for pod &#34;webserver-dd94f59b7-lxt56&#34;: networkPlugin cni failed to set up pod &#34;webserver-dd94f59b7-lxt56_deployment-6704&#34; network: Multus: [deployment-6704/webserver-dd94f59b7-lxt56]: error setting the networks status: SetNetworkStatus: failed to query the pod webserver-dd94f59b7-lxt56 in out of cluster comm: pods &#34;webserver-dd94f59b7-lxt56&#34; not found&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:09 +0000 UTC - event for webserver-57c6549b9c-s8p7t: {kubelet node1} Failed: Error: ErrImagePull&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:09 +0000 UTC - event for webserver-57c6549b9c-s8p7t: {kubelet node1} Failed: Failed to pull image &#34;docker.io/library/httpd:2.4.38-alpine&#34;: rpc error: code = Unknown desc = Error response from daemon: toomanyrequests: You have reached your pull rate limit. You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:09 +0000 UTC - event for webserver-57c6549b9c-x5hqw: {kubelet node2} BackOff: Back-off pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:09 +0000 UTC - event for webserver-57c6549b9c-x5hqw: {kubelet node2} Failed: Failed to pull image &#34;docker.io/library/httpd:2.4.38-alpine&#34;: rpc error: code = Unknown desc = Error response from daemon: toomanyrequests: You have reached your pull rate limit. 
You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:09 +0000 UTC - event for webserver-57c6549b9c-x5hqw: {kubelet node2} Failed: Error: ErrImagePull&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:09 +0000 UTC - event for webserver-57c6549b9c-x5hqw: {kubelet node2} Failed: Error: ImagePullBackOff&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:09 +0000 UTC - event for webserver-7748f58bfd-jpc89: {kubelet node1} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to start sandbox container for pod &#34;webserver-7748f58bfd-jpc89&#34;: Error response from daemon: OCI runtime create failed: container_linux.go:349: starting container process caused &#34;process_linux.go:449: container init caused \&#34;\&#34;&#34;: unknown&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:10 +0000 UTC - event for webserver-57c6549b9c-sg587: {kubelet node2} Pulling: Pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:10 +0000 UTC - event for webserver-57c6549b9c-sg587: {multus } AddedInterface: Add eth0 [10.244.4.40/24]&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:10 +0000 UTC - event for webserver-7748f58bfd-9xkg7: {kubelet node2} Failed: Failed to pull image &#34;docker.io/library/httpd:2.4.38-alpine&#34;: rpc error: code = Unknown desc = Error response from daemon: toomanyrequests: You have reached your pull rate limit. 
You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:10 +0000 UTC - event for webserver-7748f58bfd-9xkg7: {kubelet node2} Failed: Error: ErrImagePull&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:11 +0000 UTC - event for webserver-7748f58bfd-9xkg7: {kubelet node2} BackOff: Back-off pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:11 +0000 UTC - event for webserver-7748f58bfd-9xkg7: {kubelet node2} Failed: Error: ImagePullBackOff&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:13 +0000 UTC - event for webserver-57c6549b9c-mr92t: {multus } AddedInterface: Add eth0 [10.244.3.5/24]&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:13 +0000 UTC - event for webserver-57c6549b9c-mr92t: {kubelet node1} Pulling: Pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:13 +0000 UTC - event for webserver-7748f58bfd-wdhrv: {kubelet node1} Pulling: Pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:13 +0000 UTC - event for webserver-7748f58bfd-wdhrv: {multus } AddedInterface: Add eth0 [10.244.3.3/24]&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:14 +0000 UTC - event for webserver-57c6549b9c-sg587: {kubelet node2} SandboxChanged: Pod sandbox changed, it will be killed and re-created.&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:14 +0000 UTC - event for webserver-57c6549b9c-sg587: {kubelet node2} Failed: Error: ErrImagePull&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:14 +0000 UTC - event for webserver-57c6549b9c-sg587: {kubelet node2} Failed: Failed to pull image &#34;docker.io/library/httpd:2.4.38-alpine&#34;: rpc error: code = Unknown desc = Error response from daemon: toomanyrequests: You have reached your pull rate limit. 
You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:14 +0000 UTC - event for webserver-7748f58bfd: {replicaset-controller } SuccessfulCreate: (combined from similar events): Created pod: webserver-7748f58bfd-fsnl8&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:15 +0000 UTC - event for webserver-7748f58bfd-wdhrv: {kubelet node1} Failed: Failed to pull image &#34;docker.io/library/httpd:2.4.38-alpine&#34;: rpc error: code = Unknown desc = Error response from daemon: toomanyrequests: You have reached your pull rate limit. You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:15 +0000 UTC - event for webserver-7748f58bfd-wdhrv: {kubelet node1} Failed: Error: ErrImagePull&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:16 +0000 UTC - event for webserver-57c6549b9c-mr92t: {kubelet node1} Failed: Failed to pull image &#34;docker.io/library/httpd:2.4.38-alpine&#34;: rpc error: code = Unknown desc = Error response from daemon: toomanyrequests: You have reached your pull rate limit. 
You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:16 +0000 UTC - event for webserver-57c6549b9c-mr92t: {kubelet node1} Failed: Error: ErrImagePull&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:17 +0000 UTC - event for webserver: {deployment-controller } ScalingReplicaSet: Scaled up replica set webserver-57c6549b9c to 3&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:17 +0000 UTC - event for webserver: {deployment-controller } ScalingReplicaSet: Scaled down replica set webserver-7748f58bfd to 2&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:17 +0000 UTC - event for webserver-57c6549b9c: {replicaset-controller } SuccessfulDelete: Deleted pod: webserver-57c6549b9c-x5hqw&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:17 +0000 UTC - event for webserver-7748f58bfd: {replicaset-controller } SuccessfulDelete: Deleted pod: webserver-7748f58bfd-7whgt&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:18 +0000 UTC - event for webserver-57c6549b9c-sg587: {multus } AddedInterface: Add eth0 [10.244.4.43/24]&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:18 +0000 UTC - event for webserver-57c6549b9c-sg587: {kubelet node2} Failed: Error: ImagePullBackOff&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:18 +0000 UTC - event for webserver-57c6549b9c-sg587: {kubelet node2} BackOff: Back-off pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:18 +0000 UTC - event for webserver-7748f58bfd-7whgt: {multus } AddedInterface: Add eth0 [10.244.4.42/24]&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:19 +0000 UTC - event for webserver-57c6549b9c-25fqh: {multus } AddedInterface: Add eth0 [10.244.3.10/24]&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:19 +0000 UTC - event for webserver-7748f58bfd-7whgt: {kubelet node2} FailedCreatePodSandBox: Failed to create pod sandbox: rpc error: code = Unknown desc = 
failed to set up sandbox container &#34;5a2b2d229247bcb87c1dcd398af2c28bad77a6106eff0a279d7f87643e700bd1&#34; network for pod &#34;webserver-7748f58bfd-7whgt&#34;: networkPlugin cni failed to set up pod &#34;webserver-7748f58bfd-7whgt_deployment-6704&#34; network: Multus: [deployment-6704/webserver-7748f58bfd-7whgt]: error setting the networks status: SetNetworkStatus: failed to query the pod webserver-7748f58bfd-7whgt in out of cluster comm: pods &#34;webserver-7748f58bfd-7whgt&#34; not found&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:20 +0000 UTC - event for webserver-57c6549b9c-25fqh: {kubelet node1} Pulling: Pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:20 +0000 UTC - event for webserver-57c6549b9c-v975h: {kubelet node2} Pulling: Pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:20 +0000 UTC - event for webserver-57c6549b9c-v975h: {multus } AddedInterface: Add eth0 [10.244.4.44/24]&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:20 +0000 UTC - event for webserver-7748f58bfd-t7cxv: {multus } AddedInterface: Add eth0 [10.244.3.11/24]&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:20 +0000 UTC - event for webserver-7748f58bfd-t7cxv: {kubelet node1} Pulling: Pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:21 +0000 UTC - event for webserver-57c6549b9c-25fqh: {kubelet node1} Failed: Error: ErrImagePull&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:21 +0000 UTC - event for webserver-57c6549b9c-25fqh: {kubelet node1} Failed: Failed to pull image &#34;docker.io/library/httpd:2.4.38-alpine&#34;: rpc error: code = Unknown desc = Error response from daemon: toomanyrequests: You have reached your pull rate limit. 
You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:21 +0000 UTC - event for webserver-57c6549b9c-25fqh: {kubelet node1} Failed: Error: ImagePullBackOff&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:21 +0000 UTC - event for webserver-57c6549b9c-25fqh: {kubelet node1} BackOff: Back-off pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:21 +0000 UTC - event for webserver-57c6549b9c-v975h: {kubelet node2} Failed: Error: ErrImagePull&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:21 +0000 UTC - event for webserver-57c6549b9c-v975h: {kubelet node2} Failed: Failed to pull image &#34;docker.io/library/httpd:2.4.38-alpine&#34;: rpc error: code = Unknown desc = Error response from daemon: toomanyrequests: You have reached your pull rate limit. You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:21 +0000 UTC - event for webserver-57c6549b9c-v975h: {kubelet node2} Failed: Error: ImagePullBackOff&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:21 +0000 UTC - event for webserver-57c6549b9c-v975h: {kubelet node2} BackOff: Back-off pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:21 +0000 UTC - event for webserver-7748f58bfd-rn2mq: {multus } AddedInterface: Add eth0 [10.244.3.12/24]&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:21 +0000 UTC - event for webserver-7748f58bfd-rn2mq: {kubelet node1} Pulling: Pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:22 +0000 UTC - event for webserver-57c6549b9c-sg587: {multus } AddedInterface: Add eth0 [10.244.4.45/24]&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:22 +0000 UTC - event for webserver-7748f58bfd-t7cxv: {kubelet node1} 
Failed: Failed to pull image &#34;docker.io/library/httpd:2.4.38-alpine&#34;: rpc error: code = Unknown desc = Error response from daemon: toomanyrequests: You have reached your pull rate limit. You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:22 +0000 UTC - event for webserver-7748f58bfd-t7cxv: {kubelet node1} Failed: Error: ErrImagePull&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:22 +0000 UTC - event for webserver-7748f58bfd-t7cxv: {kubelet node1} BackOff: Back-off pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.259: INFO: At 2021-05-07 23:21:22 +0000 UTC - event for webserver-7748f58bfd-t7cxv: {kubelet node1} Failed: Error: ImagePullBackOff&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:23 +0000 UTC - event for webserver-7748f58bfd-rn2mq: {kubelet node1} BackOff: Back-off pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:23 +0000 UTC - event for webserver-7748f58bfd-rn2mq: {kubelet node1} Failed: Failed to pull image &#34;docker.io/library/httpd:2.4.38-alpine&#34;: rpc error: code = Unknown desc = Error response from daemon: toomanyrequests: You have reached your pull rate limit. 
You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:23 +0000 UTC - event for webserver-7748f58bfd-rn2mq: {kubelet node1} Failed: Error: ImagePullBackOff&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:23 +0000 UTC - event for webserver-7748f58bfd-rn2mq: {kubelet node1} Failed: Error: ErrImagePull&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:24 +0000 UTC - event for webserver-57c6549b9c-sg587: {multus } AddedInterface: Add eth0 [10.244.4.46/24]&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:25 +0000 UTC - event for webserver-57c6549b9c: {replicaset-controller } SuccessfulDelete: Deleted pod: webserver-57c6549b9c-25fqh&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:25 +0000 UTC - event for webserver-69b69768db: {replicaset-controller } SuccessfulCreate: Created pod: webserver-69b69768db-xfl87&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:25 +0000 UTC - event for webserver-69b69768db: {replicaset-controller } SuccessfulCreate: Created pod: webserver-69b69768db-6q8t9&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:25 +0000 UTC - event for webserver-7748f58bfd: {replicaset-controller } SuccessfulDelete: Deleted pod: webserver-7748f58bfd-9xkg7&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:25 +0000 UTC - event for webserver-7748f58bfd: {replicaset-controller } SuccessfulDelete: Deleted pod: webserver-7748f58bfd-rn2mq&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:25 +0000 UTC - event for webserver-dd94f59b7: {replicaset-controller } SuccessfulDelete: Deleted pod: webserver-dd94f59b7-g9pw8&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:27 +0000 UTC - event for webserver-69b69768db-6q8t9: {multus } AddedInterface: Add eth0 [10.244.4.47/24]&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:27 +0000 UTC - event for webserver-69b69768db-6q8t9: {kubelet node2} Pulling: Pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  
7 23:26:51.260: INFO: At 2021-05-07 23:21:28 +0000 UTC - event for webserver-69b69768db-6q8t9: {kubelet node2} Failed: Error: ErrImagePull&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:28 +0000 UTC - event for webserver-69b69768db-6q8t9: {kubelet node2} Failed: Failed to pull image &#34;docker.io/library/httpd:2.4.38-alpine&#34;: rpc error: code = Unknown desc = Error response from daemon: toomanyrequests: You have reached your pull rate limit. You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:28 +0000 UTC - event for webserver-69b69768db-xfl87: {kubelet node1} Pulling: Pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:28 +0000 UTC - event for webserver-69b69768db-xfl87: {multus } AddedInterface: Add eth0 [10.244.3.16/24]&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:29 +0000 UTC - event for webserver-69b69768db-6q8t9: {kubelet node2} SandboxChanged: Pod sandbox changed, it will be killed and re-created.&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:30 +0000 UTC - event for webserver-69b69768db-6q8t9: {kubelet node2} Failed: Error: ImagePullBackOff&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:30 +0000 UTC - event for webserver-69b69768db-6q8t9: {multus } AddedInterface: Add eth0 [10.244.4.48/24]&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:30 +0000 UTC - event for webserver-69b69768db-6q8t9: {kubelet node2} BackOff: Back-off pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:30 +0000 UTC - event for webserver-69b69768db-xfl87: {kubelet node1} BackOff: Back-off pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:30 +0000 UTC - event for webserver-69b69768db-xfl87: {kubelet node1} Failed: Failed to pull image 
&#34;docker.io/library/httpd:2.4.38-alpine&#34;: rpc error: code = Unknown desc = Error response from daemon: toomanyrequests: You have reached your pull rate limit. You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:30 +0000 UTC - event for webserver-69b69768db-xfl87: {kubelet node1} Failed: Error: ErrImagePull&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:30 +0000 UTC - event for webserver-69b69768db-xfl87: {kubelet node1} Failed: Error: ImagePullBackOff&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:33 +0000 UTC - event for webserver-69b69768db: {replicaset-controller } SuccessfulCreate: Created pod: webserver-69b69768db-9p47q&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:35 +0000 UTC - event for webserver-57c6549b9c-n675z: {kubelet node1} Pulling: Pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:35 +0000 UTC - event for webserver-57c6549b9c-n675z: {multus } AddedInterface: Add eth0 [10.244.3.17/24]&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:35 +0000 UTC - event for webserver-57c6549b9c-wsmp2: {multus } AddedInterface: Add eth0 [10.244.4.49/24]&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:36 +0000 UTC - event for webserver-57c6549b9c-wsmp2: {kubelet node2} Pulling: Pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:36 +0000 UTC - event for webserver-7748f58bfd-fsnl8: {kubelet node2} Pulling: Pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:36 +0000 UTC - event for webserver-7748f58bfd-fsnl8: {multus } AddedInterface: Add eth0 [10.244.4.50/24]&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:37 +0000 UTC - event for webserver-57c6549b9c-n675z: {kubelet node1} Failed: Error: ErrImagePull&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:37 +0000 
UTC - event for webserver-57c6549b9c-n675z: {kubelet node1} Failed: Failed to pull image &#34;docker.io/library/httpd:2.4.38-alpine&#34;: rpc error: code = Unknown desc = Error response from daemon: toomanyrequests: You have reached your pull rate limit. You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:37 +0000 UTC - event for webserver-57c6549b9c-wsmp2: {kubelet node2} BackOff: Back-off pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:37 +0000 UTC - event for webserver-57c6549b9c-wsmp2: {kubelet node2} Failed: Error: ImagePullBackOff&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:37 +0000 UTC - event for webserver-57c6549b9c-wsmp2: {kubelet node2} Failed: Failed to pull image &#34;docker.io/library/httpd:2.4.38-alpine&#34;: rpc error: code = Unknown desc = Error response from daemon: toomanyrequests: You have reached your pull rate limit. 
You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:37 +0000 UTC - event for webserver-57c6549b9c-wsmp2: {kubelet node2} Failed: Error: ErrImagePull&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:37 +0000 UTC - event for webserver-69b69768db-9p47q: {multus } AddedInterface: Add eth0 [10.244.3.18/24]&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:37 +0000 UTC - event for webserver-69b69768db-9p47q: {kubelet node1} Pulling: Pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:38 +0000 UTC - event for webserver-57c6549b9c-n675z: {kubelet node1} Failed: Error: ImagePullBackOff&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:38 +0000 UTC - event for webserver-57c6549b9c-n675z: {kubelet node1} BackOff: Back-off pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:38 +0000 UTC - event for webserver-69b69768db-9p47q: {kubelet node1} Failed: Failed to pull image &#34;docker.io/library/httpd:2.4.38-alpine&#34;: rpc error: code = Unknown desc = Error response from daemon: toomanyrequests: You have reached your pull rate limit. You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:38 +0000 UTC - event for webserver-69b69768db-9p47q: {kubelet node1} Failed: Error: ErrImagePull&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:38 +0000 UTC - event for webserver-7748f58bfd-fsnl8: {kubelet node2} Failed: Failed to pull image &#34;docker.io/library/httpd:2.4.38-alpine&#34;: rpc error: code = Unknown desc = Error response from daemon: toomanyrequests: You have reached your pull rate limit. 
You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:38 +0000 UTC - event for webserver-7748f58bfd-fsnl8: {kubelet node2} Failed: Error: ErrImagePull&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:38 +0000 UTC - event for webserver-7748f58bfd-fsnl8: {kubelet node2} SandboxChanged: Pod sandbox changed, it will be killed and re-created.&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:39 +0000 UTC - event for webserver-69b69768db-9p47q: {kubelet node1} SandboxChanged: Pod sandbox changed, it will be killed and re-created.&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:40 +0000 UTC - event for webserver-69b69768db-9p47q: {multus } AddedInterface: Add eth0 [10.244.3.19/24]&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:40 +0000 UTC - event for webserver-69b69768db-9p47q: {kubelet node1} Failed: Error: ImagePullBackOff&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:40 +0000 UTC - event for webserver-69b69768db-9p47q: {kubelet node1} BackOff: Back-off pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:41 +0000 UTC - event for webserver-7748f58bfd-fsnl8: {kubelet node2} BackOff: Back-off pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:41 +0000 UTC - event for webserver-7748f58bfd-fsnl8: {multus } AddedInterface: Add eth0 [10.244.4.51/24]&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:41 +0000 UTC - event for webserver-7748f58bfd-fsnl8: {kubelet node2} Failed: Error: ImagePullBackOff&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:43 +0000 UTC - event for webserver-57c6549b9c: {replicaset-controller } SuccessfulDelete: Deleted pod: webserver-57c6549b9c-wd4t8&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:43 +0000 UTC - event for webserver-57c6549b9c: {replicaset-controller } SuccessfulDelete: Deleted pod: 
webserver-57c6549b9c-wsmp2&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:43 +0000 UTC - event for webserver-69b69768db: {replicaset-controller } SuccessfulCreate: Created pod: webserver-69b69768db-kp8mk&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:43 +0000 UTC - event for webserver-69b69768db: {replicaset-controller } SuccessfulDelete: Deleted pod: webserver-69b69768db-kp8mk&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:43 +0000 UTC - event for webserver-7748f58bfd: {replicaset-controller } SuccessfulDelete: Deleted pod: webserver-7748f58bfd-fsnl8&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:43 +0000 UTC - event for webserver-7748f58bfd-fsnl8: {multus } AddedInterface: Add eth0 [10.244.4.52/24]&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:43 +0000 UTC - event for webserver-7ccc6798d4: {replicaset-controller } SuccessfulCreate: Created pod: webserver-7ccc6798d4-xsdt7&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:43 +0000 UTC - event for webserver-7ccc6798d4: {replicaset-controller } SuccessfulCreate: Created pod: webserver-7ccc6798d4-gcnhb&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:45 +0000 UTC - event for webserver-7ccc6798d4-gcnhb: {kubelet node1} Pulling: Pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:45 +0000 UTC - event for webserver-7ccc6798d4-gcnhb: {multus } AddedInterface: Add eth0 [10.244.3.20/24]&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:45 +0000 UTC - event for webserver-7ccc6798d4-xsdt7: {multus } AddedInterface: Add eth0 [10.244.4.53/24]&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:45 +0000 UTC - event for webserver-7ccc6798d4-xsdt7: {kubelet node2} Pulling: Pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:46 +0000 UTC - event for webserver-7ccc6798d4-xsdt7: {kubelet node2} Failed: Failed to pull image &#34;docker.io/library/httpd:2.4.38-alpine&#34;: rpc error: code = 
Unknown desc = Error response from daemon: toomanyrequests: You have reached your pull rate limit. You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:46 +0000 UTC - event for webserver-7ccc6798d4-xsdt7: {kubelet node2} Failed: Error: ErrImagePull&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:47 +0000 UTC - event for webserver-7ccc6798d4-gcnhb: {kubelet node1} Failed: Error: ErrImagePull&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:47 +0000 UTC - event for webserver-7ccc6798d4-gcnhb: {kubelet node1} Failed: Error: ImagePullBackOff&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:47 +0000 UTC - event for webserver-7ccc6798d4-gcnhb: {kubelet node1} BackOff: Back-off pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:47 +0000 UTC - event for webserver-7ccc6798d4-gcnhb: {kubelet node1} Failed: Failed to pull image &#34;docker.io/library/httpd:2.4.38-alpine&#34;: rpc error: code = Unknown desc = Error response from daemon: toomanyrequests: You have reached your pull rate limit. 
You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:47 +0000 UTC - event for webserver-7ccc6798d4-xsdt7: {kubelet node2} SandboxChanged: Pod sandbox changed, it will be killed and re-created.&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:49 +0000 UTC - event for webserver-69b69768db: {replicaset-controller } SuccessfulCreate: Created pod: webserver-69b69768db-wmwtc&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:49 +0000 UTC - event for webserver-7ccc6798d4: {replicaset-controller } SuccessfulCreate: Created pod: webserver-7ccc6798d4-vhdp2&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:49 +0000 UTC - event for webserver-7ccc6798d4-xsdt7: {multus } AddedInterface: Add eth0 [10.244.4.54/24]&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:49 +0000 UTC - event for webserver-7ccc6798d4-xsdt7: {kubelet node2} Failed: Error: ImagePullBackOff&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:49 +0000 UTC - event for webserver-7ccc6798d4-xsdt7: {kubelet node2} BackOff: Back-off pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:50 +0000 UTC - event for webserver-7ccc6798d4-vhdp2: {multus } AddedInterface: Add eth0 [10.244.4.55/24]&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:50 +0000 UTC - event for webserver-7ccc6798d4-vhdp2: {kubelet node2} Pulling: Pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:51 +0000 UTC - event for webserver-69b69768db-wmwtc: {kubelet node2} Pulling: Pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:51 +0000 UTC - event for webserver-69b69768db-wmwtc: {multus } AddedInterface: Add eth0 [10.244.4.56/24]&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:51 +0000 UTC - event for webserver-7ccc6798d4-vhdp2: {kubelet node2} Failed: Error: 
ImagePullBackOff&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:51 +0000 UTC - event for webserver-7ccc6798d4-vhdp2: {kubelet node2} BackOff: Back-off pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:51 +0000 UTC - event for webserver-7ccc6798d4-vhdp2: {kubelet node2} Failed: Failed to pull image &#34;docker.io/library/httpd:2.4.38-alpine&#34;: rpc error: code = Unknown desc = Error response from daemon: toomanyrequests: You have reached your pull rate limit. You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:51 +0000 UTC - event for webserver-7ccc6798d4-vhdp2: {kubelet node2} Failed: Error: ErrImagePull&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:52 +0000 UTC - event for webserver-69b69768db-wmwtc: {kubelet node2} Failed: Error: ImagePullBackOff&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:52 +0000 UTC - event for webserver-69b69768db-wmwtc: {kubelet node2} Failed: Failed to pull image &#34;docker.io/library/httpd:2.4.38-alpine&#34;: rpc error: code = Unknown desc = Error response from daemon: toomanyrequests: You have reached your pull rate limit. 
You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:52 +0000 UTC - event for webserver-69b69768db-wmwtc: {kubelet node2} Failed: Error: ErrImagePull&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:21:52 +0000 UTC - event for webserver-69b69768db-wmwtc: {kubelet node2} BackOff: Back-off pulling image &#34;docker.io/library/httpd:2.4.38-alpine&#34;&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:22:58 +0000 UTC - event for webserver-dd94f59b7-fntfg: {kubelet node1} FailedMount: Unable to attach or mount volumes: unmounted volumes=[default-token-457cw], unattached volumes=[default-token-457cw]: timed out waiting for the condition&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:23:00 +0000 UTC - event for webserver-dd94f59b7-q99v6: {kubelet node2} FailedMount: Unable to attach or mount volumes: unmounted volumes=[default-token-457cw], unattached volumes=[default-token-457cw]: timed out waiting for the condition&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:23:03 +0000 UTC - event for webserver-dd94f59b7-xz9dr: {kubelet node1} FailedMount: Unable to attach or mount volumes: unmounted volumes=[default-token-457cw], unattached volumes=[default-token-457cw]: timed out waiting for the condition&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:23:06 +0000 UTC - event for webserver-7748f58bfd-pcnfs: {kubelet node2} FailedMount: Unable to attach or mount volumes: unmounted volumes=[default-token-457cw], unattached volumes=[default-token-457cw]: timed out waiting for the condition&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:23:06 +0000 UTC - event for webserver-7748f58bfd-prl8t: {kubelet node1} FailedMount: Unable to attach or mount volumes: unmounted volumes=[default-token-457cw], unattached volumes=[default-token-457cw]: timed out waiting for the condition&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:23:46 +0000 UTC - event for webserver-57c6549b9c-wd4t8: {kubelet 
node2} FailedMount: Unable to attach or mount volumes: unmounted volumes=[default-token-457cw], unattached volumes=[default-token-457cw]: timed out waiting for the condition&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:23:46 +0000 UTC - event for webserver-69b69768db-kp8mk: {kubelet node2} FailedMount: Unable to attach or mount volumes: unmounted volumes=[default-token-457cw], unattached volumes=[default-token-457cw]: timed out waiting for the condition&#xA;May  7 23:26:51.260: INFO: At 2021-05-07 23:23:47 +0000 UTC - event for webserver-7748f58bfd-fsnl8: {kubelet node2} FailedMount: Unable to attach or mount volumes: unmounted volumes=[default-token-457cw], unattached volumes=[default-token-457cw]: timed out waiting for the condition&#xA;May  7 23:26:51.263: INFO: POD                         NODE   PHASE    GRACE  CONDITIONS&#xA;May  7 23:26:51.263: INFO: webserver-57c6549b9c-n675z  node1  Pending         [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-05-07 23:21:33 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-05-07 23:21:33 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-05-07 23:21:33 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-05-07 23:21:33 +0000 UTC  }]&#xA;May  7 23:26:51.263: INFO: webserver-69b69768db-6q8t9  node2  Pending         [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-05-07 23:21:25 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-05-07 23:21:25 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-05-07 23:21:25 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-05-07 23:21:25 +0000 UTC  }]&#xA;May  7 23:26:51.263: INFO: webserver-69b69768db-9p47q  node1  Pending         [{Initialized True 0001-01-01 
00:00:00 +0000 UTC 2021-05-07 23:21:33 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-05-07 23:21:33 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-05-07 23:21:33 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-05-07 23:21:33 +0000 UTC  }]&#xA;May  7 23:26:51.263: INFO: webserver-69b69768db-wmwtc  node2  Pending         [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-05-07 23:21:49 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-05-07 23:21:49 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-05-07 23:21:49 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-05-07 23:21:49 +0000 UTC  }]&#xA;May  7 23:26:51.263: INFO: webserver-7ccc6798d4-gcnhb  node1  Pending         [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-05-07 23:21:43 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-05-07 23:21:43 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-05-07 23:21:43 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-05-07 23:21:43 +0000 UTC  }]&#xA;May  7 23:26:51.263: INFO: webserver-7ccc6798d4-vhdp2  node2  Pending         [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-05-07 23:21:49 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-05-07 23:21:49 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-05-07 23:21:49 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-05-07 23:21:49 +0000 UTC  }]&#xA;May  7 
23:26:51.263: INFO: webserver-7ccc6798d4-xsdt7  node2  Pending         [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-05-07 23:21:43 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-05-07 23:21:43 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-05-07 23:21:43 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-05-07 23:21:43 +0000 UTC  }]&#xA;May  7 23:26:51.264: INFO: &#xA;May  7 23:26:51.268: INFO: &#xA;Logging node info for node master1&#xA;May  7 23:26:51.270: INFO: Node Info: &amp;Node{ObjectMeta:{master1   /api/v1/nodes/master1 7277f528-99fb-4be3-a928-27761a4599e8 75249 0 2021-05-07 19:59:27 +0000 UTC &lt;nil&gt; &lt;nil&gt; map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/os:linux kubernetes.io/arch:amd64 kubernetes.io/hostname:master1 kubernetes.io/os:linux node-role.kubernetes.io/master:] map[flannel.alpha.coreos.com/backend-data:{&#34;VtepMAC&#34;:&#34;a2:74:f5:d6:48:e4&#34;} flannel.alpha.coreos.com/backend-type:vxlan flannel.alpha.coreos.com/kube-subnet-manager:true flannel.alpha.coreos.com/public-ip:10.10.190.202 kubeadm.alpha.kubernetes.io/cri-socket:/var/run/dockershim.sock nfd.node.kubernetes.io/master.version:v0.7.0 node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] []  [{kubelet Update v1 2021-05-07 19:59:27 +0000 UTC FieldsV1 
{&#34;f:metadata&#34;:{&#34;f:annotations&#34;:{&#34;.&#34;:{},&#34;f:volumes.kubernetes.io/controller-managed-attach-detach&#34;:{}},&#34;f:labels&#34;:{&#34;.&#34;:{},&#34;f:beta.kubernetes.io/arch&#34;:{},&#34;f:beta.kubernetes.io/os&#34;:{},&#34;f:kubernetes.io/arch&#34;:{},&#34;f:kubernetes.io/hostname&#34;:{},&#34;f:kubernetes.io/os&#34;:{}}},&#34;f:status&#34;:{&#34;f:addresses&#34;:{&#34;.&#34;:{},&#34;k:{\&#34;type\&#34;:\&#34;Hostname\&#34;}&#34;:{&#34;.&#34;:{},&#34;f:address&#34;:{},&#34;f:type&#34;:{}},&#34;k:{\&#34;type\&#34;:\&#34;InternalIP\&#34;}&#34;:{&#34;.&#34;:{},&#34;f:address&#34;:{},&#34;f:type&#34;:{}}},&#34;f:allocatable&#34;:{&#34;.&#34;:{},&#34;f:cpu&#34;:{},&#34;f:ephemeral-storage&#34;:{},&#34;f:hugepages-1Gi&#34;:{},&#34;f:hugepages-2Mi&#34;:{},&#34;f:memory&#34;:{},&#34;f:pods&#34;:{}},&#34;f:capacity&#34;:{&#34;.&#34;:{},&#34;f:cpu&#34;:{},&#34;f:ephemeral-storage&#34;:{},&#34;f:hugepages-1Gi&#34;:{},&#34;f:hugepages-2Mi&#34;:{},&#34;f:memory&#34;:{},&#34;f:pods&#34;:{}},&#34;f:conditions&#34;:{&#34;.&#34;:{},&#34;k:{\&#34;type\&#34;:\&#34;DiskPressure\&#34;}&#34;:{&#34;.&#34;:{},&#34;f:lastHeartbeatTime&#34;:{},&#34;f:lastTransitionTime&#34;:{},&#34;f:message&#34;:{},&#34;f:reason&#34;:{},&#34;f:status&#34;:{},&#34;f:type&#34;:{}},&#34;k:{\&#34;type\&#34;:\&#34;MemoryPressure\&#34;}&#34;:{&#34;.&#34;:{},&#34;f:lastHeartbeatTime&#34;:{},&#34;f:lastTransitionTime&#34;:{},&#34;f:message&#34;:{},&#34;f:reason&#34;:{},&#34;f:status&#34;:{},&#34;f:type&#34;:{}},&#34;k:{\&#34;type\&#34;:\&#34;PIDPressure\&#34;}&#34;:{&#34;.&#34;:{},&#34;f:lastHeartbeatTime&#34;:{},&#34;f:lastTransitionTime&#34;:{},&#34;f:message&#34;:{},&#34;f:reason&#34;:{},&#34;f:status&#34;:{},&#34;f:type&#34;:{}},&#34;k:{\&#34;type\&#34;:\&#34;Ready\&#34;}&#34;:{&#34;.&#34;:{},&#34;f:lastHeartbeatTime&#34;:{},&#34;f:lastTransitionTime&#34;:{},&#34;f:message&#34;:{},&#34;f:reason&#34;:{},&#34;f:status&#34;:{},&#34;f:type&#34;:{}}},&#34;f:daemonEndpoints&#34;:{&#34;f:kub
eletEndpoint&#34;:{&#34;f:Port&#34;:{}}},&#34;f:images&#34;:{},&#34;f:nodeInfo&#34;:{&#34;f:architecture&#34;:{},&#34;f:bootID&#34;:{},&#34;f:containerRuntimeVersion&#34;:{},&#34;f:kernelVersion&#34;:{},&#34;f:kubeProxyVersion&#34;:{},&#34;f:kubeletVersion&#34;:{},&#34;f:machineID&#34;:{},&#34;f:operatingSystem&#34;:{},&#34;f:osImage&#34;:{},&#34;f:systemUUID&#34;:{}}}}} {kubeadm Update v1 2021-05-07 19:59:28 +0000 UTC FieldsV1 {&#34;f:metadata&#34;:{&#34;f:annotations&#34;:{&#34;f:kubeadm.alpha.kubernetes.io/cri-socket&#34;:{}},&#34;f:labels&#34;:{&#34;f:node-role.kubernetes.io/master&#34;:{}}}}} {flanneld Update v1 2021-05-07 20:02:07 +0000 UTC FieldsV1 {&#34;f:metadata&#34;:{&#34;f:annotations&#34;:{&#34;f:flannel.alpha.coreos.com/backend-data&#34;:{},&#34;f:flannel.alpha.coreos.com/backend-type&#34;:{},&#34;f:flannel.alpha.coreos.com/kube-subnet-manager&#34;:{},&#34;f:flannel.alpha.coreos.com/public-ip&#34;:{}}},&#34;f:status&#34;:{&#34;f:conditions&#34;:{&#34;k:{\&#34;type\&#34;:\&#34;NetworkUnavailable\&#34;}&#34;:{&#34;.&#34;:{},&#34;f:lastHeartbeatTime&#34;:{},&#34;f:lastTransitionTime&#34;:{},&#34;f:message&#34;:{},&#34;f:reason&#34;:{},&#34;f:status&#34;:{},&#34;f:type&#34;:{}}}}}} {kube-controller-manager Update v1 2021-05-07 20:02:12 +0000 UTC FieldsV1 {&#34;f:metadata&#34;:{&#34;f:annotations&#34;:{&#34;f:node.alpha.kubernetes.io/ttl&#34;:{}}},&#34;f:spec&#34;:{&#34;f:podCIDR&#34;:{},&#34;f:podCIDRs&#34;:{&#34;.&#34;:{},&#34;v:\&#34;10.244.0.0/24\&#34;&#34;:{}},&#34;f:taints&#34;:{}}}} {nfd-master Update v1 2021-05-07 20:08:42 +0000 UTC FieldsV1 {&#34;f:metadata&#34;:{&#34;f:annotations&#34;:{&#34;f:nfd.node.kubernetes.io/master.version&#34;:{}}}}}]},Spec:NodeSpec{PodCIDR:10.244.0.0/24,DoNotUseExternalID:,ProviderID:,Unschedulable:false,Taints:[]Taint{Taint{Key:node-role.kubernetes.io/master,Value:,Effect:NoSchedule,TimeAdded:&lt;nil&gt;,},},ConfigSource:nil,PodCIDRs:[10.244.0.0/24],},Status:NodeStatus{Capacity:ResourceList{cpu: {{80 0} {&lt;nil&gt;} 
80 DecimalSI},ephemeral-storage: {{450471260160 0} {&lt;nil&gt;} 439913340Ki BinarySI},hugepages-1Gi: {{0 0} {&lt;nil&gt;} 0 DecimalSI},hugepages-2Mi: {{0 0} {&lt;nil&gt;} 0 DecimalSI},memory: {{201234763776 0} {&lt;nil&gt;} 196518324Ki BinarySI},pods: {{110 0} {&lt;nil&gt;} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{79550 -3} {&lt;nil&gt;} 79550m DecimalSI},ephemeral-storage: {{405424133473 0} {&lt;nil&gt;} 405424133473 DecimalSI},hugepages-1Gi: {{0 0} {&lt;nil&gt;} 0 DecimalSI},hugepages-2Mi: {{0 0} {&lt;nil&gt;} 0 DecimalSI},memory: {{200324599808 0} {&lt;nil&gt;} 195629492Ki BinarySI},pods: {{110 0} {&lt;nil&gt;} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2021-05-07 20:05:15 +0000 UTC,LastTransitionTime:2021-05-07 20:05:15 +0000 UTC,Reason:FlannelIsUp,Message:Flannel is running on this node,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2021-05-07 23:26:43 +0000 UTC,LastTransitionTime:2021-05-07 19:59:26 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2021-05-07 23:26:43 +0000 UTC,LastTransitionTime:2021-05-07 19:59:26 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2021-05-07 23:26:43 +0000 UTC,LastTransitionTime:2021-05-07 19:59:26 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2021-05-07 23:26:43 +0000 UTC,LastTransitionTime:2021-05-07 20:02:12 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready 
status,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.10.190.202,},NodeAddress{Type:Hostname,Address:master1,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:44253340a37b4ed7bf5b3a3547b5f57d,SystemUUID:00ACFB60-0631-E711-906E-0017A4403562,BootID:2a44f9c3-2714-4b5c-975d-25d069f66483,KernelVersion:3.10.0-1160.25.1.el7.x86_64,OSImage:CentOS Linux 7 (Core),ContainerRuntimeVersion:docker://19.3.14,KubeletVersion:v1.19.8,KubeProxyVersion:v1.19.8,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[cmk:v1.5.1 localhost:30500/cmk:v1.5.1],SizeBytes:726657349,},ContainerImage{Names:[centos/python-36-centos7@sha256:ac50754646f0d37616515fb30467d8743fb12954260ec36c9ecb5a94499447e0 centos/python-36-centos7:latest],SizeBytes:650061677,},ContainerImage{Names:[nfvpe/multus@sha256:ac1266b87ba44c09dc2a336f0d5dad968fccd389ce1944a85e87b32cd21f7224 nfvpe/multus:v3.4.2],SizeBytes:276587882,},ContainerImage{Names:[kubernetesui/dashboard-amd64@sha256:3af248961c56916aeca8eb4000c15d6cf6a69641ea92f0540865bb37b495932f kubernetesui/dashboard-amd64:v2.1.0],SizeBytes:225733746,},ContainerImage{Names:[k8s.gcr.io/kube-apiserver@sha256:82e0ce4e1d08f3749d05c584fd60986197bfcdf9ce71d4666c71674221d53135 k8s.gcr.io/kube-apiserver:v1.19.8],SizeBytes:118813022,},ContainerImage{Names:[k8s.gcr.io/kube-proxy@sha256:8ed30419d9cf8965854f9ed501159e15deb30c42c3d2a60a278ae169320d140e k8s.gcr.io/kube-proxy:v1.19.8],SizeBytes:117674285,},ContainerImage{Names:[k8s.gcr.io/kube-controller-manager@sha256:2769005fb667dbb936009894d01fe35f5ce1bce45eee80a9ce3c139b9be4080e k8s.gcr.io/kube-controller-manager:v1.19.8],SizeBytes:110805342,},ContainerImage{Names:[gcr.io/k8s-staging-nfd/node-feature-discovery@sha256:5d116c2c340be665a2c8adc9aca7f91396bd5cbde4add4fdc8dab95d8db43425 
gcr.io/k8s-staging-nfd/node-feature-discovery:v0.7.0],SizeBytes:108309584,},ContainerImage{Names:[quay.io/coreos/etcd@sha256:04833b601fa130512450afa45c4fe484fee1293634f34c7ddc231bd193c74017 quay.io/coreos/etcd:v3.4.13],SizeBytes:83790470,},ContainerImage{Names:[quay.io/coreos/flannel@sha256:34860ea294a018d392e61936f19a7862d5e92039d196cac9176da14b2bbd0fe3 quay.io/coreos/flannel@sha256:ac5322604bcab484955e6dbc507f45a906bde79046667322e3918a8578ab08c8 quay.io/coreos/flannel:v0.13.0 quay.io/coreos/flannel:v0.13.0-amd64],SizeBytes:57156911,},ContainerImage{Names:[quay.io/coreos/kube-rbac-proxy@sha256:e10d1d982dd653db74ca87a1d1ad017bc5ef1aeb651bdea089debf16485b080b quay.io/coreos/kube-rbac-proxy:v0.5.0],SizeBytes:46626428,},ContainerImage{Names:[k8s.gcr.io/kube-scheduler@sha256:bb66135ce9a25ac405e43bbae6a2ac766e0efcac0a6a73ef9d1fbb4cf4732c9b k8s.gcr.io/kube-scheduler:v1.19.8],SizeBytes:46510430,},ContainerImage{Names:[k8s.gcr.io/coredns@sha256:73ca82b4ce829766d4f1f10947c3a338888f876fbed0540dc849c89ff256e90c k8s.gcr.io/coredns:1.7.0],SizeBytes:45227747,},ContainerImage{Names:[quay.io/coreos/kube-rbac-proxy@sha256:9d07c391aeb1a9d02eb4343c113ed01825227c70c32b3cae861711f90191b0fd quay.io/coreos/kube-rbac-proxy:v0.4.1],SizeBytes:41317870,},ContainerImage{Names:[k8s.gcr.io/cpa/cluster-proportional-autoscaler-amd64@sha256:dce43068853ad396b0fb5ace9a56cc14114e31979e241342d12d04526be1dfcc k8s.gcr.io/cpa/cluster-proportional-autoscaler-amd64:1.8.3],SizeBytes:40647382,},ContainerImage{Names:[quay.io/coreos/prometheus-operator@sha256:a54e806fb27d2fb0251da4f3b2a3bb5320759af63a54a755788304775f2384a7 quay.io/coreos/prometheus-operator:v0.40.0],SizeBytes:38238457,},ContainerImage{Names:[kubernetesui/metrics-scraper@sha256:1f977343873ed0e2efd4916a6b2f3075f310ff6fe42ee098f54fc58aa7a28ab7 kubernetesui/metrics-scraper:v1.0.6],SizeBytes:34548789,},ContainerImage{Names:[registry@sha256:1cd9409a311350c3072fe510b52046f104416376c126a479cef9a4dfe692cf57 
registry:2.7.0],SizeBytes:24191168,},ContainerImage{Names:[quay.io/prometheus/node-exporter@sha256:a2f29256e53cc3e0b64d7a472512600b2e9410347d53cdc85b49f659c17e02ee quay.io/prometheus/node-exporter:v0.18.1],SizeBytes:22933477,},ContainerImage{Names:[localhost:30500/tas-controller@sha256:09461cf1b75776eb7d277a89d3a624c9eea355bf2ab1d8abbe45c40df99de268 tas-controller:latest localhost:30500/tas-controller:0.1],SizeBytes:22922439,},ContainerImage{Names:[nginx@sha256:a97eb9ecc708c8aa715ccfb5e9338f5456e4b65575daf304f108301f3b497314 nginx:1.19.2-alpine],SizeBytes:22052669,},ContainerImage{Names:[localhost:30500/tas-extender@sha256:5b4ebd3c9985a2f36839e18a8b7c84e4d1deb89fa486247108510c71673efe12 tas-extender:latest localhost:30500/tas-extender:0.1],SizeBytes:21320903,},ContainerImage{Names:[&lt;none&gt;@&lt;none&gt; &lt;none&gt;:&lt;none&gt;],SizeBytes:5577654,},ContainerImage{Names:[alpine@sha256:c0e9560cda118f9ec63ddefb4a173a2b2a0347082d7dff7dc14272e7841a5b5a alpine:3.12.1],SizeBytes:5573013,},ContainerImage{Names:[k8s.gcr.io/pause@sha256:927d98197ec1141a368550822d18fa1c60bdae27b78b0c004f705f548c07814f k8s.gcr.io/pause:3.2],SizeBytes:682696,},ContainerImage{Names:[k8s.gcr.io/pause@sha256:a319ac2280eb7e3a59e252e54b76327cb4a33cf8389053b0d78277f22bbca2fa k8s.gcr.io/pause:3.3],SizeBytes:682696,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},}&#xA;May  7 23:26:51.271: INFO: &#xA;Logging kubelet events for node master1&#xA;May  7 23:26:51.273: INFO: &#xA;Logging pods the kubelet thinks is on node master1&#xA;May  7 23:26:51.294: INFO: kube-proxy-qpvcb started at 2021-05-07 20:01:27 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.294: INFO: &#x9;Container kube-proxy ready: true, restart count 2&#xA;May  7 23:26:51.294: INFO: kube-multus-ds-amd64-8tjh4 started at 2021-05-07 20:02:10 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.294: INFO: &#x9;Container kube-multus ready: true, restart count 1&#xA;May  7 23:26:51.294: INFO: 
docker-registry-docker-registry-56cbc7bc58-267wq started at 2021-05-07 20:05:51 +0000 UTC (0+2 container statuses recorded)&#xA;May  7 23:26:51.294: INFO: &#x9;Container docker-registry ready: true, restart count 0&#xA;May  7 23:26:51.294: INFO: &#x9;Container nginx ready: true, restart count 0&#xA;May  7 23:26:51.294: INFO: prometheus-operator-5bb8cb9d8f-xqpnw started at 2021-05-07 20:12:35 +0000 UTC (0+2 container statuses recorded)&#xA;May  7 23:26:51.294: INFO: &#x9;Container kube-rbac-proxy ready: true, restart count 0&#xA;May  7 23:26:51.294: INFO: &#x9;Container prometheus-operator ready: true, restart count 0&#xA;May  7 23:26:51.294: INFO: kube-apiserver-master1 started at 2021-05-07 20:07:46 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.294: INFO: &#x9;Container kube-apiserver ready: true, restart count 0&#xA;May  7 23:26:51.294: INFO: kube-controller-manager-master1 started at 2021-05-07 20:04:26 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.294: INFO: &#x9;Container kube-controller-manager ready: true, restart count 2&#xA;May  7 23:26:51.294: INFO: kube-scheduler-master1 started at 2021-05-07 20:15:37 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.294: INFO: &#x9;Container kube-scheduler ready: true, restart count 0&#xA;May  7 23:26:51.294: INFO: kube-flannel-nj6vr started at 2021-05-07 20:02:02 +0000 UTC (1+1 container statuses recorded)&#xA;May  7 23:26:51.294: INFO: &#x9;Init container install-cni ready: true, restart count 0&#xA;May  7 23:26:51.294: INFO: &#x9;Container kube-flannel ready: true, restart count 2&#xA;May  7 23:26:51.294: INFO: coredns-7677f9bb54-wvd65 started at 2021-05-07 20:02:30 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.294: INFO: &#x9;Container coredns ready: true, restart count 2&#xA;May  7 23:26:51.294: INFO: node-feature-discovery-controller-5bf5c49849-z2bj7 started at 2021-05-07 20:08:34 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 
23:26:51.294: INFO: &#x9;Container nfd-controller ready: true, restart count 0&#xA;May  7 23:26:51.294: INFO: node-exporter-9sm5v started at 2021-05-07 20:12:42 +0000 UTC (0+2 container statuses recorded)&#xA;May  7 23:26:51.294: INFO: &#x9;Container kube-rbac-proxy ready: true, restart count 0&#xA;May  7 23:26:51.294: INFO: &#x9;Container node-exporter ready: true, restart count 0&#xA;W0507 23:26:51.306806      36 metrics_grabber.go:105] Did not receive an external client interface. Grabbing metrics from ClusterAutoscaler is disabled.&#xA;May  7 23:26:51.331: INFO: &#xA;Latency metrics for node master1&#xA;May  7 23:26:51.331: INFO: &#xA;Logging node info for node master2&#xA;May  7 23:26:51.333: INFO: Node Info: &amp;Node{ObjectMeta:{master2   /api/v1/nodes/master2 8193aa97-7914-4d63-b6cf-a7ceb8fe519c 75279 0 2021-05-07 20:00:06 +0000 UTC &lt;nil&gt; &lt;nil&gt; map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/os:linux kubernetes.io/arch:amd64 kubernetes.io/hostname:master2 kubernetes.io/os:linux node-role.kubernetes.io/master:] map[flannel.alpha.coreos.com/backend-data:{&#34;VtepMAC&#34;:&#34;86:88:6a:bc:18:32&#34;} flannel.alpha.coreos.com/backend-type:vxlan flannel.alpha.coreos.com/kube-subnet-manager:true flannel.alpha.coreos.com/public-ip:10.10.190.203 kubeadm.alpha.kubernetes.io/cri-socket:/var/run/dockershim.sock node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] []  [{kubelet Update v1 2021-05-07 20:00:06 +0000 UTC FieldsV1 
{&#34;f:metadata&#34;:{&#34;f:annotations&#34;:{&#34;.&#34;:{},&#34;f:volumes.kubernetes.io/controller-managed-attach-detach&#34;:{}},&#34;f:labels&#34;:{&#34;.&#34;:{},&#34;f:beta.kubernetes.io/arch&#34;:{},&#34;f:beta.kubernetes.io/os&#34;:{},&#34;f:kubernetes.io/arch&#34;:{},&#34;f:kubernetes.io/hostname&#34;:{},&#34;f:kubernetes.io/os&#34;:{}}},&#34;f:status&#34;:{&#34;f:addresses&#34;:{&#34;.&#34;:{},&#34;k:{\&#34;type\&#34;:\&#34;Hostname\&#34;}&#34;:{&#34;.&#34;:{},&#34;f:address&#34;:{},&#34;f:type&#34;:{}},&#34;k:{\&#34;type\&#34;:\&#34;InternalIP\&#34;}&#34;:{&#34;.&#34;:{},&#34;f:address&#34;:{},&#34;f:type&#34;:{}}},&#34;f:allocatable&#34;:{&#34;.&#34;:{},&#34;f:cpu&#34;:{},&#34;f:ephemeral-storage&#34;:{},&#34;f:hugepages-1Gi&#34;:{},&#34;f:hugepages-2Mi&#34;:{},&#34;f:memory&#34;:{},&#34;f:pods&#34;:{}},&#34;f:capacity&#34;:{&#34;.&#34;:{},&#34;f:cpu&#34;:{},&#34;f:ephemeral-storage&#34;:{},&#34;f:hugepages-1Gi&#34;:{},&#34;f:hugepages-2Mi&#34;:{},&#34;f:memory&#34;:{},&#34;f:pods&#34;:{}},&#34;f:conditions&#34;:{&#34;.&#34;:{},&#34;k:{\&#34;type\&#34;:\&#34;DiskPressure\&#34;}&#34;:{&#34;.&#34;:{},&#34;f:lastHeartbeatTime&#34;:{},&#34;f:lastTransitionTime&#34;:{},&#34;f:message&#34;:{},&#34;f:reason&#34;:{},&#34;f:status&#34;:{},&#34;f:type&#34;:{}},&#34;k:{\&#34;type\&#34;:\&#34;MemoryPressure\&#34;}&#34;:{&#34;.&#34;:{},&#34;f:lastHeartbeatTime&#34;:{},&#34;f:lastTransitionTime&#34;:{},&#34;f:message&#34;:{},&#34;f:reason&#34;:{},&#34;f:status&#34;:{},&#34;f:type&#34;:{}},&#34;k:{\&#34;type\&#34;:\&#34;PIDPressure\&#34;}&#34;:{&#34;.&#34;:{},&#34;f:lastHeartbeatTime&#34;:{},&#34;f:lastTransitionTime&#34;:{},&#34;f:message&#34;:{},&#34;f:reason&#34;:{},&#34;f:status&#34;:{},&#34;f:type&#34;:{}},&#34;k:{\&#34;type\&#34;:\&#34;Ready\&#34;}&#34;:{&#34;.&#34;:{},&#34;f:lastHeartbeatTime&#34;:{},&#34;f:lastTransitionTime&#34;:{},&#34;f:message&#34;:{},&#34;f:reason&#34;:{},&#34;f:status&#34;:{},&#34;f:type&#34;:{}}},&#34;f:daemonEndpoints&#34;:{&#34;f:kub
eletEndpoint&#34;:{&#34;f:Port&#34;:{}}},&#34;f:images&#34;:{},&#34;f:nodeInfo&#34;:{&#34;f:architecture&#34;:{},&#34;f:bootID&#34;:{},&#34;f:containerRuntimeVersion&#34;:{},&#34;f:kernelVersion&#34;:{},&#34;f:kubeProxyVersion&#34;:{},&#34;f:kubeletVersion&#34;:{},&#34;f:machineID&#34;:{},&#34;f:operatingSystem&#34;:{},&#34;f:osImage&#34;:{},&#34;f:systemUUID&#34;:{}}}}} {kubeadm Update v1 2021-05-07 20:00:07 +0000 UTC FieldsV1 {&#34;f:metadata&#34;:{&#34;f:annotations&#34;:{&#34;f:kubeadm.alpha.kubernetes.io/cri-socket&#34;:{}},&#34;f:labels&#34;:{&#34;f:node-role.kubernetes.io/master&#34;:{}}}}} {kube-controller-manager Update v1 2021-05-07 20:01:13 +0000 UTC FieldsV1 {&#34;f:metadata&#34;:{&#34;f:annotations&#34;:{&#34;f:node.alpha.kubernetes.io/ttl&#34;:{}}},&#34;f:spec&#34;:{&#34;f:podCIDR&#34;:{},&#34;f:podCIDRs&#34;:{&#34;.&#34;:{},&#34;v:\&#34;10.244.1.0/24\&#34;&#34;:{}},&#34;f:taints&#34;:{}}}} {flanneld Update v1 2021-05-07 20:02:07 +0000 UTC FieldsV1 {&#34;f:metadata&#34;:{&#34;f:annotations&#34;:{&#34;f:flannel.alpha.coreos.com/backend-data&#34;:{},&#34;f:flannel.alpha.coreos.com/backend-type&#34;:{},&#34;f:flannel.alpha.coreos.com/kube-subnet-manager&#34;:{},&#34;f:flannel.alpha.coreos.com/public-ip&#34;:{}}},&#34;f:status&#34;:{&#34;f:conditions&#34;:{&#34;k:{\&#34;type\&#34;:\&#34;NetworkUnavailable\&#34;}&#34;:{&#34;.&#34;:{},&#34;f:lastHeartbeatTime&#34;:{},&#34;f:lastTransitionTime&#34;:{},&#34;f:message&#34;:{},&#34;f:reason&#34;:{},&#34;f:status&#34;:{},&#34;f:type&#34;:{}}}}}}]},Spec:NodeSpec{PodCIDR:10.244.1.0/24,DoNotUseExternalID:,ProviderID:,Unschedulable:false,Taints:[]Taint{Taint{Key:node-role.kubernetes.io/master,Value:,Effect:NoSchedule,TimeAdded:&lt;nil&gt;,},},ConfigSource:nil,PodCIDRs:[10.244.1.0/24],},Status:NodeStatus{Capacity:ResourceList{cpu: {{80 0} {&lt;nil&gt;} 80 DecimalSI},ephemeral-storage: {{450471260160 0} {&lt;nil&gt;} 439913340Ki BinarySI},hugepages-1Gi: {{0 0} {&lt;nil&gt;} 0 DecimalSI},hugepages-2Mi: {{0 0} 
{&lt;nil&gt;} 0 DecimalSI},memory: {{201234767872 0} {&lt;nil&gt;}  BinarySI},pods: {{110 0} {&lt;nil&gt;} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{79550 -3} {&lt;nil&gt;} 79550m DecimalSI},ephemeral-storage: {{405424133473 0} {&lt;nil&gt;} 405424133473 DecimalSI},hugepages-1Gi: {{0 0} {&lt;nil&gt;} 0 DecimalSI},hugepages-2Mi: {{0 0} {&lt;nil&gt;} 0 DecimalSI},memory: {{200324603904 0} {&lt;nil&gt;}  BinarySI},pods: {{110 0} {&lt;nil&gt;} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2021-05-07 20:04:58 +0000 UTC,LastTransitionTime:2021-05-07 20:04:58 +0000 UTC,Reason:FlannelIsUp,Message:Flannel is running on this node,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2021-05-07 23:26:48 +0000 UTC,LastTransitionTime:2021-05-07 20:00:06 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2021-05-07 23:26:48 +0000 UTC,LastTransitionTime:2021-05-07 20:00:06 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2021-05-07 23:26:48 +0000 UTC,LastTransitionTime:2021-05-07 20:00:06 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2021-05-07 23:26:48 +0000 UTC,LastTransitionTime:2021-05-07 20:02:12 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.10.190.203,},NodeAddress{Type:Hostname,Address:master2,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:cf652b4fb7284de2aaafc5f52f51e312,SystemUUID:00A0DE53-E51D-E711-906E-0017A4403562,BootID:df4da196-5a1c-4b40-aef7-034e79f7a8e2,KernelVersion:3.10.0-1160.25.1.el7.x86_64,OSImage:CentOS Linux 7 
(Core),ContainerRuntimeVersion:docker://19.3.14,KubeletVersion:v1.19.8,KubeProxyVersion:v1.19.8,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[cmk:v1.5.1 localhost:30500/cmk:v1.5.1],SizeBytes:726657349,},ContainerImage{Names:[centos/python-36-centos7@sha256:ac50754646f0d37616515fb30467d8743fb12954260ec36c9ecb5a94499447e0 centos/python-36-centos7:latest],SizeBytes:650061677,},ContainerImage{Names:[nfvpe/multus@sha256:ac1266b87ba44c09dc2a336f0d5dad968fccd389ce1944a85e87b32cd21f7224 nfvpe/multus:v3.4.2],SizeBytes:276587882,},ContainerImage{Names:[kubernetesui/dashboard-amd64@sha256:3af248961c56916aeca8eb4000c15d6cf6a69641ea92f0540865bb37b495932f kubernetesui/dashboard-amd64:v2.1.0],SizeBytes:225733746,},ContainerImage{Names:[k8s.gcr.io/kube-apiserver@sha256:82e0ce4e1d08f3749d05c584fd60986197bfcdf9ce71d4666c71674221d53135 k8s.gcr.io/kube-apiserver:v1.19.8],SizeBytes:118813022,},ContainerImage{Names:[k8s.gcr.io/kube-proxy@sha256:8ed30419d9cf8965854f9ed501159e15deb30c42c3d2a60a278ae169320d140e k8s.gcr.io/kube-proxy:v1.19.8],SizeBytes:117674285,},ContainerImage{Names:[k8s.gcr.io/kube-controller-manager@sha256:2769005fb667dbb936009894d01fe35f5ce1bce45eee80a9ce3c139b9be4080e k8s.gcr.io/kube-controller-manager:v1.19.8],SizeBytes:110805342,},ContainerImage{Names:[quay.io/coreos/etcd@sha256:04833b601fa130512450afa45c4fe484fee1293634f34c7ddc231bd193c74017 quay.io/coreos/etcd:v3.4.13],SizeBytes:83790470,},ContainerImage{Names:[quay.io/coreos/flannel@sha256:34860ea294a018d392e61936f19a7862d5e92039d196cac9176da14b2bbd0fe3 quay.io/coreos/flannel@sha256:ac5322604bcab484955e6dbc507f45a906bde79046667322e3918a8578ab08c8 quay.io/coreos/flannel:v0.13.0 quay.io/coreos/flannel:v0.13.0-amd64],SizeBytes:57156911,},ContainerImage{Names:[quay.io/coreos/kube-rbac-proxy@sha256:e10d1d982dd653db74ca87a1d1ad017bc5ef1aeb651bdea089debf16485b080b 
quay.io/coreos/kube-rbac-proxy:v0.5.0],SizeBytes:46626428,},ContainerImage{Names:[k8s.gcr.io/kube-scheduler@sha256:bb66135ce9a25ac405e43bbae6a2ac766e0efcac0a6a73ef9d1fbb4cf4732c9b k8s.gcr.io/kube-scheduler:v1.19.8],SizeBytes:46510430,},ContainerImage{Names:[k8s.gcr.io/coredns@sha256:73ca82b4ce829766d4f1f10947c3a338888f876fbed0540dc849c89ff256e90c k8s.gcr.io/coredns:1.7.0],SizeBytes:45227747,},ContainerImage{Names:[k8s.gcr.io/cpa/cluster-proportional-autoscaler-amd64@sha256:dce43068853ad396b0fb5ace9a56cc14114e31979e241342d12d04526be1dfcc k8s.gcr.io/cpa/cluster-proportional-autoscaler-amd64:1.8.3],SizeBytes:40647382,},ContainerImage{Names:[kubernetesui/metrics-scraper@sha256:1f977343873ed0e2efd4916a6b2f3075f310ff6fe42ee098f54fc58aa7a28ab7 kubernetesui/metrics-scraper:v1.0.6],SizeBytes:34548789,},ContainerImage{Names:[quay.io/prometheus/node-exporter@sha256:a2f29256e53cc3e0b64d7a472512600b2e9410347d53cdc85b49f659c17e02ee quay.io/prometheus/node-exporter:v0.18.1],SizeBytes:22933477,},ContainerImage{Names:[k8s.gcr.io/pause@sha256:927d98197ec1141a368550822d18fa1c60bdae27b78b0c004f705f548c07814f k8s.gcr.io/pause:3.2],SizeBytes:682696,},ContainerImage{Names:[k8s.gcr.io/pause@sha256:a319ac2280eb7e3a59e252e54b76327cb4a33cf8389053b0d78277f22bbca2fa k8s.gcr.io/pause:3.3],SizeBytes:682696,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},}&#xA;May  7 23:26:51.333: INFO: &#xA;Logging kubelet events for node master2&#xA;May  7 23:26:51.336: INFO: &#xA;Logging pods the kubelet thinks is on node master2&#xA;May  7 23:26:51.344: INFO: kube-flannel-wwlww started at 2021-05-07 20:02:02 +0000 UTC (1+1 container statuses recorded)&#xA;May  7 23:26:51.344: INFO: &#x9;Init container install-cni ready: true, restart count 0&#xA;May  7 23:26:51.344: INFO: &#x9;Container kube-flannel ready: true, restart count 1&#xA;May  7 23:26:51.344: INFO: kube-multus-ds-amd64-44vrh started at 2021-05-07 20:02:10 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.344: INFO: 
&#x9;Container kube-multus ready: true, restart count 1&#xA;May  7 23:26:51.344: INFO: dns-autoscaler-5b7b5c9b6f-flqh2 started at 2021-05-07 20:02:33 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.344: INFO: &#x9;Container autoscaler ready: true, restart count 2&#xA;May  7 23:26:51.344: INFO: node-exporter-6nqvj started at 2021-05-07 20:12:42 +0000 UTC (0+2 container statuses recorded)&#xA;May  7 23:26:51.344: INFO: &#x9;Container kube-rbac-proxy ready: true, restart count 0&#xA;May  7 23:26:51.344: INFO: &#x9;Container node-exporter ready: true, restart count 0&#xA;May  7 23:26:51.344: INFO: kube-apiserver-master2 started at 2021-05-07 20:07:46 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.344: INFO: &#x9;Container kube-apiserver ready: true, restart count 0&#xA;May  7 23:26:51.344: INFO: kube-controller-manager-master2 started at 2021-05-07 20:00:42 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.344: INFO: &#x9;Container kube-controller-manager ready: true, restart count 2&#xA;May  7 23:26:51.344: INFO: kube-scheduler-master2 started at 2021-05-07 20:00:42 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.344: INFO: &#x9;Container kube-scheduler ready: true, restart count 2&#xA;May  7 23:26:51.344: INFO: kube-proxy-fg5dt started at 2021-05-07 20:01:27 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.344: INFO: &#x9;Container kube-proxy ready: true, restart count 1&#xA;W0507 23:26:51.356659      36 metrics_grabber.go:105] Did not receive an external client interface. 
Grabbing metrics from ClusterAutoscaler is disabled.&#xA;May  7 23:26:51.379: INFO: &#xA;Latency metrics for node master2&#xA;May  7 23:26:51.379: INFO: &#xA;Logging node info for node master3&#xA;May  7 23:26:51.382: INFO: Node Info: &amp;Node{ObjectMeta:{master3   /api/v1/nodes/master3 27e1bfdf-bb8c-4924-9488-acacef172e76 75289 0 2021-05-07 20:00:17 +0000 UTC &lt;nil&gt; &lt;nil&gt; map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/os:linux kubernetes.io/arch:amd64 kubernetes.io/hostname:master3 kubernetes.io/os:linux node-role.kubernetes.io/master:] map[flannel.alpha.coreos.com/backend-data:{&#34;VtepMAC&#34;:&#34;de:31:a8:23:73:1a&#34;} flannel.alpha.coreos.com/backend-type:vxlan flannel.alpha.coreos.com/kube-subnet-manager:true flannel.alpha.coreos.com/public-ip:10.10.190.204 kubeadm.alpha.kubernetes.io/cri-socket:/var/run/dockershim.sock node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] []  [{kubelet Update v1 2021-05-07 20:00:17 +0000 UTC FieldsV1 
{&#34;f:metadata&#34;:{&#34;f:annotations&#34;:{&#34;.&#34;:{},&#34;f:volumes.kubernetes.io/controller-managed-attach-detach&#34;:{}},&#34;f:labels&#34;:{&#34;.&#34;:{},&#34;f:beta.kubernetes.io/arch&#34;:{},&#34;f:beta.kubernetes.io/os&#34;:{},&#34;f:kubernetes.io/arch&#34;:{},&#34;f:kubernetes.io/hostname&#34;:{},&#34;f:kubernetes.io/os&#34;:{}}},&#34;f:status&#34;:{&#34;f:addresses&#34;:{&#34;.&#34;:{},&#34;k:{\&#34;type\&#34;:\&#34;Hostname\&#34;}&#34;:{&#34;.&#34;:{},&#34;f:address&#34;:{},&#34;f:type&#34;:{}},&#34;k:{\&#34;type\&#34;:\&#34;InternalIP\&#34;}&#34;:{&#34;.&#34;:{},&#34;f:address&#34;:{},&#34;f:type&#34;:{}}},&#34;f:allocatable&#34;:{&#34;.&#34;:{},&#34;f:cpu&#34;:{},&#34;f:ephemeral-storage&#34;:{},&#34;f:hugepages-1Gi&#34;:{},&#34;f:hugepages-2Mi&#34;:{},&#34;f:memory&#34;:{},&#34;f:pods&#34;:{}},&#34;f:capacity&#34;:{&#34;.&#34;:{},&#34;f:cpu&#34;:{},&#34;f:ephemeral-storage&#34;:{},&#34;f:hugepages-1Gi&#34;:{},&#34;f:hugepages-2Mi&#34;:{},&#34;f:memory&#34;:{},&#34;f:pods&#34;:{}},&#34;f:conditions&#34;:{&#34;.&#34;:{},&#34;k:{\&#34;type\&#34;:\&#34;DiskPressure\&#34;}&#34;:{&#34;.&#34;:{},&#34;f:lastHeartbeatTime&#34;:{},&#34;f:lastTransitionTime&#34;:{},&#34;f:message&#34;:{},&#34;f:reason&#34;:{},&#34;f:status&#34;:{},&#34;f:type&#34;:{}},&#34;k:{\&#34;type\&#34;:\&#34;MemoryPressure\&#34;}&#34;:{&#34;.&#34;:{},&#34;f:lastHeartbeatTime&#34;:{},&#34;f:lastTransitionTime&#34;:{},&#34;f:message&#34;:{},&#34;f:reason&#34;:{},&#34;f:status&#34;:{},&#34;f:type&#34;:{}},&#34;k:{\&#34;type\&#34;:\&#34;PIDPressure\&#34;}&#34;:{&#34;.&#34;:{},&#34;f:lastHeartbeatTime&#34;:{},&#34;f:lastTransitionTime&#34;:{},&#34;f:message&#34;:{},&#34;f:reason&#34;:{},&#34;f:status&#34;:{},&#34;f:type&#34;:{}},&#34;k:{\&#34;type\&#34;:\&#34;Ready\&#34;}&#34;:{&#34;.&#34;:{},&#34;f:lastHeartbeatTime&#34;:{},&#34;f:lastTransitionTime&#34;:{},&#34;f:message&#34;:{},&#34;f:reason&#34;:{},&#34;f:status&#34;:{},&#34;f:type&#34;:{}}},&#34;f:daemonEndpoints&#34;:{&#34;f:kub
eletEndpoint&#34;:{&#34;f:Port&#34;:{}}},&#34;f:images&#34;:{},&#34;f:nodeInfo&#34;:{&#34;f:architecture&#34;:{},&#34;f:bootID&#34;:{},&#34;f:containerRuntimeVersion&#34;:{},&#34;f:kernelVersion&#34;:{},&#34;f:kubeProxyVersion&#34;:{},&#34;f:kubeletVersion&#34;:{},&#34;f:machineID&#34;:{},&#34;f:operatingSystem&#34;:{},&#34;f:osImage&#34;:{},&#34;f:systemUUID&#34;:{}}}}} {kubeadm Update v1 2021-05-07 20:00:18 +0000 UTC FieldsV1 {&#34;f:metadata&#34;:{&#34;f:annotations&#34;:{&#34;f:kubeadm.alpha.kubernetes.io/cri-socket&#34;:{}},&#34;f:labels&#34;:{&#34;f:node-role.kubernetes.io/master&#34;:{}}}}} {flanneld Update v1 2021-05-07 20:02:07 +0000 UTC FieldsV1 {&#34;f:metadata&#34;:{&#34;f:annotations&#34;:{&#34;f:flannel.alpha.coreos.com/backend-data&#34;:{},&#34;f:flannel.alpha.coreos.com/backend-type&#34;:{},&#34;f:flannel.alpha.coreos.com/kube-subnet-manager&#34;:{},&#34;f:flannel.alpha.coreos.com/public-ip&#34;:{}}},&#34;f:status&#34;:{&#34;f:conditions&#34;:{&#34;k:{\&#34;type\&#34;:\&#34;NetworkUnavailable\&#34;}&#34;:{&#34;.&#34;:{},&#34;f:lastHeartbeatTime&#34;:{},&#34;f:lastTransitionTime&#34;:{},&#34;f:message&#34;:{},&#34;f:reason&#34;:{},&#34;f:status&#34;:{},&#34;f:type&#34;:{}}}}}} {kube-controller-manager Update v1 2021-05-07 20:02:12 +0000 UTC FieldsV1 {&#34;f:metadata&#34;:{&#34;f:annotations&#34;:{&#34;f:node.alpha.kubernetes.io/ttl&#34;:{}}},&#34;f:spec&#34;:{&#34;f:podCIDR&#34;:{},&#34;f:podCIDRs&#34;:{&#34;.&#34;:{},&#34;v:\&#34;10.244.2.0/24\&#34;&#34;:{}},&#34;f:taints&#34;:{}}}}]},Spec:NodeSpec{PodCIDR:10.244.2.0/24,DoNotUseExternalID:,ProviderID:,Unschedulable:false,Taints:[]Taint{Taint{Key:node-role.kubernetes.io/master,Value:,Effect:NoSchedule,TimeAdded:&lt;nil&gt;,},},ConfigSource:nil,PodCIDRs:[10.244.2.0/24],},Status:NodeStatus{Capacity:ResourceList{cpu: {{80 0} {&lt;nil&gt;} 80 DecimalSI},ephemeral-storage: {{450471260160 0} {&lt;nil&gt;} 439913340Ki BinarySI},hugepages-1Gi: {{0 0} {&lt;nil&gt;} 0 DecimalSI},hugepages-2Mi: {{0 0} 
{&lt;nil&gt;} 0 DecimalSI},memory: {{201234763776 0} {&lt;nil&gt;} 196518324Ki BinarySI},pods: {{110 0} {&lt;nil&gt;} 110 DecimalSI},},Allocatable:ResourceList{cpu: {{79550 -3} {&lt;nil&gt;} 79550m DecimalSI},ephemeral-storage: {{405424133473 0} {&lt;nil&gt;} 405424133473 DecimalSI},hugepages-1Gi: {{0 0} {&lt;nil&gt;} 0 DecimalSI},hugepages-2Mi: {{0 0} {&lt;nil&gt;} 0 DecimalSI},memory: {{200324599808 0} {&lt;nil&gt;} 195629492Ki BinarySI},pods: {{110 0} {&lt;nil&gt;} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2021-05-07 20:04:10 +0000 UTC,LastTransitionTime:2021-05-07 20:04:10 +0000 UTC,Reason:FlannelIsUp,Message:Flannel is running on this node,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2021-05-07 23:26:49 +0000 UTC,LastTransitionTime:2021-05-07 20:00:17 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2021-05-07 23:26:49 +0000 UTC,LastTransitionTime:2021-05-07 20:00:17 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2021-05-07 23:26:49 +0000 UTC,LastTransitionTime:2021-05-07 20:00:17 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2021-05-07 23:26:49 +0000 UTC,LastTransitionTime:2021-05-07 20:03:45 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready 
status,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.10.190.204,},NodeAddress{Type:Hostname,Address:master3,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:01ba71adbf0d4ef79a9af064c18faf21,SystemUUID:008B1444-141E-E711-906E-0017A4403562,BootID:1692e58d-c13c-425c-b11a-62d901f965ec,KernelVersion:3.10.0-1160.25.1.el7.x86_64,OSImage:CentOS Linux 7 (Core),ContainerRuntimeVersion:docker://19.3.14,KubeletVersion:v1.19.8,KubeProxyVersion:v1.19.8,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[cmk:v1.5.1 localhost:30500/cmk:v1.5.1],SizeBytes:726657349,},ContainerImage{Names:[centos/python-36-centos7@sha256:ac50754646f0d37616515fb30467d8743fb12954260ec36c9ecb5a94499447e0 centos/python-36-centos7:latest],SizeBytes:650061677,},ContainerImage{Names:[nfvpe/multus@sha256:ac1266b87ba44c09dc2a336f0d5dad968fccd389ce1944a85e87b32cd21f7224 nfvpe/multus:v3.4.2],SizeBytes:276587882,},ContainerImage{Names:[kubernetesui/dashboard-amd64@sha256:3af248961c56916aeca8eb4000c15d6cf6a69641ea92f0540865bb37b495932f kubernetesui/dashboard-amd64:v2.1.0],SizeBytes:225733746,},ContainerImage{Names:[k8s.gcr.io/kube-apiserver@sha256:82e0ce4e1d08f3749d05c584fd60986197bfcdf9ce71d4666c71674221d53135 k8s.gcr.io/kube-apiserver:v1.19.8],SizeBytes:118813022,},ContainerImage{Names:[k8s.gcr.io/kube-proxy@sha256:8ed30419d9cf8965854f9ed501159e15deb30c42c3d2a60a278ae169320d140e k8s.gcr.io/kube-proxy:v1.19.8],SizeBytes:117674285,},ContainerImage{Names:[k8s.gcr.io/kube-controller-manager@sha256:2769005fb667dbb936009894d01fe35f5ce1bce45eee80a9ce3c139b9be4080e k8s.gcr.io/kube-controller-manager:v1.19.8],SizeBytes:110805342,},ContainerImage{Names:[quay.io/coreos/etcd@sha256:04833b601fa130512450afa45c4fe484fee1293634f34c7ddc231bd193c74017 
quay.io/coreos/etcd:v3.4.13],SizeBytes:83790470,},ContainerImage{Names:[quay.io/coreos/flannel@sha256:34860ea294a018d392e61936f19a7862d5e92039d196cac9176da14b2bbd0fe3 quay.io/coreos/flannel@sha256:ac5322604bcab484955e6dbc507f45a906bde79046667322e3918a8578ab08c8 quay.io/coreos/flannel:v0.13.0 quay.io/coreos/flannel:v0.13.0-amd64],SizeBytes:57156911,},ContainerImage{Names:[quay.io/coreos/kube-rbac-proxy@sha256:e10d1d982dd653db74ca87a1d1ad017bc5ef1aeb651bdea089debf16485b080b quay.io/coreos/kube-rbac-proxy:v0.5.0],SizeBytes:46626428,},ContainerImage{Names:[k8s.gcr.io/kube-scheduler@sha256:bb66135ce9a25ac405e43bbae6a2ac766e0efcac0a6a73ef9d1fbb4cf4732c9b k8s.gcr.io/kube-scheduler:v1.19.8],SizeBytes:46510430,},ContainerImage{Names:[k8s.gcr.io/coredns@sha256:73ca82b4ce829766d4f1f10947c3a338888f876fbed0540dc849c89ff256e90c k8s.gcr.io/coredns:1.7.0],SizeBytes:45227747,},ContainerImage{Names:[k8s.gcr.io/cpa/cluster-proportional-autoscaler-amd64@sha256:dce43068853ad396b0fb5ace9a56cc14114e31979e241342d12d04526be1dfcc k8s.gcr.io/cpa/cluster-proportional-autoscaler-amd64:1.8.3],SizeBytes:40647382,},ContainerImage{Names:[kubernetesui/metrics-scraper@sha256:1f977343873ed0e2efd4916a6b2f3075f310ff6fe42ee098f54fc58aa7a28ab7 kubernetesui/metrics-scraper:v1.0.6],SizeBytes:34548789,},ContainerImage{Names:[quay.io/prometheus/node-exporter@sha256:a2f29256e53cc3e0b64d7a472512600b2e9410347d53cdc85b49f659c17e02ee quay.io/prometheus/node-exporter:v0.18.1],SizeBytes:22933477,},ContainerImage{Names:[k8s.gcr.io/pause@sha256:927d98197ec1141a368550822d18fa1c60bdae27b78b0c004f705f548c07814f k8s.gcr.io/pause:3.2],SizeBytes:682696,},ContainerImage{Names:[k8s.gcr.io/pause@sha256:a319ac2280eb7e3a59e252e54b76327cb4a33cf8389053b0d78277f22bbca2fa k8s.gcr.io/pause:3.3],SizeBytes:682696,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},}&#xA;May  7 23:26:51.382: INFO: &#xA;Logging kubelet events for node master3&#xA;May  7 23:26:51.384: INFO: &#xA;Logging pods the kubelet thinks is on node 
master3&#xA;May  7 23:26:51.392: INFO: node-exporter-tsk7w started at 2021-05-07 20:12:42 +0000 UTC (0+2 container statuses recorded)&#xA;May  7 23:26:51.392: INFO: &#x9;Container kube-rbac-proxy ready: true, restart count 0&#xA;May  7 23:26:51.392: INFO: &#x9;Container node-exporter ready: true, restart count 0&#xA;May  7 23:26:51.392: INFO: kube-apiserver-master3 started at 2021-05-07 20:03:45 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.392: INFO: &#x9;Container kube-apiserver ready: true, restart count 0&#xA;May  7 23:26:51.392: INFO: kube-controller-manager-master3 started at 2021-05-07 20:03:45 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.393: INFO: &#x9;Container kube-controller-manager ready: true, restart count 3&#xA;May  7 23:26:51.393: INFO: kube-scheduler-master3 started at 2021-05-07 20:00:42 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.393: INFO: &#x9;Container kube-scheduler ready: true, restart count 2&#xA;May  7 23:26:51.393: INFO: kube-proxy-lvj72 started at 2021-05-07 20:01:27 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.393: INFO: &#x9;Container kube-proxy ready: true, restart count 1&#xA;May  7 23:26:51.393: INFO: kube-flannel-j2xn4 started at 2021-05-07 20:02:02 +0000 UTC (1+1 container statuses recorded)&#xA;May  7 23:26:51.393: INFO: &#x9;Init container install-cni ready: true, restart count 1&#xA;May  7 23:26:51.393: INFO: &#x9;Container kube-flannel ready: true, restart count 2&#xA;May  7 23:26:51.393: INFO: kube-multus-ds-amd64-hhqgw started at 2021-05-07 20:02:10 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.393: INFO: &#x9;Container kube-multus ready: true, restart count 1&#xA;May  7 23:26:51.393: INFO: coredns-7677f9bb54-pj7xl started at 2021-05-07 20:02:35 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.393: INFO: &#x9;Container coredns ready: true, restart count 2&#xA;W0507 23:26:51.406705      36 metrics_grabber.go:105] Did 
not receive an external client interface. Grabbing metrics from ClusterAutoscaler is disabled.&#xA;May  7 23:26:51.430: INFO: &#xA;Latency metrics for node master3&#xA;May  7 23:26:51.430: INFO: &#xA;Logging node info for node node1&#xA;May  7 23:26:51.434: INFO: Node Info: &amp;Node{ObjectMeta:{node1   /api/v1/nodes/node1 24d047f7-eb29-4f1d-96d0-63b225220bd5 75255 0 2021-05-07 20:01:25 +0000 UTC &lt;nil&gt; &lt;nil&gt; map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/os:linux cmk.intel.com/cmk-node:true feature.node.kubernetes.io/cpu-cpuid.ADX:true feature.node.kubernetes.io/cpu-cpuid.AESNI:true feature.node.kubernetes.io/cpu-cpuid.AVX:true feature.node.kubernetes.io/cpu-cpuid.AVX2:true feature.node.kubernetes.io/cpu-cpuid.AVX512BW:true feature.node.kubernetes.io/cpu-cpuid.AVX512CD:true feature.node.kubernetes.io/cpu-cpuid.AVX512DQ:true feature.node.kubernetes.io/cpu-cpuid.AVX512F:true feature.node.kubernetes.io/cpu-cpuid.AVX512VL:true feature.node.kubernetes.io/cpu-cpuid.FMA3:true feature.node.kubernetes.io/cpu-cpuid.HLE:true feature.node.kubernetes.io/cpu-cpuid.IBPB:true feature.node.kubernetes.io/cpu-cpuid.MPX:true feature.node.kubernetes.io/cpu-cpuid.RTM:true feature.node.kubernetes.io/cpu-cpuid.STIBP:true feature.node.kubernetes.io/cpu-cpuid.VMX:true feature.node.kubernetes.io/cpu-hardware_multithreading:true feature.node.kubernetes.io/cpu-pstate.turbo:true feature.node.kubernetes.io/cpu-rdt.RDTCMT:true feature.node.kubernetes.io/cpu-rdt.RDTL3CA:true feature.node.kubernetes.io/cpu-rdt.RDTMBA:true feature.node.kubernetes.io/cpu-rdt.RDTMBM:true feature.node.kubernetes.io/cpu-rdt.RDTMON:true feature.node.kubernetes.io/kernel-config.NO_HZ:true feature.node.kubernetes.io/kernel-config.NO_HZ_FULL:true feature.node.kubernetes.io/kernel-selinux.enabled:true feature.node.kubernetes.io/kernel-version.full:3.10.0-1160.25.1.el7.x86_64 feature.node.kubernetes.io/kernel-version.major:3 feature.node.kubernetes.io/kernel-version.minor:10 
feature.node.kubernetes.io/kernel-version.revision:0 feature.node.kubernetes.io/memory-numa:true feature.node.kubernetes.io/network-sriov.capable:true feature.node.kubernetes.io/network-sriov.configured:true feature.node.kubernetes.io/pci-0300_1a03.present:true feature.node.kubernetes.io/storage-nonrotationaldisk:true feature.node.kubernetes.io/system-os_release.ID:centos feature.node.kubernetes.io/system-os_release.VERSION_ID:7 feature.node.kubernetes.io/system-os_release.VERSION_ID.major:7 kubernetes.io/arch:amd64 kubernetes.io/hostname:node1 kubernetes.io/os:linux] map[flannel.alpha.coreos.com/backend-data:{&#34;VtepMAC&#34;:&#34;da:1d:6f:d2:a8:b4&#34;} flannel.alpha.coreos.com/backend-type:vxlan flannel.alpha.coreos.com/kube-subnet-manager:true flannel.alpha.coreos.com/public-ip:10.10.190.207 kubeadm.alpha.kubernetes.io/cri-socket:/var/run/dockershim.sock nfd.node.kubernetes.io/extended-resources: nfd.node.kubernetes.io/feature-labels:cpu-cpuid.ADX,cpu-cpuid.AESNI,cpu-cpuid.AVX,cpu-cpuid.AVX2,cpu-cpuid.AVX512BW,cpu-cpuid.AVX512CD,cpu-cpuid.AVX512DQ,cpu-cpuid.AVX512F,cpu-cpuid.AVX512VL,cpu-cpuid.FMA3,cpu-cpuid.HLE,cpu-cpuid.IBPB,cpu-cpuid.MPX,cpu-cpuid.RTM,cpu-cpuid.STIBP,cpu-cpuid.VMX,cpu-hardware_multithreading,cpu-pstate.turbo,cpu-rdt.RDTCMT,cpu-rdt.RDTL3CA,cpu-rdt.RDTMBA,cpu-rdt.RDTMBM,cpu-rdt.RDTMON,kernel-config.NO_HZ,kernel-config.NO_HZ_FULL,kernel-selinux.enabled,kernel-version.full,kernel-version.major,kernel-version.minor,kernel-version.revision,memory-numa,network-sriov.capable,network-sriov.configured,pci-0300_1a03.present,storage-nonrotationaldisk,system-os_release.ID,system-os_release.VERSION_ID,system-os_release.VERSION_ID.major nfd.node.kubernetes.io/worker.version:v0.7.0 node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] []  [{kube-controller-manager Update v1 2021-05-07 20:01:25 +0000 UTC FieldsV1 
{&#34;f:metadata&#34;:{&#34;f:annotations&#34;:{&#34;f:node.alpha.kubernetes.io/ttl&#34;:{}}},&#34;f:spec&#34;:{&#34;f:podCIDR&#34;:{},&#34;f:podCIDRs&#34;:{&#34;.&#34;:{},&#34;v:\&#34;10.244.3.0/24\&#34;&#34;:{}}}}} {kubeadm Update v1 2021-05-07 20:01:25 +0000 UTC FieldsV1 {&#34;f:metadata&#34;:{&#34;f:annotations&#34;:{&#34;f:kubeadm.alpha.kubernetes.io/cri-socket&#34;:{}}}}} {flanneld Update v1 2021-05-07 20:02:06 +0000 UTC FieldsV1 {&#34;f:metadata&#34;:{&#34;f:annotations&#34;:{&#34;f:flannel.alpha.coreos.com/backend-data&#34;:{},&#34;f:flannel.alpha.coreos.com/backend-type&#34;:{},&#34;f:flannel.alpha.coreos.com/kube-subnet-manager&#34;:{},&#34;f:flannel.alpha.coreos.com/public-ip&#34;:{}}},&#34;f:status&#34;:{&#34;f:conditions&#34;:{&#34;k:{\&#34;type\&#34;:\&#34;NetworkUnavailable\&#34;}&#34;:{&#34;.&#34;:{},&#34;f:lastHeartbeatTime&#34;:{},&#34;f:lastTransitionTime&#34;:{},&#34;f:message&#34;:{},&#34;f:reason&#34;:{},&#34;f:status&#34;:{},&#34;f:type&#34;:{}}}}}} {nfd-master Update v1 2021-05-07 20:08:43 +0000 UTC FieldsV1 
{&#34;f:metadata&#34;:{&#34;f:annotations&#34;:{&#34;f:nfd.node.kubernetes.io/extended-resources&#34;:{},&#34;f:nfd.node.kubernetes.io/feature-labels&#34;:{},&#34;f:nfd.node.kubernetes.io/worker.version&#34;:{}},&#34;f:labels&#34;:{&#34;f:feature.node.kubernetes.io/cpu-cpuid.ADX&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-cpuid.AESNI&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-cpuid.AVX&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-cpuid.AVX2&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-cpuid.AVX512BW&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-cpuid.AVX512CD&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-cpuid.AVX512DQ&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-cpuid.AVX512F&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-cpuid.AVX512VL&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-cpuid.FMA3&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-cpuid.HLE&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-cpuid.IBPB&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-cpuid.MPX&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-cpuid.RTM&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-cpuid.STIBP&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-cpuid.VMX&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-hardware_multithreading&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-pstate.turbo&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-rdt.RDTCMT&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-rdt.RDTL3CA&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-rdt.RDTMBA&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-rdt.RDTMBM&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-rdt.RDTMON&#34;:{},&#34;f:feature.node.kubernetes.io/kernel-config.NO_HZ&#34;:{},&#34;f:feature.node.kubernetes.io/kernel-config.NO_HZ_FULL&#34;:{},&#34;f:feature.node.kubernetes.io/kernel-selinux.enabled&#34;:{},&#34;f:feature.node.kubernetes.io/kernel-version.full&#34;:{},&#34;f:feature.node.kubernetes.io/kernel-version.major&#34;:{},&#34;f:feature.node.kubernetes.io/kernel-version.minor&#34;:{},&#34;f:feature.node.kubernetes.io/kernel-ver
sion.revision&#34;:{},&#34;f:feature.node.kubernetes.io/memory-numa&#34;:{},&#34;f:feature.node.kubernetes.io/network-sriov.capable&#34;:{},&#34;f:feature.node.kubernetes.io/network-sriov.configured&#34;:{},&#34;f:feature.node.kubernetes.io/pci-0300_1a03.present&#34;:{},&#34;f:feature.node.kubernetes.io/storage-nonrotationaldisk&#34;:{},&#34;f:feature.node.kubernetes.io/system-os_release.ID&#34;:{},&#34;f:feature.node.kubernetes.io/system-os_release.VERSION_ID&#34;:{},&#34;f:feature.node.kubernetes.io/system-os_release.VERSION_ID.major&#34;:{}}}}} {Swagger-Codegen Update v1 2021-05-07 20:11:09 +0000 UTC FieldsV1 {&#34;f:metadata&#34;:{&#34;f:labels&#34;:{&#34;f:cmk.intel.com/cmk-node&#34;:{}}},&#34;f:status&#34;:{&#34;f:capacity&#34;:{&#34;f:cmk.intel.com/exclusive-cores&#34;:{}}}}} {kubelet Update v1 2021-05-07 22:47:48 +0000 UTC FieldsV1 {&#34;f:metadata&#34;:{&#34;f:annotations&#34;:{&#34;.&#34;:{},&#34;f:volumes.kubernetes.io/controller-managed-attach-detach&#34;:{}},&#34;f:labels&#34;:{&#34;.&#34;:{},&#34;f:beta.kubernetes.io/arch&#34;:{},&#34;f:beta.kubernetes.io/os&#34;:{},&#34;f:kubernetes.io/arch&#34;:{},&#34;f:kubernetes.io/hostname&#34;:{},&#34;f:kubernetes.io/os&#34;:{}}},&#34;f:status&#34;:{&#34;f:addresses&#34;:{&#34;.&#34;:{},&#34;k:{\&#34;type\&#34;:\&#34;Hostname\&#34;}&#34;:{&#34;.&#34;:{},&#34;f:address&#34;:{},&#34;f:type&#34;:{}},&#34;k:{\&#34;type\&#34;:\&#34;InternalIP\&#34;}&#34;:{&#34;.&#34;:{},&#34;f:address&#34;:{},&#34;f:type&#34;:{}}},&#34;f:allocatable&#34;:{&#34;.&#34;:{},&#34;f:cmk.intel.com/exclusive-cores&#34;:{},&#34;f:cpu&#34;:{},&#34;f:ephemeral-storage&#34;:{},&#34;f:example.com/fakecpu&#34;:{},&#34;f:hugepages-1Gi&#34;:{},&#34;f:hugepages-2Mi&#34;:{},&#34;f:intel.com/intel_sriov_netdevice&#34;:{},&#34;f:memory&#34;:{},&#34;f:pods&#34;:{}},&#34;f:capacity&#34;:{&#34;.&#34;:{},&#34;f:cpu&#34;:{},&#34;f:ephemeral-storage&#34;:{},&#34;f:hugepages-1Gi&#34;:{},&#34;f:hugepages-2Mi&#34;:{},&#34;f:intel.com/intel_sriov_netdevice&#34;:{
},&#34;f:memory&#34;:{},&#34;f:pods&#34;:{}},&#34;f:conditions&#34;:{&#34;.&#34;:{},&#34;k:{\&#34;type\&#34;:\&#34;DiskPressure\&#34;}&#34;:{&#34;.&#34;:{},&#34;f:lastHeartbeatTime&#34;:{},&#34;f:lastTransitionTime&#34;:{},&#34;f:message&#34;:{},&#34;f:reason&#34;:{},&#34;f:status&#34;:{},&#34;f:type&#34;:{}},&#34;k:{\&#34;type\&#34;:\&#34;MemoryPressure\&#34;}&#34;:{&#34;.&#34;:{},&#34;f:lastHeartbeatTime&#34;:{},&#34;f:lastTransitionTime&#34;:{},&#34;f:message&#34;:{},&#34;f:reason&#34;:{},&#34;f:status&#34;:{},&#34;f:type&#34;:{}},&#34;k:{\&#34;type\&#34;:\&#34;PIDPressure\&#34;}&#34;:{&#34;.&#34;:{},&#34;f:lastHeartbeatTime&#34;:{},&#34;f:lastTransitionTime&#34;:{},&#34;f:message&#34;:{},&#34;f:reason&#34;:{},&#34;f:status&#34;:{},&#34;f:type&#34;:{}},&#34;k:{\&#34;type\&#34;:\&#34;Ready\&#34;}&#34;:{&#34;.&#34;:{},&#34;f:lastHeartbeatTime&#34;:{},&#34;f:lastTransitionTime&#34;:{},&#34;f:message&#34;:{},&#34;f:reason&#34;:{},&#34;f:status&#34;:{},&#34;f:type&#34;:{}}},&#34;f:daemonEndpoints&#34;:{&#34;f:kubeletEndpoint&#34;:{&#34;f:Port&#34;:{}}},&#34;f:images&#34;:{},&#34;f:nodeInfo&#34;:{&#34;f:architecture&#34;:{},&#34;f:bootID&#34;:{},&#34;f:containerRuntimeVersion&#34;:{},&#34;f:kernelVersion&#34;:{},&#34;f:kubeProxyVersion&#34;:{},&#34;f:kubeletVersion&#34;:{},&#34;f:machineID&#34;:{},&#34;f:operatingSystem&#34;:{},&#34;f:osImage&#34;:{},&#34;f:systemUUID&#34;:{}}}}} {e2e.test Update v1 2021-05-07 22:53:06 +0000 UTC FieldsV1 {&#34;f:status&#34;:{&#34;f:capacity&#34;:{&#34;f:example.com/fakecpu&#34;:{}}}}}]},Spec:NodeSpec{PodCIDR:10.244.3.0/24,DoNotUseExternalID:,ProviderID:,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.244.3.0/24],},Status:NodeStatus{Capacity:ResourceList{cmk.intel.com/exclusive-cores: {{3 0} {&lt;nil&gt;} 3 DecimalSI},cpu: {{80 0} {&lt;nil&gt;} 80 DecimalSI},ephemeral-storage: {{450471260160 0} {&lt;nil&gt;} 439913340Ki BinarySI},example.com/fakecpu: {{1 3} {&lt;nil&gt;} 1k DecimalSI},hugepages-1Gi: {{0 0} 
{&lt;nil&gt;} 0 DecimalSI},hugepages-2Mi: {{21474836480 0} {&lt;nil&gt;} 20Gi BinarySI},intel.com/intel_sriov_netdevice: {{4 0} {&lt;nil&gt;} 4 DecimalSI},memory: {{201269633024 0} {&lt;nil&gt;}  BinarySI},pods: {{110 0} {&lt;nil&gt;} 110 DecimalSI},},Allocatable:ResourceList{cmk.intel.com/exclusive-cores: {{3 0} {&lt;nil&gt;} 3 DecimalSI},cpu: {{77 0} {&lt;nil&gt;} 77 DecimalSI},ephemeral-storage: {{405424133473 0} {&lt;nil&gt;} 405424133473 DecimalSI},example.com/fakecpu: {{1 3} {&lt;nil&gt;} 1k DecimalSI},hugepages-1Gi: {{0 0} {&lt;nil&gt;} 0 DecimalSI},hugepages-2Mi: {{21474836480 0} {&lt;nil&gt;} 20Gi BinarySI},intel.com/intel_sriov_netdevice: {{4 0} {&lt;nil&gt;} 4 DecimalSI},memory: {{178884632576 0} {&lt;nil&gt;}  BinarySI},pods: {{110 0} {&lt;nil&gt;} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2021-05-07 20:03:55 +0000 UTC,LastTransitionTime:2021-05-07 20:03:55 +0000 UTC,Reason:FlannelIsUp,Message:Flannel is running on this node,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2021-05-07 23:26:44 +0000 UTC,LastTransitionTime:2021-05-07 20:01:25 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2021-05-07 23:26:44 +0000 UTC,LastTransitionTime:2021-05-07 20:01:25 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2021-05-07 23:26:44 +0000 UTC,LastTransitionTime:2021-05-07 20:01:25 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2021-05-07 23:26:44 +0000 UTC,LastTransitionTime:2021-05-07 20:03:44 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready 
status,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.10.190.207,},NodeAddress{Type:Hostname,Address:node1,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:d79c3a817fbd4f03a173930e545bb174,SystemUUID:00CDA902-D022-E711-906E-0017A4403562,BootID:53771151-d654-4c88-80fe-48032d3317a5,KernelVersion:3.10.0-1160.25.1.el7.x86_64,OSImage:CentOS Linux 7 (Core),ContainerRuntimeVersion:docker://19.3.14,KubeletVersion:v1.19.8,KubeProxyVersion:v1.19.8,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[localhost:30500/barometer-collectd@sha256:facbba279b626f9254dfef1616802a1eadebffadd9d37bde82b5562163121c72 localhost:30500/barometer-collectd:stable],SizeBytes:1464089363,},ContainerImage{Names:[&lt;none&gt;@&lt;none&gt; &lt;none&gt;:&lt;none&gt;],SizeBytes:1002488002,},ContainerImage{Names:[opnfv/barometer-collectd@sha256:ed5c574f653e2a39e784ff322033a2319aafde7366c803a88f20f7a2a8bc1efb opnfv/barometer-collectd:stable],SizeBytes:825413035,},ContainerImage{Names:[localhost:30500/cmk@sha256:9955df6c40f7a908013f9d77afdcd5df3df7f47ffb0eb09bbcb27d61112121ec cmk:v1.5.1 localhost:30500/cmk:v1.5.1],SizeBytes:726657349,},ContainerImage{Names:[centos/python-36-centos7@sha256:ac50754646f0d37616515fb30467d8743fb12954260ec36c9ecb5a94499447e0 centos/python-36-centos7:latest],SizeBytes:650061677,},ContainerImage{Names:[golang@sha256:1636899c10870ab66c48d960a9df620f4f9e86a0c72fbacf36032d27404e7e6c golang:alpine3.12],SizeBytes:301156062,},ContainerImage{Names:[nfvpe/multus@sha256:ac1266b87ba44c09dc2a336f0d5dad968fccd389ce1944a85e87b32cd21f7224 nfvpe/multus:v3.4.2],SizeBytes:276587882,},ContainerImage{Names:[gcr.io/kubernetes-e2e-test-images/jessie-dnsutils@sha256:ad583e33cb284f7ef046673809b146ec4053cda19b54a85d2b180a86169715eb 
gcr.io/kubernetes-e2e-test-images/jessie-dnsutils:1.0],SizeBytes:195659796,},ContainerImage{Names:[grafana/grafana@sha256:89304bc2335f4976618548d7b93d165ed67369d3a051d2f627fc4e0aa3d0aff1 grafana/grafana:7.1.0],SizeBytes:179601493,},ContainerImage{Names:[quay.io/prometheus/prometheus@sha256:d4ba4dd1a9ebb90916d0bfed3c204adcb118ed24546bf8dd2e6b30fc0fd2009e quay.io/prometheus/prometheus:v2.20.0],SizeBytes:144886595,},ContainerImage{Names:[nginx@sha256:75a55d33ecc73c2a242450a9f1cc858499d468f077ea942867e662c247b5e412 nginx:1.19],SizeBytes:133117205,},ContainerImage{Names:[httpd@sha256:eb8ccf084cf3e80eece1add239effefd171eb39adbc154d33c14260d905d4060 httpd:2.4.38-alpine],SizeBytes:123781643,},ContainerImage{Names:[k8s.gcr.io/kube-apiserver@sha256:82e0ce4e1d08f3749d05c584fd60986197bfcdf9ce71d4666c71674221d53135 k8s.gcr.io/kube-apiserver:v1.19.8],SizeBytes:118813022,},ContainerImage{Names:[k8s.gcr.io/kube-proxy@sha256:8ed30419d9cf8965854f9ed501159e15deb30c42c3d2a60a278ae169320d140e k8s.gcr.io/kube-proxy:v1.19.8],SizeBytes:117674285,},ContainerImage{Names:[k8s.gcr.io/e2e-test-images/agnhost@sha256:17e61a0b9e498b6c73ed97670906be3d5a3ae394739c1bd5b619e1a004885cf0 k8s.gcr.io/e2e-test-images/agnhost:2.20],SizeBytes:113869866,},ContainerImage{Names:[k8s.gcr.io/kube-controller-manager@sha256:2769005fb667dbb936009894d01fe35f5ce1bce45eee80a9ce3c139b9be4080e k8s.gcr.io/kube-controller-manager:v1.19.8],SizeBytes:110805342,},ContainerImage{Names:[gcr.io/k8s-staging-nfd/node-feature-discovery@sha256:5d116c2c340be665a2c8adc9aca7f91396bd5cbde4add4fdc8dab95d8db43425 gcr.io/k8s-staging-nfd/node-feature-discovery:v0.7.0],SizeBytes:108309584,},ContainerImage{Names:[quay.io/coreos/flannel@sha256:34860ea294a018d392e61936f19a7862d5e92039d196cac9176da14b2bbd0fe3 quay.io/coreos/flannel@sha256:ac5322604bcab484955e6dbc507f45a906bde79046667322e3918a8578ab08c8 quay.io/coreos/flannel:v0.13.0 
quay.io/coreos/flannel:v0.13.0-amd64],SizeBytes:57156911,},ContainerImage{Names:[directxman12/k8s-prometheus-adapter-amd64@sha256:b63dc612e3cb73f79d2401a4516f794f9f0a83002600ca72e675e41baecff437 directxman12/k8s-prometheus-adapter-amd64:v0.6.0],SizeBytes:53267842,},ContainerImage{Names:[quay.io/coreos/kube-rbac-proxy@sha256:e10d1d982dd653db74ca87a1d1ad017bc5ef1aeb651bdea089debf16485b080b quay.io/coreos/kube-rbac-proxy:v0.5.0],SizeBytes:46626428,},ContainerImage{Names:[k8s.gcr.io/kube-scheduler@sha256:bb66135ce9a25ac405e43bbae6a2ac766e0efcac0a6a73ef9d1fbb4cf4732c9b k8s.gcr.io/kube-scheduler:v1.19.8],SizeBytes:46510430,},ContainerImage{Names:[localhost:30500/sriov-device-plugin@sha256:bae53f2ec899d23f9342d730c376a1ee3805e96fd1e5e4857e65085e6529557d nfvpe/sriov-device-plugin:latest localhost:30500/sriov-device-plugin:v3.3.1],SizeBytes:44392820,},ContainerImage{Names:[gcr.io/kubernetes-e2e-test-images/nonroot@sha256:4bd7ae247de5c988700233c5a4b55e804ffe90f8c66ae64853f1dae37b847213 gcr.io/kubernetes-e2e-test-images/nonroot:1.0],SizeBytes:42321438,},ContainerImage{Names:[kubernetesui/metrics-scraper@sha256:1f977343873ed0e2efd4916a6b2f3075f310ff6fe42ee098f54fc58aa7a28ab7 kubernetesui/metrics-scraper:v1.0.6],SizeBytes:34548789,},ContainerImage{Names:[quay.io/prometheus/node-exporter@sha256:a2f29256e53cc3e0b64d7a472512600b2e9410347d53cdc85b49f659c17e02ee quay.io/prometheus/node-exporter:v0.18.1],SizeBytes:22933477,},ContainerImage{Names:[prom/collectd-exporter@sha256:73fbda4d24421bff3b741c27efc36f1b6fbe7c57c378d56d4ff78101cd556654],SizeBytes:17463681,},ContainerImage{Names:[nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 nginx:1.14-alpine],SizeBytes:16032814,},ContainerImage{Names:[gcr.io/google-samples/hello-go-gke@sha256:4ea9cd3d35f81fc91bdebca3fae50c180a1048be0613ad0f811595365040396e 
gcr.io/google-samples/hello-go-gke:1.0],SizeBytes:11443478,},ContainerImage{Names:[quay.io/coreos/prometheus-config-reloader@sha256:c679a143b24b7731ad1577a9865aa3805426cbf1b25e30807b951dff68466ffd quay.io/coreos/prometheus-config-reloader:v0.40.0],SizeBytes:10131705,},ContainerImage{Names:[jimmidyson/configmap-reload@sha256:d107c7a235c266273b1c3502a391fec374430e5625539403d0de797fa9c556a2 jimmidyson/configmap-reload:v0.3.0],SizeBytes:9700438,},ContainerImage{Names:[gcr.io/kubernetes-e2e-test-images/nonewprivs@sha256:10066e9039219449fe3c81f38fe01928f87914150768ab81b62a468e51fa7411 gcr.io/kubernetes-e2e-test-images/nonewprivs:1.0],SizeBytes:6757579,},ContainerImage{Names:[appropriate/curl@sha256:027a0ad3c69d085fea765afca9984787b780c172cead6502fec989198b98d8bb appropriate/curl:edge],SizeBytes:5654234,},ContainerImage{Names:[alpine@sha256:36553b10a4947067b9fbb7d532951066293a68eae893beba1d9235f7d11a20ad alpine:3.12],SizeBytes:5581415,},ContainerImage{Names:[gcr.io/kubernetes-e2e-test-images/nautilus@sha256:33a732d4c42a266912a5091598a0f07653c9134db4b8d571690d8afd509e0bfc gcr.io/kubernetes-e2e-test-images/nautilus:1.0],SizeBytes:4753501,},ContainerImage{Names:[gcr.io/authenticated-image-pulling/alpine@sha256:7ff177862cb50c602bfe81f805969412e619c054a2bbead977d0c276988aa4a0 gcr.io/authenticated-image-pulling/alpine:3.7],SizeBytes:4206620,},ContainerImage{Names:[busybox@sha256:8ccbac733d19c0dd4d70b4f0c1e12245b5fa3ad24758a11035ee505c629c0796 busybox:1.29],SizeBytes:1154361,},ContainerImage{Names:[busybox@sha256:141c253bc4c3fd0a201d32dc1f493bcf3fff003b6df416dea4f41046e0f37d47 busybox:1.28],SizeBytes:1146369,},ContainerImage{Names:[k8s.gcr.io/pause@sha256:927d98197ec1141a368550822d18fa1c60bdae27b78b0c004f705f548c07814f k8s.gcr.io/pause:3.2],SizeBytes:682696,},ContainerImage{Names:[k8s.gcr.io/pause@sha256:a319ac2280eb7e3a59e252e54b76327cb4a33cf8389053b0d78277f22bbca2fa 
k8s.gcr.io/pause:3.3],SizeBytes:682696,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},}&#xA;May  7 23:26:51.434: INFO: &#xA;Logging kubelet events for node node1&#xA;May  7 23:26:51.437: INFO: &#xA;Logging pods the kubelet thinks is on node node1&#xA;May  7 23:26:51.451: INFO: concurrent-1620429960-n8rgs started at 2021-05-07 23:26:09 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.451: INFO: &#x9;Container c ready: false, restart count 0&#xA;May  7 23:26:51.452: INFO: webserver-7ccc6798d4-gcnhb started at 2021-05-07 23:21:43 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.452: INFO: &#x9;Container httpd ready: false, restart count 0&#xA;May  7 23:26:51.452: INFO: cmk-init-discover-node1-krbjn started at 2021-05-07 20:11:05 +0000 UTC (0+3 container statuses recorded)&#xA;May  7 23:26:51.452: INFO: &#x9;Container discover ready: false, restart count 0&#xA;May  7 23:26:51.452: INFO: &#x9;Container init ready: false, restart count 0&#xA;May  7 23:26:51.452: INFO: &#x9;Container install ready: false, restart count 0&#xA;May  7 23:26:51.452: INFO: webserver-57c6549b9c-n675z started at 2021-05-07 23:21:33 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.452: INFO: &#x9;Container httpd ready: false, restart count 0&#xA;May  7 23:26:51.452: INFO: kube-proxy-bms7z started at 2021-05-07 20:01:27 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.452: INFO: &#x9;Container kube-proxy ready: true, restart count 2&#xA;May  7 23:26:51.452: INFO: webserver-69b69768db-9p47q started at 2021-05-07 23:21:33 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.452: INFO: &#x9;Container httpd ready: false, restart count 0&#xA;May  7 23:26:51.452: INFO: cmk-gjhf2 started at 2021-05-07 20:11:48 +0000 UTC (0+2 container statuses recorded)&#xA;May  7 23:26:51.452: INFO: &#x9;Container nodereport ready: true, restart count 0&#xA;May  7 23:26:51.452: INFO: &#x9;Container reconcile ready: true, restart 
count 0&#xA;May  7 23:26:51.452: INFO: prometheus-k8s-0 started at 2021-05-07 20:12:50 +0000 UTC (0+5 container statuses recorded)&#xA;May  7 23:26:51.452: INFO: &#x9;Container custom-metrics-apiserver ready: true, restart count 0&#xA;May  7 23:26:51.452: INFO: &#x9;Container grafana ready: true, restart count 0&#xA;May  7 23:26:51.452: INFO: &#x9;Container prometheus ready: true, restart count 1&#xA;May  7 23:26:51.452: INFO: &#x9;Container prometheus-config-reloader ready: true, restart count 0&#xA;May  7 23:26:51.452: INFO: &#x9;Container rules-configmap-reloader ready: true, restart count 0&#xA;May  7 23:26:51.452: INFO: concurrent-1620429780-794pc started at 2021-05-07 23:23:08 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.452: INFO: &#x9;Container c ready: false, restart count 0&#xA;May  7 23:26:51.452: INFO: kube-multus-ds-amd64-fxgdb started at 2021-05-07 20:02:10 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.452: INFO: &#x9;Container kube-multus ready: true, restart count 1&#xA;May  7 23:26:51.452: INFO: successful-jobs-history-limit-1620429960-zfzbk started at 2021-05-07 23:26:09 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.452: INFO: &#x9;Container c ready: false, restart count 0&#xA;May  7 23:26:51.452: INFO: kube-flannel-qm7lv started at 2021-05-07 20:02:02 +0000 UTC (1+1 container statuses recorded)&#xA;May  7 23:26:51.452: INFO: &#x9;Init container install-cni ready: true, restart count 2&#xA;May  7 23:26:51.452: INFO: &#x9;Container kube-flannel ready: true, restart count 2&#xA;May  7 23:26:51.452: INFO: kubernetes-metrics-scraper-678c97765c-g7srv started at 2021-05-07 20:02:35 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.452: INFO: &#x9;Container kubernetes-metrics-scraper ready: true, restart count 1&#xA;May  7 23:26:51.452: INFO: sriov-net-dp-kube-sriov-device-plugin-amd64-mq752 started at 2021-05-07 20:09:23 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 
23:26:51.452: INFO: &#x9;Container kube-sriovdp ready: true, restart count 0&#xA;May  7 23:26:51.452: INFO: node-exporter-mpq58 started at 2021-05-07 20:12:42 +0000 UTC (0+2 container statuses recorded)&#xA;May  7 23:26:51.452: INFO: &#x9;Container kube-rbac-proxy ready: true, restart count 0&#xA;May  7 23:26:51.452: INFO: &#x9;Container node-exporter ready: true, restart count 0&#xA;May  7 23:26:51.452: INFO: collectd-tmjfs started at 2021-05-07 20:18:33 +0000 UTC (0+3 container statuses recorded)&#xA;May  7 23:26:51.452: INFO: &#x9;Container collectd ready: true, restart count 0&#xA;May  7 23:26:51.452: INFO: &#x9;Container collectd-exporter ready: true, restart count 0&#xA;May  7 23:26:51.452: INFO: &#x9;Container rbac-proxy ready: true, restart count 0&#xA;May  7 23:26:51.452: INFO: ss2-0 started at 2021-05-07 23:20:52 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.452: INFO: &#x9;Container webserver ready: false, restart count 0&#xA;May  7 23:26:51.452: INFO: nginx-proxy-node1 started at 2021-05-07 20:07:46 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.452: INFO: &#x9;Container nginx-proxy ready: true, restart count 1&#xA;May  7 23:26:51.452: INFO: concurrent-1620429840-hrj6l started at 2021-05-07 23:24:08 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.452: INFO: &#x9;Container c ready: false, restart count 0&#xA;May  7 23:26:51.452: INFO: node-feature-discovery-worker-xvbpd started at 2021-05-07 20:08:19 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.452: INFO: &#x9;Container nfd-worker ready: true, restart count 0&#xA;W0507 23:26:51.463043      36 metrics_grabber.go:105] Did not receive an external client interface. 
Grabbing metrics from ClusterAutoscaler is disabled.&#xA;May  7 23:26:51.496: INFO: &#xA;Latency metrics for node node1&#xA;May  7 23:26:51.496: INFO: &#xA;Logging node info for node node2&#xA;May  7 23:26:51.498: INFO: Node Info: &amp;Node{ObjectMeta:{node2   /api/v1/nodes/node2 eae422ac-f6f0-4e9a-961d-e133dffc36be 75277 0 2021-05-07 20:01:25 +0000 UTC &lt;nil&gt; &lt;nil&gt; map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/os:linux cmk.intel.com/cmk-node:true feature.node.kubernetes.io/cpu-cpuid.ADX:true feature.node.kubernetes.io/cpu-cpuid.AESNI:true feature.node.kubernetes.io/cpu-cpuid.AVX:true feature.node.kubernetes.io/cpu-cpuid.AVX2:true feature.node.kubernetes.io/cpu-cpuid.AVX512BW:true feature.node.kubernetes.io/cpu-cpuid.AVX512CD:true feature.node.kubernetes.io/cpu-cpuid.AVX512DQ:true feature.node.kubernetes.io/cpu-cpuid.AVX512F:true feature.node.kubernetes.io/cpu-cpuid.AVX512VL:true feature.node.kubernetes.io/cpu-cpuid.FMA3:true feature.node.kubernetes.io/cpu-cpuid.HLE:true feature.node.kubernetes.io/cpu-cpuid.IBPB:true feature.node.kubernetes.io/cpu-cpuid.MPX:true feature.node.kubernetes.io/cpu-cpuid.RTM:true feature.node.kubernetes.io/cpu-cpuid.STIBP:true feature.node.kubernetes.io/cpu-cpuid.VMX:true feature.node.kubernetes.io/cpu-hardware_multithreading:true feature.node.kubernetes.io/cpu-pstate.turbo:true feature.node.kubernetes.io/cpu-rdt.RDTCMT:true feature.node.kubernetes.io/cpu-rdt.RDTL3CA:true feature.node.kubernetes.io/cpu-rdt.RDTMBA:true feature.node.kubernetes.io/cpu-rdt.RDTMBM:true feature.node.kubernetes.io/cpu-rdt.RDTMON:true feature.node.kubernetes.io/kernel-config.NO_HZ:true feature.node.kubernetes.io/kernel-config.NO_HZ_FULL:true feature.node.kubernetes.io/kernel-selinux.enabled:true feature.node.kubernetes.io/kernel-version.full:3.10.0-1160.25.1.el7.x86_64 feature.node.kubernetes.io/kernel-version.major:3 feature.node.kubernetes.io/kernel-version.minor:10 feature.node.kubernetes.io/kernel-version.revision:0 
feature.node.kubernetes.io/memory-numa:true feature.node.kubernetes.io/network-sriov.capable:true feature.node.kubernetes.io/network-sriov.configured:true feature.node.kubernetes.io/pci-0300_1a03.present:true feature.node.kubernetes.io/storage-nonrotationaldisk:true feature.node.kubernetes.io/system-os_release.ID:centos feature.node.kubernetes.io/system-os_release.VERSION_ID:7 feature.node.kubernetes.io/system-os_release.VERSION_ID.major:7 kubernetes.io/arch:amd64 kubernetes.io/hostname:node2 kubernetes.io/os:linux] map[flannel.alpha.coreos.com/backend-data:{&#34;VtepMAC&#34;:&#34;22:df:da:84:9b:00&#34;} flannel.alpha.coreos.com/backend-type:vxlan flannel.alpha.coreos.com/kube-subnet-manager:true flannel.alpha.coreos.com/public-ip:10.10.190.208 kubeadm.alpha.kubernetes.io/cri-socket:/var/run/dockershim.sock nfd.node.kubernetes.io/extended-resources: nfd.node.kubernetes.io/feature-labels:cpu-cpuid.ADX,cpu-cpuid.AESNI,cpu-cpuid.AVX,cpu-cpuid.AVX2,cpu-cpuid.AVX512BW,cpu-cpuid.AVX512CD,cpu-cpuid.AVX512DQ,cpu-cpuid.AVX512F,cpu-cpuid.AVX512VL,cpu-cpuid.FMA3,cpu-cpuid.HLE,cpu-cpuid.IBPB,cpu-cpuid.MPX,cpu-cpuid.RTM,cpu-cpuid.STIBP,cpu-cpuid.VMX,cpu-hardware_multithreading,cpu-pstate.turbo,cpu-rdt.RDTCMT,cpu-rdt.RDTL3CA,cpu-rdt.RDTMBA,cpu-rdt.RDTMBM,cpu-rdt.RDTMON,kernel-config.NO_HZ,kernel-config.NO_HZ_FULL,kernel-selinux.enabled,kernel-version.full,kernel-version.major,kernel-version.minor,kernel-version.revision,memory-numa,network-sriov.capable,network-sriov.configured,pci-0300_1a03.present,storage-nonrotationaldisk,system-os_release.ID,system-os_release.VERSION_ID,system-os_release.VERSION_ID.major nfd.node.kubernetes.io/worker.version:v0.7.0 node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] []  [{kube-controller-manager Update v1 2021-05-07 20:01:25 +0000 UTC FieldsV1 
{&#34;f:metadata&#34;:{&#34;f:annotations&#34;:{&#34;f:node.alpha.kubernetes.io/ttl&#34;:{}}},&#34;f:spec&#34;:{&#34;f:podCIDR&#34;:{},&#34;f:podCIDRs&#34;:{&#34;.&#34;:{},&#34;v:\&#34;10.244.4.0/24\&#34;&#34;:{}}}}} {kubeadm Update v1 2021-05-07 20:01:25 +0000 UTC FieldsV1 {&#34;f:metadata&#34;:{&#34;f:annotations&#34;:{&#34;f:kubeadm.alpha.kubernetes.io/cri-socket&#34;:{}}}}} {flanneld Update v1 2021-05-07 20:02:07 +0000 UTC FieldsV1 {&#34;f:metadata&#34;:{&#34;f:annotations&#34;:{&#34;f:flannel.alpha.coreos.com/backend-data&#34;:{},&#34;f:flannel.alpha.coreos.com/backend-type&#34;:{},&#34;f:flannel.alpha.coreos.com/kube-subnet-manager&#34;:{},&#34;f:flannel.alpha.coreos.com/public-ip&#34;:{}}},&#34;f:status&#34;:{&#34;f:conditions&#34;:{&#34;k:{\&#34;type\&#34;:\&#34;NetworkUnavailable\&#34;}&#34;:{&#34;.&#34;:{},&#34;f:lastHeartbeatTime&#34;:{},&#34;f:lastTransitionTime&#34;:{},&#34;f:message&#34;:{},&#34;f:reason&#34;:{},&#34;f:status&#34;:{},&#34;f:type&#34;:{}}}}}} {nfd-master Update v1 2021-05-07 20:08:45 +0000 UTC FieldsV1 
{&#34;f:metadata&#34;:{&#34;f:annotations&#34;:{&#34;f:nfd.node.kubernetes.io/extended-resources&#34;:{},&#34;f:nfd.node.kubernetes.io/feature-labels&#34;:{},&#34;f:nfd.node.kubernetes.io/worker.version&#34;:{}},&#34;f:labels&#34;:{&#34;f:feature.node.kubernetes.io/cpu-cpuid.ADX&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-cpuid.AESNI&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-cpuid.AVX&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-cpuid.AVX2&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-cpuid.AVX512BW&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-cpuid.AVX512CD&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-cpuid.AVX512DQ&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-cpuid.AVX512F&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-cpuid.AVX512VL&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-cpuid.FMA3&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-cpuid.HLE&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-cpuid.IBPB&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-cpuid.MPX&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-cpuid.RTM&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-cpuid.STIBP&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-cpuid.VMX&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-hardware_multithreading&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-pstate.turbo&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-rdt.RDTCMT&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-rdt.RDTL3CA&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-rdt.RDTMBA&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-rdt.RDTMBM&#34;:{},&#34;f:feature.node.kubernetes.io/cpu-rdt.RDTMON&#34;:{},&#34;f:feature.node.kubernetes.io/kernel-config.NO_HZ&#34;:{},&#34;f:feature.node.kubernetes.io/kernel-config.NO_HZ_FULL&#34;:{},&#34;f:feature.node.kubernetes.io/kernel-selinux.enabled&#34;:{},&#34;f:feature.node.kubernetes.io/kernel-version.full&#34;:{},&#34;f:feature.node.kubernetes.io/kernel-version.major&#34;:{},&#34;f:feature.node.kubernetes.io/kernel-version.minor&#34;:{},&#34;f:feature.node.kubernetes.io/kernel-ver
sion.revision&#34;:{},&#34;f:feature.node.kubernetes.io/memory-numa&#34;:{},&#34;f:feature.node.kubernetes.io/network-sriov.capable&#34;:{},&#34;f:feature.node.kubernetes.io/network-sriov.configured&#34;:{},&#34;f:feature.node.kubernetes.io/pci-0300_1a03.present&#34;:{},&#34;f:feature.node.kubernetes.io/storage-nonrotationaldisk&#34;:{},&#34;f:feature.node.kubernetes.io/system-os_release.ID&#34;:{},&#34;f:feature.node.kubernetes.io/system-os_release.VERSION_ID&#34;:{},&#34;f:feature.node.kubernetes.io/system-os_release.VERSION_ID.major&#34;:{}}}}} {Swagger-Codegen Update v1 2021-05-07 20:11:31 +0000 UTC FieldsV1 {&#34;f:metadata&#34;:{&#34;f:labels&#34;:{&#34;f:cmk.intel.com/cmk-node&#34;:{}}},&#34;f:status&#34;:{&#34;f:capacity&#34;:{&#34;f:cmk.intel.com/exclusive-cores&#34;:{}}}}} {kubelet Update v1 2021-05-07 22:47:55 +0000 UTC FieldsV1 {&#34;f:metadata&#34;:{&#34;f:annotations&#34;:{&#34;.&#34;:{},&#34;f:volumes.kubernetes.io/controller-managed-attach-detach&#34;:{}},&#34;f:labels&#34;:{&#34;.&#34;:{},&#34;f:beta.kubernetes.io/arch&#34;:{},&#34;f:beta.kubernetes.io/os&#34;:{},&#34;f:kubernetes.io/arch&#34;:{},&#34;f:kubernetes.io/hostname&#34;:{},&#34;f:kubernetes.io/os&#34;:{}}},&#34;f:status&#34;:{&#34;f:addresses&#34;:{&#34;.&#34;:{},&#34;k:{\&#34;type\&#34;:\&#34;Hostname\&#34;}&#34;:{&#34;.&#34;:{},&#34;f:address&#34;:{},&#34;f:type&#34;:{}},&#34;k:{\&#34;type\&#34;:\&#34;InternalIP\&#34;}&#34;:{&#34;.&#34;:{},&#34;f:address&#34;:{},&#34;f:type&#34;:{}}},&#34;f:allocatable&#34;:{&#34;.&#34;:{},&#34;f:cmk.intel.com/exclusive-cores&#34;:{},&#34;f:cpu&#34;:{},&#34;f:ephemeral-storage&#34;:{},&#34;f:hugepages-1Gi&#34;:{},&#34;f:hugepages-2Mi&#34;:{},&#34;f:intel.com/intel_sriov_netdevice&#34;:{},&#34;f:memory&#34;:{},&#34;f:pods&#34;:{}},&#34;f:capacity&#34;:{&#34;.&#34;:{},&#34;f:cpu&#34;:{},&#34;f:ephemeral-storage&#34;:{},&#34;f:hugepages-1Gi&#34;:{},&#34;f:hugepages-2Mi&#34;:{},&#34;f:intel.com/intel_sriov_netdevice&#34;:{},&#34;f:memory&#34;:{},&#34;f:pods
&#34;:{}},&#34;f:conditions&#34;:{&#34;.&#34;:{},&#34;k:{\&#34;type\&#34;:\&#34;DiskPressure\&#34;}&#34;:{&#34;.&#34;:{},&#34;f:lastHeartbeatTime&#34;:{},&#34;f:lastTransitionTime&#34;:{},&#34;f:message&#34;:{},&#34;f:reason&#34;:{},&#34;f:status&#34;:{},&#34;f:type&#34;:{}},&#34;k:{\&#34;type\&#34;:\&#34;MemoryPressure\&#34;}&#34;:{&#34;.&#34;:{},&#34;f:lastHeartbeatTime&#34;:{},&#34;f:lastTransitionTime&#34;:{},&#34;f:message&#34;:{},&#34;f:reason&#34;:{},&#34;f:status&#34;:{},&#34;f:type&#34;:{}},&#34;k:{\&#34;type\&#34;:\&#34;PIDPressure\&#34;}&#34;:{&#34;.&#34;:{},&#34;f:lastHeartbeatTime&#34;:{},&#34;f:lastTransitionTime&#34;:{},&#34;f:message&#34;:{},&#34;f:reason&#34;:{},&#34;f:status&#34;:{},&#34;f:type&#34;:{}},&#34;k:{\&#34;type\&#34;:\&#34;Ready\&#34;}&#34;:{&#34;.&#34;:{},&#34;f:lastHeartbeatTime&#34;:{},&#34;f:lastTransitionTime&#34;:{},&#34;f:message&#34;:{},&#34;f:reason&#34;:{},&#34;f:status&#34;:{},&#34;f:type&#34;:{}}},&#34;f:daemonEndpoints&#34;:{&#34;f:kubeletEndpoint&#34;:{&#34;f:Port&#34;:{}}},&#34;f:images&#34;:{},&#34;f:nodeInfo&#34;:{&#34;f:architecture&#34;:{},&#34;f:bootID&#34;:{},&#34;f:containerRuntimeVersion&#34;:{},&#34;f:kernelVersion&#34;:{},&#34;f:kubeProxyVersion&#34;:{},&#34;f:kubeletVersion&#34;:{},&#34;f:machineID&#34;:{},&#34;f:operatingSystem&#34;:{},&#34;f:osImage&#34;:{},&#34;f:systemUUID&#34;:{}}}}}]},Spec:NodeSpec{PodCIDR:10.244.4.0/24,DoNotUseExternalID:,ProviderID:,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.244.4.0/24],},Status:NodeStatus{Capacity:ResourceList{cmk.intel.com/exclusive-cores: {{3 0} {&lt;nil&gt;} 3 DecimalSI},cpu: {{80 0} {&lt;nil&gt;} 80 DecimalSI},ephemeral-storage: {{450471260160 0} {&lt;nil&gt;} 439913340Ki BinarySI},hugepages-1Gi: {{0 0} {&lt;nil&gt;} 0 DecimalSI},hugepages-2Mi: {{21474836480 0} {&lt;nil&gt;} 20Gi BinarySI},intel.com/intel_sriov_netdevice: {{4 0} {&lt;nil&gt;} 4 DecimalSI},memory: {{201269633024 0} {&lt;nil&gt;}  BinarySI},pods: {{110 0} {&lt;nil&gt;} 110 
DecimalSI},},Allocatable:ResourceList{cmk.intel.com/exclusive-cores: {{3 0} {&lt;nil&gt;} 3 DecimalSI},cpu: {{77 0} {&lt;nil&gt;} 77 DecimalSI},ephemeral-storage: {{405424133473 0} {&lt;nil&gt;} 405424133473 DecimalSI},hugepages-1Gi: {{0 0} {&lt;nil&gt;} 0 DecimalSI},hugepages-2Mi: {{21474836480 0} {&lt;nil&gt;} 20Gi BinarySI},intel.com/intel_sriov_netdevice: {{4 0} {&lt;nil&gt;} 4 DecimalSI},memory: {{178884632576 0} {&lt;nil&gt;}  BinarySI},pods: {{110 0} {&lt;nil&gt;} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2021-05-07 20:05:02 +0000 UTC,LastTransitionTime:2021-05-07 20:05:02 +0000 UTC,Reason:FlannelIsUp,Message:Flannel is running on this node,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2021-05-07 23:26:48 +0000 UTC,LastTransitionTime:2021-05-07 20:01:25 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2021-05-07 23:26:48 +0000 UTC,LastTransitionTime:2021-05-07 20:01:25 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2021-05-07 23:26:48 +0000 UTC,LastTransitionTime:2021-05-07 20:01:25 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2021-05-07 23:26:48 +0000 UTC,LastTransitionTime:2021-05-07 20:02:07 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready 
status,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.10.190.208,},NodeAddress{Type:Hostname,Address:node2,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:6f56c5a750d0441dba0ffa6273fb1a17,SystemUUID:80B3CD56-852F-E711-906E-0017A4403562,BootID:98b263e9-3136-45ed-9b07-5f5b6b9d69b8,KernelVersion:3.10.0-1160.25.1.el7.x86_64,OSImage:CentOS Linux 7 (Core),ContainerRuntimeVersion:docker://19.3.14,KubeletVersion:v1.19.8,KubeProxyVersion:v1.19.8,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[localhost:30500/barometer-collectd@sha256:facbba279b626f9254dfef1616802a1eadebffadd9d37bde82b5562163121c72 localhost:30500/barometer-collectd:stable],SizeBytes:1464089363,},ContainerImage{Names:[localhost:30500/cmk@sha256:9955df6c40f7a908013f9d77afdcd5df3df7f47ffb0eb09bbcb27d61112121ec localhost:30500/cmk:v1.5.1],SizeBytes:726657349,},ContainerImage{Names:[cmk:v1.5.1],SizeBytes:726657349,},ContainerImage{Names:[centos/python-36-centos7@sha256:ac50754646f0d37616515fb30467d8743fb12954260ec36c9ecb5a94499447e0 centos/python-36-centos7:latest],SizeBytes:650061677,},ContainerImage{Names:[nfvpe/multus@sha256:ac1266b87ba44c09dc2a336f0d5dad968fccd389ce1944a85e87b32cd21f7224 nfvpe/multus:v3.4.2],SizeBytes:276587882,},ContainerImage{Names:[k8s.gcr.io/etcd@sha256:4ad90a11b55313b182afc186b9876c8e891531b8db4c9bf1541953021618d0e2 k8s.gcr.io/etcd:3.4.13-0],SizeBytes:253392289,},ContainerImage{Names:[kubernetesui/dashboard-amd64@sha256:3af248961c56916aeca8eb4000c15d6cf6a69641ea92f0540865bb37b495932f kubernetesui/dashboard-amd64:v2.1.0],SizeBytes:225733746,},ContainerImage{Names:[gcr.io/kubernetes-e2e-test-images/jessie-dnsutils@sha256:ad583e33cb284f7ef046673809b146ec4053cda19b54a85d2b180a86169715eb gcr.io/kubernetes-e2e-test-images/jessie-dnsutils:1.0],SizeBytes:195659796,},ContainerImage{Names:[nginx@sha256:75a55d33ecc73c2a242450a9f1cc858499d468f077ea942867e662c247b5e412 
nginx:1.19],SizeBytes:133117205,},ContainerImage{Names:[httpd@sha256:eb8ccf084cf3e80eece1add239effefd171eb39adbc154d33c14260d905d4060 httpd:2.4.38-alpine],SizeBytes:123781643,},ContainerImage{Names:[k8s.gcr.io/kube-apiserver@sha256:82e0ce4e1d08f3749d05c584fd60986197bfcdf9ce71d4666c71674221d53135 k8s.gcr.io/kube-apiserver:v1.19.8],SizeBytes:118813022,},ContainerImage{Names:[k8s.gcr.io/kube-proxy@sha256:8ed30419d9cf8965854f9ed501159e15deb30c42c3d2a60a278ae169320d140e k8s.gcr.io/kube-proxy:v1.19.8],SizeBytes:117674285,},ContainerImage{Names:[k8s.gcr.io/e2e-test-images/agnhost@sha256:17e61a0b9e498b6c73ed97670906be3d5a3ae394739c1bd5b619e1a004885cf0 k8s.gcr.io/e2e-test-images/agnhost:2.20],SizeBytes:113869866,},ContainerImage{Names:[k8s.gcr.io/kube-controller-manager@sha256:2769005fb667dbb936009894d01fe35f5ce1bce45eee80a9ce3c139b9be4080e k8s.gcr.io/kube-controller-manager:v1.19.8],SizeBytes:110805342,},ContainerImage{Names:[gcr.io/k8s-staging-nfd/node-feature-discovery@sha256:5d116c2c340be665a2c8adc9aca7f91396bd5cbde4add4fdc8dab95d8db43425 gcr.io/k8s-staging-nfd/node-feature-discovery:v0.7.0],SizeBytes:108309584,},ContainerImage{Names:[gcr.io/kubernetes-e2e-test-images/sample-apiserver@sha256:ff02aacd9766d597883fabafc7ad604c719a57611db1bcc1564c69a45b000a55 gcr.io/kubernetes-e2e-test-images/sample-apiserver:1.17],SizeBytes:60684726,},ContainerImage{Names:[quay.io/coreos/flannel@sha256:34860ea294a018d392e61936f19a7862d5e92039d196cac9176da14b2bbd0fe3 quay.io/coreos/flannel@sha256:ac5322604bcab484955e6dbc507f45a906bde79046667322e3918a8578ab08c8 quay.io/coreos/flannel:v0.13.0 quay.io/coreos/flannel:v0.13.0-amd64],SizeBytes:57156911,},ContainerImage{Names:[quay.io/coreos/kube-rbac-proxy@sha256:e10d1d982dd653db74ca87a1d1ad017bc5ef1aeb651bdea089debf16485b080b quay.io/coreos/kube-rbac-proxy:v0.5.0],SizeBytes:46626428,},ContainerImage{Names:[k8s.gcr.io/kube-scheduler@sha256:bb66135ce9a25ac405e43bbae6a2ac766e0efcac0a6a73ef9d1fbb4cf4732c9b 
k8s.gcr.io/kube-scheduler:v1.19.8],SizeBytes:46510430,},ContainerImage{Names:[localhost:30500/sriov-device-plugin@sha256:bae53f2ec899d23f9342d730c376a1ee3805e96fd1e5e4857e65085e6529557d localhost:30500/sriov-device-plugin:v3.3.1],SizeBytes:44392820,},ContainerImage{Names:[gcr.io/kubernetes-e2e-test-images/nonroot@sha256:4bd7ae247de5c988700233c5a4b55e804ffe90f8c66ae64853f1dae37b847213 gcr.io/kubernetes-e2e-test-images/nonroot:1.0],SizeBytes:42321438,},ContainerImage{Names:[quay.io/prometheus/node-exporter@sha256:a2f29256e53cc3e0b64d7a472512600b2e9410347d53cdc85b49f659c17e02ee quay.io/prometheus/node-exporter:v0.18.1],SizeBytes:22933477,},ContainerImage{Names:[localhost:30500/tas-controller@sha256:09461cf1b75776eb7d277a89d3a624c9eea355bf2ab1d8abbe45c40df99de268 localhost:30500/tas-controller:0.1],SizeBytes:22922439,},ContainerImage{Names:[localhost:30500/tas-extender@sha256:5b4ebd3c9985a2f36839e18a8b7c84e4d1deb89fa486247108510c71673efe12 localhost:30500/tas-extender:0.1],SizeBytes:21320903,},ContainerImage{Names:[prom/collectd-exporter@sha256:73fbda4d24421bff3b741c27efc36f1b6fbe7c57c378d56d4ff78101cd556654],SizeBytes:17463681,},ContainerImage{Names:[nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 nginx:1.14-alpine],SizeBytes:16032814,},ContainerImage{Names:[gcr.io/google-samples/hello-go-gke@sha256:4ea9cd3d35f81fc91bdebca3fae50c180a1048be0613ad0f811595365040396e gcr.io/google-samples/hello-go-gke:1.0],SizeBytes:11443478,},ContainerImage{Names:[gcr.io/kubernetes-e2e-test-images/nonewprivs@sha256:10066e9039219449fe3c81f38fe01928f87914150768ab81b62a468e51fa7411 gcr.io/kubernetes-e2e-test-images/nonewprivs:1.0],SizeBytes:6757579,},ContainerImage{Names:[gcr.io/kubernetes-e2e-test-images/nautilus@sha256:33a732d4c42a266912a5091598a0f07653c9134db4b8d571690d8afd509e0bfc gcr.io/kubernetes-e2e-test-images/nautilus:1.0],SizeBytes:4753501,},ContainerImage{Names:[busybox@sha256:8ccbac733d19c0dd4d70b4f0c1e12245b5fa3ad24758a11035ee505c629c0796 
busybox:1.29],SizeBytes:1154361,},ContainerImage{Names:[busybox@sha256:141c253bc4c3fd0a201d32dc1f493bcf3fff003b6df416dea4f41046e0f37d47 busybox:1.28],SizeBytes:1146369,},ContainerImage{Names:[k8s.gcr.io/pause@sha256:927d98197ec1141a368550822d18fa1c60bdae27b78b0c004f705f548c07814f k8s.gcr.io/pause:3.2],SizeBytes:682696,},ContainerImage{Names:[k8s.gcr.io/pause@sha256:a319ac2280eb7e3a59e252e54b76327cb4a33cf8389053b0d78277f22bbca2fa k8s.gcr.io/pause:3.3],SizeBytes:682696,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},}&#xA;May  7 23:26:51.499: INFO: &#xA;Logging kubelet events for node node2&#xA;May  7 23:26:51.501: INFO: &#xA;Logging pods the kubelet thinks is on node node2&#xA;May  7 23:26:51.521: INFO: backofflimit-bpn7x started at 2021-05-07 23:26:12 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.521: INFO: &#x9;Container c ready: false, restart count 0&#xA;May  7 23:26:51.521: INFO: kube-proxy-rgw7h started at 2021-05-07 20:01:27 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.521: INFO: &#x9;Container kube-proxy ready: true, restart count 1&#xA;May  7 23:26:51.521: INFO: webserver-7ccc6798d4-vhdp2 started at 2021-05-07 23:21:49 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.521: INFO: &#x9;Container httpd ready: false, restart count 0&#xA;May  7 23:26:51.521: INFO: all-succeed-2p7h6 started at 2021-05-07 23:25:11 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.521: INFO: &#x9;Container c ready: false, restart count 0&#xA;May  7 23:26:51.521: INFO: all-pods-removed-vdmmk started at 2021-05-07 23:26:22 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.521: INFO: &#x9;Container c ready: false, restart count 0&#xA;May  7 23:26:51.521: INFO: node-feature-discovery-worker-wp5n6 started at 2021-05-07 20:08:19 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.521: INFO: &#x9;Container nfd-worker ready: true, restart count 0&#xA;May  7 23:26:51.521: INFO: 
node-exporter-4bcls started at 2021-05-07 20:12:42 +0000 UTC (0+2 container statuses recorded)&#xA;May  7 23:26:51.521: INFO: &#x9;Container kube-rbac-proxy ready: true, restart count 0&#xA;May  7 23:26:51.521: INFO: &#x9;Container node-exporter ready: true, restart count 0&#xA;May  7 23:26:51.521: INFO: concurrent-1620429720-6qzrf started at 2021-05-07 23:22:08 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.521: INFO: &#x9;Container c ready: false, restart count 0&#xA;May  7 23:26:51.521: INFO: webserver-7ccc6798d4-xsdt7 started at 2021-05-07 23:21:43 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.521: INFO: &#x9;Container httpd ready: false, restart count 0&#xA;May  7 23:26:51.521: INFO: all-succeed-jmg2r started at 2021-05-07 23:25:11 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.521: INFO: &#x9;Container c ready: false, restart count 0&#xA;May  7 23:26:51.521: INFO: rs-wv6hw started at 2021-05-07 23:26:21 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.521: INFO: &#x9;Container donothing ready: true, restart count 0&#xA;May  7 23:26:51.521: INFO: kube-flannel-htqkx started at 2021-05-07 20:02:02 +0000 UTC (1+1 container statuses recorded)&#xA;May  7 23:26:51.521: INFO: &#x9;Init container install-cni ready: true, restart count 2&#xA;May  7 23:26:51.521: INFO: &#x9;Container kube-flannel ready: true, restart count 2&#xA;May  7 23:26:51.521: INFO: kube-multus-ds-amd64-g98hm started at 2021-05-07 20:02:10 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.521: INFO: &#x9;Container kube-multus ready: true, restart count 1&#xA;May  7 23:26:51.521: INFO: tas-telemetry-aware-scheduling-575ccbc9d4-8z46f started at 2021-05-07 20:15:36 +0000 UTC (0+2 container statuses recorded)&#xA;May  7 23:26:51.521: INFO: &#x9;Container tas-controller ready: true, restart count 0&#xA;May  7 23:26:51.521: INFO: &#x9;Container tas-extender ready: true, restart count 0&#xA;May  7 23:26:51.521: INFO: 
webserver-69b69768db-wmwtc started at 2021-05-07 23:21:49 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.521: INFO: &#x9;Container httpd ready: false, restart count 0&#xA;May  7 23:26:51.521: INFO: fail-once-non-local-wjkkp started at 2021-05-07 23:20:51 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.521: INFO: &#x9;Container c ready: false, restart count 0&#xA;May  7 23:26:51.521: INFO: webserver-69b69768db-6q8t9 started at 2021-05-07 23:21:25 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.521: INFO: &#x9;Container httpd ready: false, restart count 0&#xA;May  7 23:26:51.521: INFO: kubernetes-dashboard-86c6f9df5b-k9cj2 started at 2021-05-07 20:02:35 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.521: INFO: &#x9;Container kubernetes-dashboard ready: true, restart count 1&#xA;May  7 23:26:51.521: INFO: cmk-init-discover-node2-kd9gg started at 2021-05-07 20:11:26 +0000 UTC (0+3 container statuses recorded)&#xA;May  7 23:26:51.521: INFO: &#x9;Container discover ready: false, restart count 0&#xA;May  7 23:26:51.521: INFO: &#x9;Container init ready: false, restart count 0&#xA;May  7 23:26:51.521: INFO: &#x9;Container install ready: false, restart count 0&#xA;May  7 23:26:51.521: INFO: cmk-gvh7j started at 2021-05-07 20:11:49 +0000 UTC (0+2 container statuses recorded)&#xA;May  7 23:26:51.521: INFO: &#x9;Container nodereport ready: true, restart count 0&#xA;May  7 23:26:51.521: INFO: &#x9;Container reconcile ready: true, restart count 0&#xA;May  7 23:26:51.521: INFO: cmk-webhook-6c9d5f8578-94s58 started at 2021-05-07 20:11:49 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.521: INFO: &#x9;Container cmk-webhook ready: true, restart count 0&#xA;May  7 23:26:51.521: INFO: collectd-p5gbt started at 2021-05-07 20:18:33 +0000 UTC (0+3 container statuses recorded)&#xA;May  7 23:26:51.521: INFO: &#x9;Container collectd ready: true, restart count 0&#xA;May  7 23:26:51.521: INFO: &#x9;Container 
collectd-exporter ready: true, restart count 0&#xA;May  7 23:26:51.521: INFO: &#x9;Container rbac-proxy ready: true, restart count 0&#xA;May  7 23:26:51.521: INFO: concurrent-1620429900-drhj4 started at 2021-05-07 23:25:08 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.521: INFO: &#x9;Container c ready: false, restart count 0&#xA;May  7 23:26:51.521: INFO: all-pods-removed-2lnb5 started at 2021-05-07 23:26:22 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.521: INFO: &#x9;Container c ready: false, restart count 0&#xA;May  7 23:26:51.521: INFO: nginx-proxy-node2 started at 2021-05-07 20:07:46 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.521: INFO: &#x9;Container nginx-proxy ready: true, restart count 2&#xA;May  7 23:26:51.521: INFO: sriov-net-dp-kube-sriov-device-plugin-amd64-tkw8z started at 2021-05-07 20:09:23 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.521: INFO: &#x9;Container kube-sriovdp ready: true, restart count 0&#xA;May  7 23:26:51.521: INFO: fail-once-non-local-46fmq started at 2021-05-07 23:20:51 +0000 UTC (0+1 container statuses recorded)&#xA;May  7 23:26:51.521: INFO: &#x9;Container c ready: false, restart count 0&#xA;W0507 23:26:51.535052      36 metrics_grabber.go:105] Did not receive an external client interface. Grabbing metrics from ClusterAutoscaler is disabled.&#xA;May  7 23:26:51.584: INFO: &#xA;Latency metrics for node node2&#xA;May  7 23:26:51.584: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready&#xA;STEP: Destroying namespace &#34;deployment-6704&#34; for this suite.&#xA;</system-out>
      </testcase>
  </testsuite>