=== RUN TestPause/serial/PauseAgain
pause_test.go:107: (dbg) Run: out/minikube-linux-amd64 pause -p pause-20210813001951-679351 --alsologtostderr -v=5
=== CONT TestPause/serial/PauseAgain
pause_test.go:107: (dbg) Non-zero exit: out/minikube-linux-amd64 pause -p pause-20210813001951-679351 --alsologtostderr -v=5: exit status 80 (6.266083141s)
-- stdout --
* Pausing node pause-20210813001951-679351 ...
-- /stdout --
** stderr **
I0813 00:24:03.823623 717142 out.go:298] Setting OutFile to fd 1 ...
I0813 00:24:03.823843 717142 out.go:345] TERM=,COLORTERM=, which probably does not support color
I0813 00:24:03.823857 717142 out.go:311] Setting ErrFile to fd 2...
I0813 00:24:03.823862 717142 out.go:345] TERM=,COLORTERM=, which probably does not support color
I0813 00:24:03.823996 717142 root.go:313] Updating PATH: /home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/.minikube/bin
I0813 00:24:03.824254 717142 out.go:305] Setting JSON to false
I0813 00:24:03.824290 717142 mustload.go:65] Loading cluster: pause-20210813001951-679351
I0813 00:24:03.825243 717142 main.go:130] libmachine: Found binary path at /home/jenkins/workspace/KVM_Linux_containerd_integration/out/docker-machine-driver-kvm2
I0813 00:24:03.825303 717142 main.go:130] libmachine: Launching plugin server for driver kvm2
I0813 00:24:03.840435 717142 main.go:130] libmachine: Plugin server listening at address 127.0.0.1:46807
I0813 00:24:03.840994 717142 main.go:130] libmachine: () Calling .GetVersion
I0813 00:24:03.841673 717142 main.go:130] libmachine: Using API Version 1
I0813 00:24:03.841698 717142 main.go:130] libmachine: () Calling .SetConfigRaw
I0813 00:24:03.842109 717142 main.go:130] libmachine: () Calling .GetMachineName
I0813 00:24:03.842328 717142 main.go:130] libmachine: (pause-20210813001951-679351) Calling .GetState
I0813 00:24:03.845846 717142 host.go:66] Checking if "pause-20210813001951-679351" exists ...
I0813 00:24:03.846204 717142 main.go:130] libmachine: Found binary path at /home/jenkins/workspace/KVM_Linux_containerd_integration/out/docker-machine-driver-kvm2
I0813 00:24:03.846247 717142 main.go:130] libmachine: Launching plugin server for driver kvm2
I0813 00:24:03.859125 717142 main.go:130] libmachine: Plugin server listening at address 127.0.0.1:46749
I0813 00:24:03.859721 717142 main.go:130] libmachine: () Calling .GetVersion
I0813 00:24:03.860239 717142 main.go:130] libmachine: Using API Version 1
I0813 00:24:03.860264 717142 main.go:130] libmachine: () Calling .SetConfigRaw
I0813 00:24:03.860640 717142 main.go:130] libmachine: () Calling .GetMachineName
I0813 00:24:03.860799 717142 main.go:130] libmachine: (pause-20210813001951-679351) Calling .DriverName
I0813 00:24:03.861566 717142 pause.go:58] "namespaces" [kube-system kubernetes-dashboard storage-gluster istio-operator]="keys" map[addons:[] all:%!s(bool=false) apiserver-ips:[] apiserver-name:minikubeCA apiserver-names:[] apiserver-port:%!s(int=8443) auto-update-drivers:%!s(bool=true) base-image:gcr.io/k8s-minikube/kicbase:v0.0.25@sha256:6f936e3443b95cd918d77623bf7b595653bb382766e280290a02b4a349e88b79 bootstrapper:kubeadm cache-images:%!s(bool=true) cancel-scheduled:%!s(bool=false) cni: container-runtime:docker cpus:2 cri-socket: delete-on-failure:%!s(bool=false) disable-driver-mounts:%!s(bool=false) disk-size:20000mb dns-domain:cluster.local dns-proxy:%!s(bool=false) docker-env:[] docker-opt:[] download-only:%!s(bool=false) driver: dry-run:%!s(bool=false) embed-certs:%!s(bool=false) embedcerts:%!s(bool=false) enable-default-cni:%!s(bool=false) extra-config: extra-disks:%!s(int=0) feature-gates: force:%!s(bool=false) force-systemd:%!s(bool=false) host-dns-resolver:%!s(bool=true) host-only-cidr:192
.168.99.1/24 host-only-nic-type:virtio hyperkit-vpnkit-sock: hyperkit-vsock-ports:[] hyperv-external-adapter: hyperv-use-external-switch:%!s(bool=false) hyperv-virtual-switch: image-mirror-country: image-repository: insecure-registry:[] install-addons:%!s(bool=true) interactive:%!s(bool=true) iso-url:[https://storage.googleapis.com/minikube-builds/iso/12122/minikube-v1.22.0-1628238775-12122.iso https://github.com/kubernetes/minikube/releases/download/v1.22.0-1628238775-12122/minikube-v1.22.0-1628238775-12122.iso https://kubernetes.oss-cn-hangzhou.aliyuncs.com/minikube/iso/minikube-v1.22.0-1628238775-12122.iso] keep-context:%!s(bool=false) keep-context-active:%!s(bool=false) kubernetes-version: kvm-gpu:%!s(bool=false) kvm-hidden:%!s(bool=false) kvm-network:default kvm-numa-count:%!s(int=1) kvm-qemu-uri:qemu:///system listen-address: memory: mount:%!s(bool=false) mount-string:/home/jenkins:/minikube-host namespace:default nat-nic-type:virtio native-ssh:%!s(bool=true) network: network-plugin: nfs-share:[] nfs-sh
ares-root:/nfsshares no-vtx-check:%!s(bool=false) nodes:%!s(int=1) output:text ports:[] preload:%!s(bool=true) profile:pause-20210813001951-679351 purge:%!s(bool=false) registry-mirror:[] reminderwaitperiodinhours:%!s(int=24) schedule:0s service-cluster-ip-range:10.96.0.0/12 ssh-ip-address: ssh-key: ssh-port:%!s(int=22) ssh-user:root trace: user: uuid: vm:%!s(bool=false) vm-driver: wait:[apiserver system_pods] wait-timeout:6m0s wantnonedriverwarning:%!s(bool=true) wantupdatenotification:%!s(bool=true) wantvirtualboxdriverwarning:%!s(bool=true)]="(MISSING)"
I0813 00:24:04.557047 717142 out.go:177] * Pausing node pause-20210813001951-679351 ...
I0813 00:24:04.557096 717142 host.go:66] Checking if "pause-20210813001951-679351" exists ...
I0813 00:24:04.557478 717142 main.go:130] libmachine: Found binary path at /home/jenkins/workspace/KVM_Linux_containerd_integration/out/docker-machine-driver-kvm2
I0813 00:24:04.557547 717142 main.go:130] libmachine: Launching plugin server for driver kvm2
I0813 00:24:04.572028 717142 main.go:130] libmachine: Plugin server listening at address 127.0.0.1:35585
I0813 00:24:04.572591 717142 main.go:130] libmachine: () Calling .GetVersion
I0813 00:24:04.573279 717142 main.go:130] libmachine: Using API Version 1
I0813 00:24:04.573309 717142 main.go:130] libmachine: () Calling .SetConfigRaw
I0813 00:24:04.573777 717142 main.go:130] libmachine: () Calling .GetMachineName
I0813 00:24:04.573975 717142 main.go:130] libmachine: (pause-20210813001951-679351) Calling .DriverName
I0813 00:24:04.574215 717142 ssh_runner.go:149] Run: systemctl --version
I0813 00:24:04.574245 717142 main.go:130] libmachine: (pause-20210813001951-679351) Calling .GetSSHHostname
I0813 00:24:04.581466 717142 main.go:130] libmachine: (pause-20210813001951-679351) DBG | domain pause-20210813001951-679351 has defined MAC address 52:54:00:82:3b:c7 in network mk-pause-20210813001951-679351
I0813 00:24:04.581887 717142 main.go:130] libmachine: (pause-20210813001951-679351) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:82:3b:c7", ip: ""} in network mk-pause-20210813001951-679351: {Iface:virbr11 ExpiryTime:2021-08-13 01:20:54 +0000 UTC Type:0 Mac:52:54:00:82:3b:c7 Iaid: IPaddr:192.168.127.196 Prefix:24 Hostname:pause-20210813001951-679351 Clientid:01:52:54:00:82:3b:c7}
I0813 00:24:04.581918 717142 main.go:130] libmachine: (pause-20210813001951-679351) DBG | domain pause-20210813001951-679351 has defined IP address 192.168.127.196 and MAC address 52:54:00:82:3b:c7 in network mk-pause-20210813001951-679351
I0813 00:24:04.582082 717142 main.go:130] libmachine: (pause-20210813001951-679351) Calling .GetSSHPort
I0813 00:24:04.582277 717142 main.go:130] libmachine: (pause-20210813001951-679351) Calling .GetSSHKeyPath
I0813 00:24:04.582444 717142 main.go:130] libmachine: (pause-20210813001951-679351) Calling .GetSSHUsername
I0813 00:24:04.582585 717142 sshutil.go:53] new ssh client: &{IP:192.168.127.196 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/.minikube/machines/pause-20210813001951-679351/id_rsa Username:docker}
I0813 00:24:04.710149 717142 ssh_runner.go:149] Run: sudo systemctl is-active --quiet service kubelet
I0813 00:24:04.729788 717142 pause.go:50] kubelet running: true
I0813 00:24:04.729846 717142 ssh_runner.go:149] Run: sudo systemctl disable --now kubelet
I0813 00:24:07.264937 717142 ssh_runner.go:189] Completed: sudo systemctl disable --now kubelet: (2.535062871s)
I0813 00:24:07.265005 717142 cri.go:41] listing CRI containers in root /run/containerd/runc/k8s.io: {State:running Name: Namespaces:[kube-system kubernetes-dashboard storage-gluster istio-operator]}
I0813 00:24:07.265084 717142 ssh_runner.go:149] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system; crictl ps -a --quiet --label io.kubernetes.pod.namespace=kubernetes-dashboard; crictl ps -a --quiet --label io.kubernetes.pod.namespace=storage-gluster; crictl ps -a --quiet --label io.kubernetes.pod.namespace=istio-operator"
I0813 00:24:07.390746 717142 cri.go:76] found id: "a4a4ec1132e56e93b77cf82333943e5daaab8efa48df65ced586f5a324cde37d"
I0813 00:24:07.390787 717142 cri.go:76] found id: "7899222a89a71ffe4fb8d232a5b3799b5c94ea7e430406ce44654a4c80946ed9"
I0813 00:24:07.390795 717142 cri.go:76] found id: "d30696c5405d8e3fbc2bfe7ef7e391b98c301d6056d08f1d32a9614f101edc6f"
I0813 00:24:07.390801 717142 cri.go:76] found id: "6204b21ab9c2c6e799da39874e9eb93e39284c65231dd05603088db2fa6b8e6b"
I0813 00:24:07.390806 717142 cri.go:76] found id: "705c524b7bd2d071e133ec74fb1f433cab624312145e8e0d2e4b19e7936be85b"
I0813 00:24:07.390812 717142 cri.go:76] found id: "64c935095bced983323337af866a47ac732cfe3496f8dfd31387b8833f7cc6c0"
I0813 00:24:07.390819 717142 cri.go:76] found id: "85bd885bbae1eb206d16fda70dde9e78726f0563495bc0c5f64cf2083b9a7bf7"
I0813 00:24:07.390824 717142 cri.go:76] found id: "9cb0b80b9734ad17c461045a38b0d0a6b8d8686ef46bf8dd99a00d62b8cc67fe"
I0813 00:24:07.390830 717142 cri.go:76] found id: "efb6b8992aa826974c98985c6dbeb065b7a93d6ceeacacae98b06ec18bbfd5bb"
I0813 00:24:07.390842 717142 cri.go:76] found id: "c11b8a977685bc2516a3a180b7d7e5a078649d5b9c68db67af64bdbf0438193c"
I0813 00:24:07.390854 717142 cri.go:76] found id: "2efebee19d7a6bd77fd1333dab2cc543c575e8c6babdae865b90a1cf0fa48744"
I0813 00:24:07.390859 717142 cri.go:76] found id: "3874ae5baf2d856570fa5534f52778b464323afbd92eca54de5a983517dbbb65"
I0813 00:24:07.390864 717142 cri.go:76] found id: "3afc8c09f828616463f8d4246cdb7a602c45569e04de078f3b507b5df49993e8"
I0813 00:24:07.390871 717142 cri.go:76] found id: ""
I0813 00:24:07.390930 717142 ssh_runner.go:149] Run: sudo runc --root /run/containerd/runc/k8s.io list -f json
I0813 00:24:07.430219 717142 cri.go:103] JSON = [{"ociVersion":"1.0.2-dev","id":"2c9091a6bd8d73b0f55e45a516ab950e484c7e80697c4dbbcdf6f65909dafc9c","pid":3922,"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/2c9091a6bd8d73b0f55e45a516ab950e484c7e80697c4dbbcdf6f65909dafc9c","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/2c9091a6bd8d73b0f55e45a516ab950e484c7e80697c4dbbcdf6f65909dafc9c/rootfs","created":"2021-08-13T00:23:14.025881684Z","annotations":{"io.kubernetes.cri.container-type":"sandbox","io.kubernetes.cri.sandbox-id":"2c9091a6bd8d73b0f55e45a516ab950e484c7e80697c4dbbcdf6f65909dafc9c","io.kubernetes.cri.sandbox-log-directory":"/var/log/pods/kube-system_kube-scheduler-pause-20210813001951-679351_827c3abded97b0f25c66fba9223b4c18"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"46f36163c53e1f9abe41a73a7c5bcd16715af4061345c6bf366e4d73e9771238","pid":4047,"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/46f36163c53e1f9ab
e41a73a7c5bcd16715af4061345c6bf366e4d73e9771238","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/46f36163c53e1f9abe41a73a7c5bcd16715af4061345c6bf366e4d73e9771238/rootfs","created":"2021-08-13T00:23:15.111504623Z","annotations":{"io.kubernetes.cri.container-type":"sandbox","io.kubernetes.cri.sandbox-id":"46f36163c53e1f9abe41a73a7c5bcd16715af4061345c6bf366e4d73e9771238","io.kubernetes.cri.sandbox-log-directory":"/var/log/pods/kube-system_kube-proxy-2mkpr_59d9290e-34c7-4e80-a909-8d989552ec78"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"4a6e63928b11fedafa692c25459f3246b10afd3d4379e021576f2665ed408565","pid":4657,"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/4a6e63928b11fedafa692c25459f3246b10afd3d4379e021576f2665ed408565","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/4a6e63928b11fedafa692c25459f3246b10afd3d4379e021576f2665ed408565/rootfs","created":"2021-08-13T00:23:30.022843176Z","annotations":{"io.kubernetes.cri.container-type":"sandbox","
io.kubernetes.cri.sandbox-id":"4a6e63928b11fedafa692c25459f3246b10afd3d4379e021576f2665ed408565","io.kubernetes.cri.sandbox-log-directory":"/var/log/pods/kube-system_storage-provisioner_b781b362-9644-4c96-a463-4cb61bc5ab58"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"6204b21ab9c2c6e799da39874e9eb93e39284c65231dd05603088db2fa6b8e6b","pid":4425,"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/6204b21ab9c2c6e799da39874e9eb93e39284c65231dd05603088db2fa6b8e6b","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/6204b21ab9c2c6e799da39874e9eb93e39284c65231dd05603088db2fa6b8e6b/rootfs","created":"2021-08-13T00:23:18.323409933Z","annotations":{"io.kubernetes.cri.container-name":"kube-proxy","io.kubernetes.cri.container-type":"container","io.kubernetes.cri.sandbox-id":"46f36163c53e1f9abe41a73a7c5bcd16715af4061345c6bf366e4d73e9771238"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"64c935095bced983323337af866a47ac732cfe3496f8dfd31387b8833f7cc6c0","pid":4306,"status":"run
ning","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/64c935095bced983323337af866a47ac732cfe3496f8dfd31387b8833f7cc6c0","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/64c935095bced983323337af866a47ac732cfe3496f8dfd31387b8833f7cc6c0/rootfs","created":"2021-08-13T00:23:16.307393022Z","annotations":{"io.kubernetes.cri.container-name":"kube-scheduler","io.kubernetes.cri.container-type":"container","io.kubernetes.cri.sandbox-id":"2c9091a6bd8d73b0f55e45a516ab950e484c7e80697c4dbbcdf6f65909dafc9c"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"705c524b7bd2d071e133ec74fb1f433cab624312145e8e0d2e4b19e7936be85b","pid":4402,"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/705c524b7bd2d071e133ec74fb1f433cab624312145e8e0d2e4b19e7936be85b","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/705c524b7bd2d071e133ec74fb1f433cab624312145e8e0d2e4b19e7936be85b/rootfs","created":"2021-08-13T00:23:17.653178958Z","annotations":{"io.kubernetes.cri.container-n
ame":"etcd","io.kubernetes.cri.container-type":"container","io.kubernetes.cri.sandbox-id":"accc3895e9638677ea28570ea79a089ad8ba2c46e4ceb8dd1f082776f96062e7"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"7899222a89a71ffe4fb8d232a5b3799b5c94ea7e430406ce44654a4c80946ed9","pid":4694,"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/7899222a89a71ffe4fb8d232a5b3799b5c94ea7e430406ce44654a4c80946ed9","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/7899222a89a71ffe4fb8d232a5b3799b5c94ea7e430406ce44654a4c80946ed9/rootfs","created":"2021-08-13T00:23:30.735130787Z","annotations":{"io.kubernetes.cri.container-name":"storage-provisioner","io.kubernetes.cri.container-type":"container","io.kubernetes.cri.sandbox-id":"4a6e63928b11fedafa692c25459f3246b10afd3d4379e021576f2665ed408565"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"85bd885bbae1eb206d16fda70dde9e78726f0563495bc0c5f64cf2083b9a7bf7","pid":4289,"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.t
ask/k8s.io/85bd885bbae1eb206d16fda70dde9e78726f0563495bc0c5f64cf2083b9a7bf7","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/85bd885bbae1eb206d16fda70dde9e78726f0563495bc0c5f64cf2083b9a7bf7/rootfs","created":"2021-08-13T00:23:16.74740056Z","annotations":{"io.kubernetes.cri.container-name":"kube-apiserver","io.kubernetes.cri.container-type":"container","io.kubernetes.cri.sandbox-id":"bb4e7930b2ada4f1ec65421ed5d4b0b59b255e285b95240391d8d4e71d344e95"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"a4a4ec1132e56e93b77cf82333943e5daaab8efa48df65ced586f5a324cde37d","pid":4764,"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/a4a4ec1132e56e93b77cf82333943e5daaab8efa48df65ced586f5a324cde37d","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/a4a4ec1132e56e93b77cf82333943e5daaab8efa48df65ced586f5a324cde37d/rootfs","created":"2021-08-13T00:23:45.351615619Z","annotations":{"io.kubernetes.cri.container-name":"kube-controller-manager","io.kubernetes.cri.container
-type":"container","io.kubernetes.cri.sandbox-id":"a8ae77b235803b4e19e0eb0a6e8e4d70a30100102282506be869694a0b95d264"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"a8ae77b235803b4e19e0eb0a6e8e4d70a30100102282506be869694a0b95d264","pid":3973,"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/a8ae77b235803b4e19e0eb0a6e8e4d70a30100102282506be869694a0b95d264","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/a8ae77b235803b4e19e0eb0a6e8e4d70a30100102282506be869694a0b95d264/rootfs","created":"2021-08-13T00:23:14.162109109Z","annotations":{"io.kubernetes.cri.container-type":"sandbox","io.kubernetes.cri.sandbox-id":"a8ae77b235803b4e19e0eb0a6e8e4d70a30100102282506be869694a0b95d264","io.kubernetes.cri.sandbox-log-directory":"/var/log/pods/kube-system_kube-controller-manager-pause-20210813001951-679351_f90cf81553c0bced79ef2f705df65c51"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"accc3895e9638677ea28570ea79a089ad8ba2c46e4ceb8dd1f082776f96062e7","pid":4034,"status":"runni
ng","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/accc3895e9638677ea28570ea79a089ad8ba2c46e4ceb8dd1f082776f96062e7","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/accc3895e9638677ea28570ea79a089ad8ba2c46e4ceb8dd1f082776f96062e7/rootfs","created":"2021-08-13T00:23:14.597314012Z","annotations":{"io.kubernetes.cri.container-type":"sandbox","io.kubernetes.cri.sandbox-id":"accc3895e9638677ea28570ea79a089ad8ba2c46e4ceb8dd1f082776f96062e7","io.kubernetes.cri.sandbox-log-directory":"/var/log/pods/kube-system_etcd-pause-20210813001951-679351_ae2af969af5dffce0131cf735702505a"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"bb4e7930b2ada4f1ec65421ed5d4b0b59b255e285b95240391d8d4e71d344e95","pid":4024,"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/bb4e7930b2ada4f1ec65421ed5d4b0b59b255e285b95240391d8d4e71d344e95","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/bb4e7930b2ada4f1ec65421ed5d4b0b59b255e285b95240391d8d4e71d344e95/rootfs","created
":"2021-08-13T00:23:14.465535488Z","annotations":{"io.kubernetes.cri.container-type":"sandbox","io.kubernetes.cri.sandbox-id":"bb4e7930b2ada4f1ec65421ed5d4b0b59b255e285b95240391d8d4e71d344e95","io.kubernetes.cri.sandbox-log-directory":"/var/log/pods/kube-system_kube-apiserver-pause-20210813001951-679351_058544a8a5810508caf0af791704b304"},"owner":"root"},{"ociVersion":"1.0.2-dev","id":"d30696c5405d8e3fbc2bfe7ef7e391b98c301d6056d08f1d32a9614f101edc6f","pid":4465,"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/d30696c5405d8e3fbc2bfe7ef7e391b98c301d6056d08f1d32a9614f101edc6f","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/d30696c5405d8e3fbc2bfe7ef7e391b98c301d6056d08f1d32a9614f101edc6f/rootfs","created":"2021-08-13T00:23:18.584370342Z","annotations":{"io.kubernetes.cri.container-name":"coredns","io.kubernetes.cri.container-type":"container","io.kubernetes.cri.sandbox-id":"f5e6ffa407fcd7bad9fbd18bd48a2b0e8ac87614f276855e14b1c4ee6603854c"},"owner":"root"},{"ociVersi
on":"1.0.2-dev","id":"f5e6ffa407fcd7bad9fbd18bd48a2b0e8ac87614f276855e14b1c4ee6603854c","pid":4322,"status":"running","bundle":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/f5e6ffa407fcd7bad9fbd18bd48a2b0e8ac87614f276855e14b1c4ee6603854c","rootfs":"/run/containerd/io.containerd.runtime.v2.task/k8s.io/f5e6ffa407fcd7bad9fbd18bd48a2b0e8ac87614f276855e14b1c4ee6603854c/rootfs","created":"2021-08-13T00:23:16.810022574Z","annotations":{"io.kubernetes.cri.container-type":"sandbox","io.kubernetes.cri.sandbox-id":"f5e6ffa407fcd7bad9fbd18bd48a2b0e8ac87614f276855e14b1c4ee6603854c","io.kubernetes.cri.sandbox-log-directory":"/var/log/pods/kube-system_coredns-558bd4d5db-xjmwl_5897a243-0289-4042-882a-d25cb005813b"},"owner":"root"}]
I0813 00:24:07.430481 717142 cri.go:113] list returned 14 containers
I0813 00:24:07.430498 717142 cri.go:116] container: {ID:2c9091a6bd8d73b0f55e45a516ab950e484c7e80697c4dbbcdf6f65909dafc9c Status:running}
I0813 00:24:07.430517 717142 cri.go:118] skipping 2c9091a6bd8d73b0f55e45a516ab950e484c7e80697c4dbbcdf6f65909dafc9c - not in ps
I0813 00:24:07.430526 717142 cri.go:116] container: {ID:46f36163c53e1f9abe41a73a7c5bcd16715af4061345c6bf366e4d73e9771238 Status:running}
I0813 00:24:07.430534 717142 cri.go:118] skipping 46f36163c53e1f9abe41a73a7c5bcd16715af4061345c6bf366e4d73e9771238 - not in ps
I0813 00:24:07.430540 717142 cri.go:116] container: {ID:4a6e63928b11fedafa692c25459f3246b10afd3d4379e021576f2665ed408565 Status:running}
I0813 00:24:07.430547 717142 cri.go:118] skipping 4a6e63928b11fedafa692c25459f3246b10afd3d4379e021576f2665ed408565 - not in ps
I0813 00:24:07.430554 717142 cri.go:116] container: {ID:6204b21ab9c2c6e799da39874e9eb93e39284c65231dd05603088db2fa6b8e6b Status:running}
I0813 00:24:07.430561 717142 cri.go:116] container: {ID:64c935095bced983323337af866a47ac732cfe3496f8dfd31387b8833f7cc6c0 Status:running}
I0813 00:24:07.430568 717142 cri.go:116] container: {ID:705c524b7bd2d071e133ec74fb1f433cab624312145e8e0d2e4b19e7936be85b Status:running}
I0813 00:24:07.430573 717142 cri.go:116] container: {ID:7899222a89a71ffe4fb8d232a5b3799b5c94ea7e430406ce44654a4c80946ed9 Status:running}
I0813 00:24:07.430582 717142 cri.go:116] container: {ID:85bd885bbae1eb206d16fda70dde9e78726f0563495bc0c5f64cf2083b9a7bf7 Status:running}
I0813 00:24:07.430590 717142 cri.go:116] container: {ID:a4a4ec1132e56e93b77cf82333943e5daaab8efa48df65ced586f5a324cde37d Status:running}
I0813 00:24:07.430598 717142 cri.go:116] container: {ID:a8ae77b235803b4e19e0eb0a6e8e4d70a30100102282506be869694a0b95d264 Status:running}
I0813 00:24:07.430605 717142 cri.go:118] skipping a8ae77b235803b4e19e0eb0a6e8e4d70a30100102282506be869694a0b95d264 - not in ps
I0813 00:24:07.430611 717142 cri.go:116] container: {ID:accc3895e9638677ea28570ea79a089ad8ba2c46e4ceb8dd1f082776f96062e7 Status:running}
I0813 00:24:07.430628 717142 cri.go:118] skipping accc3895e9638677ea28570ea79a089ad8ba2c46e4ceb8dd1f082776f96062e7 - not in ps
I0813 00:24:07.430636 717142 cri.go:116] container: {ID:bb4e7930b2ada4f1ec65421ed5d4b0b59b255e285b95240391d8d4e71d344e95 Status:running}
I0813 00:24:07.430643 717142 cri.go:118] skipping bb4e7930b2ada4f1ec65421ed5d4b0b59b255e285b95240391d8d4e71d344e95 - not in ps
I0813 00:24:07.430649 717142 cri.go:116] container: {ID:d30696c5405d8e3fbc2bfe7ef7e391b98c301d6056d08f1d32a9614f101edc6f Status:running}
I0813 00:24:07.430654 717142 cri.go:116] container: {ID:f5e6ffa407fcd7bad9fbd18bd48a2b0e8ac87614f276855e14b1c4ee6603854c Status:running}
I0813 00:24:07.430658 717142 cri.go:118] skipping f5e6ffa407fcd7bad9fbd18bd48a2b0e8ac87614f276855e14b1c4ee6603854c - not in ps
I0813 00:24:07.430708 717142 ssh_runner.go:149] Run: sudo runc --root /run/containerd/runc/k8s.io pause 6204b21ab9c2c6e799da39874e9eb93e39284c65231dd05603088db2fa6b8e6b
I0813 00:24:07.452932 717142 ssh_runner.go:149] Run: sudo runc --root /run/containerd/runc/k8s.io pause 64c935095bced983323337af866a47ac732cfe3496f8dfd31387b8833f7cc6c0
I0813 00:24:07.474067 717142 ssh_runner.go:149] Run: sudo runc --root /run/containerd/runc/k8s.io pause 705c524b7bd2d071e133ec74fb1f433cab624312145e8e0d2e4b19e7936be85b
I0813 00:24:09.120445 717142 out.go:177]
W0813 00:24:09.120726 717142 out.go:242] X Exiting due to GUEST_PAUSE: runc: sudo runc --root /run/containerd/runc/k8s.io pause 705c524b7bd2d071e133ec74fb1f433cab624312145e8e0d2e4b19e7936be85b: Process exited with status 1
stdout:
stderr:
time="2021-08-13T00:24:07Z" level=error msg="unable to freeze"
W0813 00:24:09.120749 717142 out.go:242] *
[warning]: invalid value provided to Color, using default
[warning]: invalid value provided to Color, using default
[warning]: invalid value provided to Color, using default
[warning]: invalid value provided to Color, using default
[warning]: invalid value provided to Color, using default
[warning]: invalid value provided to Color, using default
[warning]: invalid value provided to Color, using default
[warning]: invalid value provided to Color, using default
W0813 00:24:09.159055 717142 out.go:242] ╭──────────────────────────────────────────────────────────────────────────────╮
│ │
│ * If the above advice does not help, please let us know: │
│ https://github.com/kubernetes/minikube/issues/new/choose │
│ │
│ * Please attach the following file to the GitHub issue: │
│ * - /tmp/minikube_pause_49fdaea37aad8ebccb761973c21590cc64efe8d9_0.log │
│ │
╰──────────────────────────────────────────────────────────────────────────────╯
I0813 00:24:10.022229 717142 out.go:177]
** /stderr **
pause_test.go:109: failed to pause minikube with args: "out/minikube-linux-amd64 pause -p pause-20210813001951-679351 --alsologtostderr -v=5" : exit status 80
helpers_test.go:223: -----------------------post-mortem--------------------------------
helpers_test.go:240: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p pause-20210813001951-679351 -n pause-20210813001951-679351
helpers_test.go:240: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Host}} -p pause-20210813001951-679351 -n pause-20210813001951-679351: exit status 2 (308.578748ms)
-- stdout --
Running
-- /stdout --
helpers_test.go:240: status error: exit status 2 (may be ok)
helpers_test.go:245: <<< TestPause/serial/PauseAgain FAILED: start of post-mortem logs <<<
helpers_test.go:246: ======> post-mortem[TestPause/serial/PauseAgain]: minikube logs <======
helpers_test.go:248: (dbg) Run: out/minikube-linux-amd64 -p pause-20210813001951-679351 logs -n 25
=== CONT TestPause/serial/PauseAgain
helpers_test.go:248: (dbg) Done: out/minikube-linux-amd64 -p pause-20210813001951-679351 logs -n 25: (1.402347725s)
helpers_test.go:253: TestPause/serial/PauseAgain logs:
-- stdout --
*
* ==> Audit <==
* |---------|------------------------------------------|------------------------------------------|---------|---------|-------------------------------|-------------------------------|
| Command | Args | Profile | User | Version | Start Time | End Time |
|---------|------------------------------------------|------------------------------------------|---------|---------|-------------------------------|-------------------------------|
| start | -p | scheduled-stop-20210813001820-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:18:20 UTC | Fri, 13 Aug 2021 00:19:22 UTC |
| | scheduled-stop-20210813001820-679351 | | | | | |
| | --memory=2048 --driver=kvm2 | | | | | |
| | --container-runtime=containerd | | | | | |
| stop | -p | scheduled-stop-20210813001820-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:19:22 UTC | Fri, 13 Aug 2021 00:19:22 UTC |
| | scheduled-stop-20210813001820-679351 | | | | | |
| | --cancel-scheduled | | | | | |
| stop | -p | scheduled-stop-20210813001820-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:19:35 UTC | Fri, 13 Aug 2021 00:19:42 UTC |
| | scheduled-stop-20210813001820-679351 | | | | | |
| | --schedule 5s | | | | | |
| delete | -p | scheduled-stop-20210813001820-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:19:50 UTC | Fri, 13 Aug 2021 00:19:51 UTC |
| | scheduled-stop-20210813001820-679351 | | | | | |
| start | -p | force-systemd-env-20210813001951-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:19:51 UTC | Fri, 13 Aug 2021 00:21:03 UTC |
| | force-systemd-env-20210813001951-679351 | | | | | |
| | --memory=2048 --alsologtostderr | | | | | |
| | -v=5 --driver=kvm2 | | | | | |
| | --container-runtime=containerd | | | | | |
| -p | force-systemd-env-20210813001951-679351 | force-systemd-env-20210813001951-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:21:03 UTC | Fri, 13 Aug 2021 00:21:04 UTC |
| | ssh cat /etc/containerd/config.toml | | | | | |
| delete | -p | force-systemd-env-20210813001951-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:21:04 UTC | Fri, 13 Aug 2021 00:21:05 UTC |
| | force-systemd-env-20210813001951-679351 | | | | | |
| delete | -p | kubenet-20210813002105-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:21:05 UTC | Fri, 13 Aug 2021 00:21:05 UTC |
| | kubenet-20210813002105-679351 | | | | | |
| delete | -p false-20210813002105-679351 | false-20210813002105-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:21:05 UTC | Fri, 13 Aug 2021 00:21:05 UTC |
| start | -p | offline-containerd-20210813001951-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:19:51 UTC | Fri, 13 Aug 2021 00:22:10 UTC |
| | offline-containerd-20210813001951-679351 | | | | | |
| | --alsologtostderr -v=1 --memory=2048 | | | | | |
| | --wait=true --driver=kvm2 | | | | | |
| | --container-runtime=containerd | | | | | |
| delete | -p | offline-containerd-20210813001951-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:22:10 UTC | Fri, 13 Aug 2021 00:22:11 UTC |
| | offline-containerd-20210813001951-679351 | | | | | |
| start | -p | force-systemd-flag-20210813002108-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:21:08 UTC | Fri, 13 Aug 2021 00:22:38 UTC |
| | force-systemd-flag-20210813002108-679351 | | | | | |
| | --memory=2048 --force-systemd | | | | | |
| | --alsologtostderr -v=5 --driver=kvm2 | | | | | |
| | --container-runtime=containerd | | | | | |
| -p | force-systemd-flag-20210813002108-679351 | force-systemd-flag-20210813002108-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:22:38 UTC | Fri, 13 Aug 2021 00:22:38 UTC |
| | ssh cat /etc/containerd/config.toml | | | | | |
| delete | -p | force-systemd-flag-20210813002108-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:22:38 UTC | Fri, 13 Aug 2021 00:22:40 UTC |
| | force-systemd-flag-20210813002108-679351 | | | | | |
| start | -p pause-20210813001951-679351 | pause-20210813001951-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:19:51 UTC | Fri, 13 Aug 2021 00:22:49 UTC |
| | --memory=2048 | | | | | |
| | --install-addons=false | | | | | |
| | --wait=all --driver=kvm2 | | | | | |
| | --container-runtime=containerd | | | | | |
| start | -p | cert-options-20210813002211-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:22:11 UTC | Fri, 13 Aug 2021 00:23:29 UTC |
| | cert-options-20210813002211-679351 | | | | | |
| | --memory=2048 | | | | | |
| | --apiserver-ips=127.0.0.1 | | | | | |
| | --apiserver-ips=192.168.15.15 | | | | | |
| | --apiserver-names=localhost | | | | | |
| | --apiserver-names=www.google.com | | | | | |
| | --apiserver-port=8555 | | | | | |
| | --driver=kvm2 | | | | | |
| | --container-runtime=containerd | | | | | |
| -p | cert-options-20210813002211-679351 | cert-options-20210813002211-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:23:29 UTC | Fri, 13 Aug 2021 00:23:29 UTC |
| | ssh openssl x509 -text -noout -in | | | | | |
| | /var/lib/minikube/certs/apiserver.crt | | | | | |
| delete | -p | cert-options-20210813002211-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:23:30 UTC | Fri, 13 Aug 2021 00:23:31 UTC |
| | cert-options-20210813002211-679351 | | | | | |
| start | -p | stopped-upgrade-20210813001951-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:22:29 UTC | Fri, 13 Aug 2021 00:23:40 UTC |
| | stopped-upgrade-20210813001951-679351 | | | | | |
| | --memory=2200 --alsologtostderr | | | | | |
| | -v=1 --driver=kvm2 | | | | | |
| | --container-runtime=containerd | | | | | |
| logs | -p | stopped-upgrade-20210813001951-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:23:40 UTC | Fri, 13 Aug 2021 00:23:42 UTC |
| | stopped-upgrade-20210813001951-679351 | | | | | |
| delete | -p | stopped-upgrade-20210813001951-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:23:42 UTC | Fri, 13 Aug 2021 00:23:43 UTC |
| | stopped-upgrade-20210813001951-679351 | | | | | |
| start | -p pause-20210813001951-679351 | pause-20210813001951-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:22:49 UTC | Fri, 13 Aug 2021 00:23:59 UTC |
| | --alsologtostderr | | | | | |
| | -v=1 --driver=kvm2 | | | | | |
| | --container-runtime=containerd | | | | | |
| pause | -p pause-20210813001951-679351 | pause-20210813001951-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:23:59 UTC | Fri, 13 Aug 2021 00:24:00 UTC |
| | --alsologtostderr -v=5 | | | | | |
| unpause | -p pause-20210813001951-679351 | pause-20210813001951-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:24:00 UTC | Fri, 13 Aug 2021 00:24:03 UTC |
| | --alsologtostderr -v=5 | | | | | |
| start | -p | kubernetes-upgrade-20210813002240-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:22:40 UTC | Fri, 13 Aug 2021 00:24:04 UTC |
| | kubernetes-upgrade-20210813002240-679351 | | | | | |
| | --memory=2200 | | | | | |
| | --kubernetes-version=v1.14.0 | | | | | |
| | --alsologtostderr -v=1 --driver=kvm2 | | | | | |
| | --container-runtime=containerd | | | | | |
|---------|------------------------------------------|------------------------------------------|---------|---------|-------------------------------|-------------------------------|
*
* ==> Last Start <==
* Log file created at: 2021/08/13 00:23:43
Running on machine: debian-jenkins-agent-10
Binary: Built with gc go1.16.7 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0813 00:23:43.503390 716767 out.go:298] Setting OutFile to fd 1 ...
I0813 00:23:43.503468 716767 out.go:345] TERM=,COLORTERM=, which probably does not support color
I0813 00:23:43.503471 716767 out.go:311] Setting ErrFile to fd 2...
I0813 00:23:43.503474 716767 out.go:345] TERM=,COLORTERM=, which probably does not support color
I0813 00:23:43.503593 716767 root.go:313] Updating PATH: /home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/.minikube/bin
I0813 00:23:43.503852 716767 out.go:305] Setting JSON to false
I0813 00:23:43.540034 716767 start.go:111] hostinfo: {"hostname":"debian-jenkins-agent-10","uptime":14787,"bootTime":1628799437,"procs":199,"os":"linux","platform":"debian","platformFamily":"debian","platformVersion":"9.13","kernelVersion":"4.9.0-16-amd64","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"c29e0b88-ef83-6765-d2fa-208fdce1af32"}
I0813 00:23:43.540691 716767 start.go:121] virtualization: kvm guest
I0813 00:23:43.543734 716767 out.go:177] * [auto-20210813002105-679351] minikube v1.22.0 on Debian 9.13 (kvm/amd64)
I0813 00:23:43.545313 716767 out.go:177] - KUBECONFIG=/home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/kubeconfig
I0813 00:23:43.543908 716767 notify.go:169] Checking for updates...
I0813 00:23:43.546871 716767 out.go:177] - MINIKUBE_BIN=out/minikube-linux-amd64
I0813 00:23:43.548308 716767 out.go:177] - MINIKUBE_HOME=/home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/.minikube
I0813 00:23:43.549680 716767 out.go:177] - MINIKUBE_LOCATION=12230
I0813 00:23:43.550290 716767 driver.go:335] Setting default libvirt URI to qemu:///system
I0813 00:23:43.581612 716767 out.go:177] * Using the kvm2 driver based on user configuration
I0813 00:23:43.581645 716767 start.go:278] selected driver: kvm2
I0813 00:23:43.581651 716767 start.go:751] validating driver "kvm2" against <nil>
I0813 00:23:43.581671 716767 start.go:762] status for kvm2: {Installed:true Healthy:true Running:true NeedsImprovement:false Error:<nil> Reason: Fix: Doc:}
I0813 00:23:43.582857 716767 install.go:52] acquiring lock: {Name:mk900956b073697a4aa6c80a27c6bb0742a99a53 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0813 00:23:43.583045 716767 install.go:117] Validating docker-machine-driver-kvm2, PATH=/home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/.minikube/bin:/home/jenkins/workspace/KVM_Linux_containerd_integration/out/:/usr/local/bin:/usr/bin:/bin:/usr/local/games:/usr/games:/usr/local/go/bin:/home/jenkins/go/bin:/usr/local/bin/:/usr/local/go/bin/:/home/jenkins/go/bin
I0813 00:23:43.596309 716767 install.go:137] /home/jenkins/workspace/KVM_Linux_containerd_integration/out/docker-machine-driver-kvm2 version is 1.22.0
I0813 00:23:43.596386 716767 start_flags.go:263] no existing cluster config was found, will generate one from the flags
I0813 00:23:43.596590 716767 start_flags.go:697] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0813 00:23:43.596631 716767 cni.go:93] Creating CNI manager for ""
I0813 00:23:43.596642 716767 cni.go:163] "kvm2" driver + containerd runtime found, recommending bridge
I0813 00:23:43.596653 716767 start_flags.go:272] Found "bridge CNI" CNI - setting NetworkPlugin=cni
I0813 00:23:43.596674 716767 start_flags.go:277] config:
{Name:auto-20210813002105-679351 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.25@sha256:6f936e3443b95cd918d77623bf7b595653bb382766e280290a02b4a349e88b79 Memory:2048 CPUs:2 DiskSize:20000 VMDriver: Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.99.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.21.3 ClusterName:auto-20210813002105-679351 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: Ne
tworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:5m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: MultiNodeRequested:false ExtraDisks:0}
I0813 00:23:43.596829 716767 iso.go:123] acquiring lock: {Name:mke80f4e00d5590a17349e0875191e5cd211cb9b Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0813 00:23:39.698092 716165 pod_ready.go:102] pod "kube-controller-manager-pause-20210813001951-679351" in "kube-system" namespace has status "Ready":"False"
I0813 00:23:42.199438 716165 pod_ready.go:102] pod "kube-controller-manager-pause-20210813001951-679351" in "kube-system" namespace has status "Ready":"False"
I0813 00:23:46.628507 716004 out.go:204] - Configuring RBAC rules ...
I0813 00:23:47.055082 716004 cni.go:93] Creating CNI manager for ""
I0813 00:23:47.055113 716004 cni.go:163] "kvm2" driver + containerd runtime found, recommending bridge
I0813 00:23:47.057109 716004 out.go:177] * Configuring bridge CNI (Container Networking Interface) ...
I0813 00:23:47.057193 716004 ssh_runner.go:149] Run: sudo mkdir -p /etc/cni/net.d
I0813 00:23:47.065594 716004 ssh_runner.go:316] scp memory --> /etc/cni/net.d/1-k8s.conflist (457 bytes)
I0813 00:23:47.082382 716004 ssh_runner.go:149] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I0813 00:23:47.082440 716004 ssh_runner.go:149] Run: sudo /var/lib/minikube/binaries/v1.14.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I0813 00:23:47.082452 716004 ssh_runner.go:149] Run: sudo /var/lib/minikube/binaries/v1.14.0/kubectl label nodes minikube.k8s.io/version=v1.22.0 minikube.k8s.io/commit=dc1c3ca26e9449ce488a773126b8450402c94a19 minikube.k8s.io/name=kubernetes-upgrade-20210813002240-679351 minikube.k8s.io/updated_at=2021_08_13T00_23_47_0700 --all --overwrite --kubeconfig=/var/lib/minikube/kubeconfig
I0813 00:23:47.107481 716004 ops.go:34] apiserver oom_adj: 16
I0813 00:23:47.107506 716004 ops.go:39] adjusting apiserver oom_adj to -10
I0813 00:23:47.107540 716004 ssh_runner.go:149] Run: /bin/bash -c "echo -10 | sudo tee /proc/$(pgrep kube-apiserver)/oom_adj"
I0813 00:23:47.458289 716004 kubeadm.go:985] duration metric: took 375.90634ms to wait for elevateKubeSystemPrivileges.
I0813 00:23:47.458364 716004 kubeadm.go:392] StartCluster complete in 17.107930015s
I0813 00:23:47.458404 716004 settings.go:142] acquiring lock: {Name:mk513992707531c891d59a503efeac355a20c006 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0813 00:23:47.458521 716004 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/kubeconfig
I0813 00:23:47.460307 716004 lock.go:36] WriteFile acquiring /home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/kubeconfig: {Name:mk4539f4325bfd6eb26b6ddb5c7e1835c2548cd2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0813 00:23:47.461424 716004 kapi.go:59] client config for kubernetes-upgrade-20210813002240-679351: &rest.Config{Host:"https://192.168.50.136:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/.minikube/profiles/kubernetes-upgrade-20210813002240-679351/client.crt", KeyFile:"/home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/.minikube/pr
ofiles/kubernetes-upgrade-20210813002240-679351/client.key", CAFile:"/home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(nil)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x17e2a80), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
I0813 00:23:47.990899 716004 kapi.go:244] deployment "coredns" in namespace "kube-system" and context "kubernetes-upgrade-20210813002240-679351" rescaled to 1
I0813 00:23:47.990962 716004 start.go:226] Will wait 6m0s for node &{Name: IP:192.168.50.136 Port:8443 KubernetesVersion:v1.14.0 ControlPlane:true Worker:true}
I0813 00:23:47.993234 716004 out.go:177] * Verifying Kubernetes components...
I0813 00:23:47.993294 716004 ssh_runner.go:149] Run: sudo systemctl is-active --quiet service kubelet
I0813 00:23:47.991080 716004 ssh_runner.go:149] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.14.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I0813 00:23:47.991086 716004 addons.go:342] enableAddons start: toEnable=map[], additional=[]
I0813 00:23:47.993410 716004 addons.go:59] Setting storage-provisioner=true in profile "kubernetes-upgrade-20210813002240-679351"
I0813 00:23:47.993436 716004 addons.go:135] Setting addon storage-provisioner=true in "kubernetes-upgrade-20210813002240-679351"
W0813 00:23:47.993448 716004 addons.go:147] addon storage-provisioner should already be in state true
I0813 00:23:47.993443 716004 addons.go:59] Setting default-storageclass=true in profile "kubernetes-upgrade-20210813002240-679351"
I0813 00:23:47.993463 716004 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "kubernetes-upgrade-20210813002240-679351"
I0813 00:23:47.993480 716004 host.go:66] Checking if "kubernetes-upgrade-20210813002240-679351" exists ...
I0813 00:23:47.993992 716004 main.go:130] libmachine: Found binary path at /home/jenkins/workspace/KVM_Linux_containerd_integration/out/docker-machine-driver-kvm2
I0813 00:23:47.994005 716004 main.go:130] libmachine: Found binary path at /home/jenkins/workspace/KVM_Linux_containerd_integration/out/docker-machine-driver-kvm2
I0813 00:23:47.994039 716004 main.go:130] libmachine: Launching plugin server for driver kvm2
I0813 00:23:47.994153 716004 main.go:130] libmachine: Launching plugin server for driver kvm2
I0813 00:23:48.006889 716004 main.go:130] libmachine: Plugin server listening at address 127.0.0.1:32837
I0813 00:23:48.007383 716004 main.go:130] libmachine: () Calling .GetVersion
I0813 00:23:48.007952 716004 main.go:130] libmachine: Using API Version 1
I0813 00:23:48.007979 716004 main.go:130] libmachine: () Calling .SetConfigRaw
I0813 00:23:48.008377 716004 main.go:130] libmachine: () Calling .GetMachineName
I0813 00:23:48.008533 716004 main.go:130] libmachine: Plugin server listening at address 127.0.0.1:41115
I0813 00:23:48.008595 716004 main.go:130] libmachine: (kubernetes-upgrade-20210813002240-679351) Calling .GetState
I0813 00:23:48.008997 716004 main.go:130] libmachine: () Calling .GetVersion
I0813 00:23:48.009614 716004 main.go:130] libmachine: Using API Version 1
I0813 00:23:48.009645 716004 main.go:130] libmachine: () Calling .SetConfigRaw
I0813 00:23:48.010049 716004 main.go:130] libmachine: () Calling .GetMachineName
I0813 00:23:48.010666 716004 main.go:130] libmachine: Found binary path at /home/jenkins/workspace/KVM_Linux_containerd_integration/out/docker-machine-driver-kvm2
I0813 00:23:48.010719 716004 main.go:130] libmachine: Launching plugin server for driver kvm2
I0813 00:23:48.013509 716004 kapi.go:59] client config for kubernetes-upgrade-20210813002240-679351: &rest.Config{Host:"https://192.168.50.136:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/.minikube/profiles/kubernetes-upgrade-20210813002240-679351/client.crt", KeyFile:"/home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/.minikube/pr
ofiles/kubernetes-upgrade-20210813002240-679351/client.key", CAFile:"/home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(nil)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x17e2a80), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
I0813 00:23:48.021992 716004 main.go:130] libmachine: Plugin server listening at address 127.0.0.1:38909
I0813 00:23:48.022484 716004 main.go:130] libmachine: () Calling .GetVersion
I0813 00:23:48.022980 716004 main.go:130] libmachine: Using API Version 1
I0813 00:23:48.023002 716004 main.go:130] libmachine: () Calling .SetConfigRaw
I0813 00:23:48.023179 716004 addons.go:135] Setting addon default-storageclass=true in "kubernetes-upgrade-20210813002240-679351"
W0813 00:23:48.023202 716004 addons.go:147] addon default-storageclass should already be in state true
I0813 00:23:48.023234 716004 host.go:66] Checking if "kubernetes-upgrade-20210813002240-679351" exists ...
I0813 00:23:48.023382 716004 main.go:130] libmachine: () Calling .GetMachineName
I0813 00:23:48.023527 716004 main.go:130] libmachine: (kubernetes-upgrade-20210813002240-679351) Calling .GetState
I0813 00:23:48.023657 716004 main.go:130] libmachine: Found binary path at /home/jenkins/workspace/KVM_Linux_containerd_integration/out/docker-machine-driver-kvm2
I0813 00:23:48.023705 716004 main.go:130] libmachine: Launching plugin server for driver kvm2
I0813 00:23:48.027027 716004 main.go:130] libmachine: (kubernetes-upgrade-20210813002240-679351) Calling .DriverName
I0813 00:23:43.598996 716767 out.go:177] * Starting control plane node auto-20210813002105-679351 in cluster auto-20210813002105-679351
I0813 00:23:43.599019 716767 preload.go:131] Checking if preload exists for k8s version v1.21.3 and runtime containerd
I0813 00:23:43.599064 716767 preload.go:147] Found local preload: /home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v11-v1.21.3-containerd-overlay2-amd64.tar.lz4
I0813 00:23:43.599089 716767 cache.go:56] Caching tarball of preloaded images
I0813 00:23:43.599192 716767 preload.go:173] Found /home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v11-v1.21.3-containerd-overlay2-amd64.tar.lz4 in cache, skipping download
I0813 00:23:43.599219 716767 cache.go:59] Finished verifying existence of preloaded tar for v1.21.3 on containerd
I0813 00:23:43.599348 716767 profile.go:148] Saving config to /home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/.minikube/profiles/auto-20210813002105-679351/config.json ...
I0813 00:23:43.599382 716767 lock.go:36] WriteFile acquiring /home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/.minikube/profiles/auto-20210813002105-679351/config.json: {Name:mk1aa685bcd0f4ba75f0a968f048380e5aff9662 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0813 00:23:43.599562 716767 cache.go:205] Successfully downloaded all kic artifacts
I0813 00:23:43.599605 716767 start.go:313] acquiring machines lock for auto-20210813002105-679351: {Name:mk522658ca6319f8a1c60d46c1e97d60752e8eaa Clock:{} Delay:500ms Timeout:13m0s Cancel:<nil>}
I0813 00:23:48.029261 716004 out.go:177] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I0813 00:23:48.029368 716004 addons.go:275] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0813 00:23:48.029383 716004 ssh_runner.go:316] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I0813 00:23:48.029407 716004 main.go:130] libmachine: (kubernetes-upgrade-20210813002240-679351) Calling .GetSSHHostname
I0813 00:23:48.035762 716004 main.go:130] libmachine: (kubernetes-upgrade-20210813002240-679351) DBG | domain kubernetes-upgrade-20210813002240-679351 has defined MAC address 52:54:00:60:d0:ea in network mk-kubernetes-upgrade-20210813002240-679351
I0813 00:23:48.036076 716004 main.go:130] libmachine: Plugin server listening at address 127.0.0.1:42729
I0813 00:23:48.036211 716004 main.go:130] libmachine: (kubernetes-upgrade-20210813002240-679351) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:60:d0:ea", ip: ""} in network mk-kubernetes-upgrade-20210813002240-679351: {Iface:virbr5 ExpiryTime:2021-08-13 01:23:04 +0000 UTC Type:0 Mac:52:54:00:60:d0:ea Iaid: IPaddr:192.168.50.136 Prefix:24 Hostname:kubernetes-upgrade-20210813002240-679351 Clientid:01:52:54:00:60:d0:ea}
I0813 00:23:48.036241 716004 main.go:130] libmachine: (kubernetes-upgrade-20210813002240-679351) DBG | domain kubernetes-upgrade-20210813002240-679351 has defined IP address 192.168.50.136 and MAC address 52:54:00:60:d0:ea in network mk-kubernetes-upgrade-20210813002240-679351
I0813 00:23:48.036417 716004 main.go:130] libmachine: (kubernetes-upgrade-20210813002240-679351) Calling .GetSSHPort
I0813 00:23:48.036592 716004 main.go:130] libmachine: (kubernetes-upgrade-20210813002240-679351) Calling .GetSSHKeyPath
I0813 00:23:48.036688 716004 main.go:130] libmachine: () Calling .GetVersion
I0813 00:23:48.036725 716004 main.go:130] libmachine: (kubernetes-upgrade-20210813002240-679351) Calling .GetSSHUsername
I0813 00:23:48.036832 716004 sshutil.go:53] new ssh client: &{IP:192.168.50.136 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/.minikube/machines/kubernetes-upgrade-20210813002240-679351/id_rsa Username:docker}
I0813 00:23:48.037184 716004 main.go:130] libmachine: Using API Version 1
I0813 00:23:48.037211 716004 main.go:130] libmachine: () Calling .SetConfigRaw
I0813 00:23:48.037592 716004 main.go:130] libmachine: () Calling .GetMachineName
I0813 00:23:48.038274 716004 main.go:130] libmachine: Found binary path at /home/jenkins/workspace/KVM_Linux_containerd_integration/out/docker-machine-driver-kvm2
I0813 00:23:48.038339 716004 main.go:130] libmachine: Launching plugin server for driver kvm2
I0813 00:23:48.049879 716004 main.go:130] libmachine: Plugin server listening at address 127.0.0.1:37019
I0813 00:23:48.050283 716004 main.go:130] libmachine: () Calling .GetVersion
I0813 00:23:48.050784 716004 main.go:130] libmachine: Using API Version 1
I0813 00:23:48.050821 716004 main.go:130] libmachine: () Calling .SetConfigRaw
I0813 00:23:48.051201 716004 main.go:130] libmachine: () Calling .GetMachineName
I0813 00:23:48.051381 716004 main.go:130] libmachine: (kubernetes-upgrade-20210813002240-679351) Calling .GetState
I0813 00:23:48.054477 716004 main.go:130] libmachine: (kubernetes-upgrade-20210813002240-679351) Calling .DriverName
I0813 00:23:48.054703 716004 addons.go:275] installing /etc/kubernetes/addons/storageclass.yaml
I0813 00:23:48.054720 716004 ssh_runner.go:316] scp memory --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0813 00:23:48.054736 716004 main.go:130] libmachine: (kubernetes-upgrade-20210813002240-679351) Calling .GetSSHHostname
I0813 00:23:48.060467 716004 main.go:130] libmachine: (kubernetes-upgrade-20210813002240-679351) DBG | domain kubernetes-upgrade-20210813002240-679351 has defined MAC address 52:54:00:60:d0:ea in network mk-kubernetes-upgrade-20210813002240-679351
I0813 00:23:48.060946 716004 main.go:130] libmachine: (kubernetes-upgrade-20210813002240-679351) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:60:d0:ea", ip: ""} in network mk-kubernetes-upgrade-20210813002240-679351: {Iface:virbr5 ExpiryTime:2021-08-13 01:23:04 +0000 UTC Type:0 Mac:52:54:00:60:d0:ea Iaid: IPaddr:192.168.50.136 Prefix:24 Hostname:kubernetes-upgrade-20210813002240-679351 Clientid:01:52:54:00:60:d0:ea}
I0813 00:23:48.060984 716004 main.go:130] libmachine: (kubernetes-upgrade-20210813002240-679351) DBG | domain kubernetes-upgrade-20210813002240-679351 has defined IP address 192.168.50.136 and MAC address 52:54:00:60:d0:ea in network mk-kubernetes-upgrade-20210813002240-679351
I0813 00:23:48.061168 716004 main.go:130] libmachine: (kubernetes-upgrade-20210813002240-679351) Calling .GetSSHPort
I0813 00:23:48.061333 716004 main.go:130] libmachine: (kubernetes-upgrade-20210813002240-679351) Calling .GetSSHKeyPath
I0813 00:23:48.061483 716004 main.go:130] libmachine: (kubernetes-upgrade-20210813002240-679351) Calling .GetSSHUsername
I0813 00:23:48.061627 716004 sshutil.go:53] new ssh client: &{IP:192.168.50.136 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/.minikube/machines/kubernetes-upgrade-20210813002240-679351/id_rsa Username:docker}
I0813 00:23:48.123399 716004 ssh_runner.go:149] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.14.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.50.1 host.minikube.internal\n fallthrough\n }' | sudo /var/lib/minikube/binaries/v1.14.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I0813 00:23:48.124657 716004 kapi.go:59] client config for kubernetes-upgrade-20210813002240-679351: &rest.Config{Host:"https://192.168.50.136:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/.minikube/profiles/kubernetes-upgrade-20210813002240-679351/client.crt", KeyFile:"/home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/.minikube/profiles/kubernetes-upgrade-20210813002240-679351/client.key", CAFile:"/home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(nil)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x17e2a80), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
I0813 00:23:48.126985 716004 api_server.go:50] waiting for apiserver process to appear ...
I0813 00:23:48.127046 716004 ssh_runner.go:149] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0813 00:23:48.139267 716004 ssh_runner.go:149] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.14.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0813 00:23:48.184291 716004 ssh_runner.go:149] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.14.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0813 00:23:48.712890 716004 start.go:736] {"host.minikube.internal": 192.168.50.1} host record injected into CoreDNS
I0813 00:23:48.712931 716004 api_server.go:70] duration metric: took 721.940951ms to wait for apiserver process to appear ...
I0813 00:23:48.712951 716004 api_server.go:86] waiting for apiserver healthz status ...
I0813 00:23:48.712963 716004 api_server.go:239] Checking apiserver healthz at https://192.168.50.136:8443/healthz ...
I0813 00:23:48.725603 716004 api_server.go:265] https://192.168.50.136:8443/healthz returned 200:
ok
I0813 00:23:48.726662 716004 api_server.go:139] control plane version: v1.14.0
I0813 00:23:48.726686 716004 api_server.go:129] duration metric: took 13.728785ms to wait for apiserver health ...
I0813 00:23:48.726696 716004 system_pods.go:43] waiting for kube-system pods to appear ...
I0813 00:23:48.737220 716004 system_pods.go:59] 0 kube-system pods found
I0813 00:23:48.737248 716004 retry.go:31] will retry after 305.063636ms: only 0 pod(s) have shown up
I0813 00:23:48.814052 716004 main.go:130] libmachine: Making call to close driver server
I0813 00:23:48.814093 716004 main.go:130] libmachine: (kubernetes-upgrade-20210813002240-679351) Calling .Close
I0813 00:23:48.814102 716004 main.go:130] libmachine: Making call to close driver server
I0813 00:23:48.814123 716004 main.go:130] libmachine: (kubernetes-upgrade-20210813002240-679351) Calling .Close
I0813 00:23:48.814419 716004 main.go:130] libmachine: (kubernetes-upgrade-20210813002240-679351) DBG | Closing plugin on server side
I0813 00:23:48.814457 716004 main.go:130] libmachine: Successfully made call to close driver server
I0813 00:23:48.814473 716004 main.go:130] libmachine: (kubernetes-upgrade-20210813002240-679351) DBG | Closing plugin on server side
I0813 00:23:48.814478 716004 main.go:130] libmachine: Making call to close connection to plugin binary
I0813 00:23:48.814478 716004 main.go:130] libmachine: Successfully made call to close driver server
I0813 00:23:48.814491 716004 main.go:130] libmachine: Making call to close driver server
I0813 00:23:48.814502 716004 main.go:130] libmachine: (kubernetes-upgrade-20210813002240-679351) Calling .Close
I0813 00:23:48.814502 716004 main.go:130] libmachine: Making call to close connection to plugin binary
I0813 00:23:48.814569 716004 main.go:130] libmachine: Making call to close driver server
I0813 00:23:48.814578 716004 main.go:130] libmachine: (kubernetes-upgrade-20210813002240-679351) Calling .Close
I0813 00:23:48.814771 716004 main.go:130] libmachine: Successfully made call to close driver server
I0813 00:23:48.814788 716004 main.go:130] libmachine: Making call to close connection to plugin binary
I0813 00:23:48.816052 716004 main.go:130] libmachine: (kubernetes-upgrade-20210813002240-679351) DBG | Closing plugin on server side
I0813 00:23:48.816067 716004 main.go:130] libmachine: Successfully made call to close driver server
I0813 00:23:48.816101 716004 main.go:130] libmachine: Making call to close connection to plugin binary
I0813 00:23:48.816129 716004 main.go:130] libmachine: Making call to close driver server
I0813 00:23:48.816150 716004 main.go:130] libmachine: (kubernetes-upgrade-20210813002240-679351) Calling .Close
I0813 00:23:48.816368 716004 main.go:130] libmachine: Successfully made call to close driver server
I0813 00:23:48.816382 716004 main.go:130] libmachine: Making call to close connection to plugin binary
I0813 00:23:44.696016 716165 pod_ready.go:102] pod "kube-controller-manager-pause-20210813001951-679351" in "kube-system" namespace has status "Ready":"False"
I0813 00:23:47.195507 716165 pod_ready.go:102] pod "kube-controller-manager-pause-20210813001951-679351" in "kube-system" namespace has status "Ready":"False"
I0813 00:23:49.203255 716165 pod_ready.go:102] pod "kube-controller-manager-pause-20210813001951-679351" in "kube-system" namespace has status "Ready":"False"
I0813 00:23:48.818579 716004 out.go:177] * Enabled addons: storage-provisioner, default-storageclass
I0813 00:23:48.818606 716004 addons.go:344] enableAddons completed in 827.536284ms
I0813 00:23:49.047612 716004 system_pods.go:59] 1 kube-system pods found
I0813 00:23:49.047654 716004 system_pods.go:61] "storage-provisioner" [baa2edae-fbcc-11eb-bfc1-52540060d0ea] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had taints that the pod didn't tolerate.)
I0813 00:23:49.047670 716004 retry.go:31] will retry after 338.212508ms: only 1 pod(s) have shown up
I0813 00:23:49.390176 716004 system_pods.go:59] 1 kube-system pods found
I0813 00:23:49.390210 716004 system_pods.go:61] "storage-provisioner" [baa2edae-fbcc-11eb-bfc1-52540060d0ea] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had taints that the pod didn't tolerate.)
I0813 00:23:49.390224 716004 retry.go:31] will retry after 378.459802ms: only 1 pod(s) have shown up
I0813 00:23:49.774922 716004 system_pods.go:59] 1 kube-system pods found
I0813 00:23:49.774952 716004 system_pods.go:61] "storage-provisioner" [baa2edae-fbcc-11eb-bfc1-52540060d0ea] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had taints that the pod didn't tolerate.)
I0813 00:23:49.774966 716004 retry.go:31] will retry after 469.882201ms: only 1 pod(s) have shown up
I0813 00:23:50.249464 716004 system_pods.go:59] 1 kube-system pods found
I0813 00:23:50.249502 716004 system_pods.go:61] "storage-provisioner" [baa2edae-fbcc-11eb-bfc1-52540060d0ea] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had taints that the pod didn't tolerate.)
I0813 00:23:50.249536 716004 retry.go:31] will retry after 667.365439ms: only 1 pod(s) have shown up
I0813 00:23:51.040492 716767 start.go:317] acquired machines lock for "auto-20210813002105-679351" in 7.440857589s
I0813 00:23:51.040557 716767 start.go:89] Provisioning new machine with config: &{Name:auto-20210813002105-679351 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/12122/minikube-v1.22.0-1628238775-12122.iso KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.25@sha256:6f936e3443b95cd918d77623bf7b595653bb382766e280290a02b4a349e88b79 Memory:2048 CPUs:2 DiskSize:20000 VMDriver: Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.99.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.21.3 ClusterName:auto-20210813002105-679351 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.21.3 ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:5m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: MultiNodeRequested:false ExtraDisks:0} &{Name: IP: Port:8443 KubernetesVersion:v1.21.3 ControlPlane:true Worker:true}
I0813 00:23:51.040704 716767 start.go:126] createHost starting for "" (driver="kvm2")
I0813 00:23:51.044070 716767 out.go:204] * Creating kvm2 VM (CPUs=2, Memory=2048MB, Disk=20000MB) ...
I0813 00:23:51.044287 716767 main.go:130] libmachine: Found binary path at /home/jenkins/workspace/KVM_Linux_containerd_integration/out/docker-machine-driver-kvm2
I0813 00:23:51.044349 716767 main.go:130] libmachine: Launching plugin server for driver kvm2
I0813 00:23:51.058778 716767 main.go:130] libmachine: Plugin server listening at address 127.0.0.1:43465
I0813 00:23:51.059220 716767 main.go:130] libmachine: () Calling .GetVersion
I0813 00:23:51.059811 716767 main.go:130] libmachine: Using API Version 1
I0813 00:23:51.059836 716767 main.go:130] libmachine: () Calling .SetConfigRaw
I0813 00:23:51.060332 716767 main.go:130] libmachine: () Calling .GetMachineName
I0813 00:23:51.060537 716767 main.go:130] libmachine: (auto-20210813002105-679351) Calling .GetMachineName
I0813 00:23:51.060720 716767 main.go:130] libmachine: (auto-20210813002105-679351) Calling .DriverName
I0813 00:23:51.060904 716767 start.go:160] libmachine.API.Create for "auto-20210813002105-679351" (driver="kvm2")
I0813 00:23:51.060935 716767 client.go:168] LocalClient.Create starting
I0813 00:23:51.060976 716767 main.go:130] libmachine: Reading certificate data from /home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/.minikube/certs/ca.pem
I0813 00:23:51.061043 716767 main.go:130] libmachine: Decoding PEM data...
I0813 00:23:51.061067 716767 main.go:130] libmachine: Parsing certificate...
I0813 00:23:51.061228 716767 main.go:130] libmachine: Reading certificate data from /home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/.minikube/certs/cert.pem
I0813 00:23:51.061251 716767 main.go:130] libmachine: Decoding PEM data...
I0813 00:23:51.061271 716767 main.go:130] libmachine: Parsing certificate...
I0813 00:23:51.061330 716767 main.go:130] libmachine: Running pre-create checks...
I0813 00:23:51.061345 716767 main.go:130] libmachine: (auto-20210813002105-679351) Calling .PreCreateCheck
I0813 00:23:51.061718 716767 main.go:130] libmachine: (auto-20210813002105-679351) Calling .GetConfigRaw
I0813 00:23:51.062177 716767 main.go:130] libmachine: Creating machine...
I0813 00:23:51.062193 716767 main.go:130] libmachine: (auto-20210813002105-679351) Calling .Create
I0813 00:23:51.062334 716767 main.go:130] libmachine: (auto-20210813002105-679351) Creating KVM machine...
I0813 00:23:51.065144 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | found existing default KVM network
I0813 00:23:51.067403 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | I0813 00:23:51.067189 716910 network.go:240] skipping subnet 192.168.39.0/24 that is taken: &{IP:192.168.39.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.39.0/24 Gateway:192.168.39.1 ClientMin:192.168.39.2 ClientMax:192.168.39.254 Broadcast:192.168.39.255 Interface:{IfaceName:virbr6 IfaceIPv4:192.168.39.1 IfaceMTU:1500 IfaceMAC:52:54:00:c9:12:3a}}
I0813 00:23:51.069702 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | I0813 00:23:51.069605 716910 network.go:240] skipping subnet 192.168.50.0/24 that is taken: &{IP:192.168.50.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.50.0/24 Gateway:192.168.50.1 ClientMin:192.168.50.2 ClientMax:192.168.50.254 Broadcast:192.168.50.255 Interface:{IfaceName:virbr5 IfaceIPv4:192.168.50.1 IfaceMTU:1500 IfaceMAC:52:54:00:e7:55:97}}
I0813 00:23:51.071022 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | I0813 00:23:51.070925 716910 network.go:240] skipping subnet 192.168.61.0/24 that is taken: &{IP:192.168.61.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.61.0/24 Gateway:192.168.61.1 ClientMin:192.168.61.2 ClientMax:192.168.61.254 Broadcast:192.168.61.255 Interface:{IfaceName:virbr3 IfaceIPv4:192.168.61.1 IfaceMTU:1500 IfaceMAC:52:54:00:ef:5a:89}}
I0813 00:23:51.074043 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | I0813 00:23:51.073926 716910 network.go:240] skipping subnet 192.168.72.0/24 that is taken: &{IP:192.168.72.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.72.0/24 Gateway:192.168.72.1 ClientMin:192.168.72.2 ClientMax:192.168.72.254 Broadcast:192.168.72.255 Interface:{IfaceName:virbr4 IfaceIPv4:192.168.72.1 IfaceMTU:1500 IfaceMAC:52:54:00:3b:67:48}}
I0813 00:23:51.075430 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | I0813 00:23:51.075356 716910 network.go:240] skipping subnet 192.168.83.0/24 that is taken: &{IP:192.168.83.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.83.0/24 Gateway:192.168.83.1 ClientMin:192.168.83.2 ClientMax:192.168.83.254 Broadcast:192.168.83.255 Interface:{IfaceName:virbr9 IfaceIPv4:192.168.83.1 IfaceMTU:1500 IfaceMAC:52:54:00:2a:df:7d}}
I0813 00:23:51.077354 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | I0813 00:23:51.077276 716910 network.go:288] reserving subnet 192.168.94.0 for 1m0s: &{mu:{state:0 sema:0} read:{v:{m:map[] amended:true}} dirty:map[192.168.94.0:0xc00018c018] misses:0}
I0813 00:23:51.077382 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | I0813 00:23:51.077313 716910 network.go:235] using free private subnet 192.168.94.0/24: &{IP:192.168.94.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.94.0/24 Gateway:192.168.94.1 ClientMin:192.168.94.2 ClientMax:192.168.94.254 Broadcast:192.168.94.255 Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:}}
I0813 00:23:51.100332 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | trying to create private KVM network mk-auto-20210813002105-679351 192.168.94.0/24...
I0813 00:23:51.334015 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | private KVM network mk-auto-20210813002105-679351 192.168.94.0/24 created
I0813 00:23:51.334080 716767 main.go:130] libmachine: (auto-20210813002105-679351) Setting up store path in /home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/.minikube/machines/auto-20210813002105-679351 ...
I0813 00:23:51.334108 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | I0813 00:23:51.333962 716910 common.go:108] Making disk image using store path: /home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/.minikube
I0813 00:23:51.334134 716767 main.go:130] libmachine: (auto-20210813002105-679351) Building disk image from file:///home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/.minikube/cache/iso/minikube-v1.22.0-1628238775-12122.iso
I0813 00:23:51.334270 716767 main.go:130] libmachine: (auto-20210813002105-679351) Downloading /home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/.minikube/cache/boot2docker.iso from file:///home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/.minikube/cache/iso/minikube-v1.22.0-1628238775-12122.iso...
I0813 00:23:51.534972 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | I0813 00:23:51.534844 716910 common.go:115] Creating ssh key: /home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/.minikube/machines/auto-20210813002105-679351/id_rsa...
I0813 00:23:51.606821 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | I0813 00:23:51.606691 716910 common.go:121] Creating raw disk image: /home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/.minikube/machines/auto-20210813002105-679351/auto-20210813002105-679351.rawdisk...
I0813 00:23:51.606866 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | Writing magic tar header
I0813 00:23:51.606918 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | Writing SSH key tar header
I0813 00:23:51.606957 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | I0813 00:23:51.606813 716910 common.go:135] Fixing permissions on /home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/.minikube/machines/auto-20210813002105-679351 ...
I0813 00:23:51.606992 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | Checking permissions on dir: /home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/.minikube/machines/auto-20210813002105-679351
I0813 00:23:51.607009 716767 main.go:130] libmachine: (auto-20210813002105-679351) Setting executable bit set on /home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/.minikube/machines/auto-20210813002105-679351 (perms=drwx------)
I0813 00:23:51.607020 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | Checking permissions on dir: /home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/.minikube/machines
I0813 00:23:51.607036 716767 main.go:130] libmachine: (auto-20210813002105-679351) Setting executable bit set on /home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/.minikube/machines (perms=drwxr-xr-x)
I0813 00:23:51.607046 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | Checking permissions on dir: /home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/.minikube
I0813 00:23:51.607066 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | Checking permissions on dir: /home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b
I0813 00:23:51.607076 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | Checking permissions on dir: /home/jenkins/minikube-integration
I0813 00:23:51.607088 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | Checking permissions on dir: /home/jenkins
I0813 00:23:51.607096 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | Checking permissions on dir: /home
I0813 00:23:51.607104 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | Skipping /home - not owner
I0813 00:23:51.607124 716767 main.go:130] libmachine: (auto-20210813002105-679351) Setting executable bit set on /home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/.minikube (perms=drwxr-xr-x)
I0813 00:23:51.607148 716767 main.go:130] libmachine: (auto-20210813002105-679351) Setting executable bit set on /home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b (perms=drwxr-xr-x)
I0813 00:23:51.607174 716767 main.go:130] libmachine: (auto-20210813002105-679351) Setting executable bit set on /home/jenkins/minikube-integration (perms=drwxr-xr-x)
I0813 00:23:51.607181 716767 main.go:130] libmachine: (auto-20210813002105-679351) Setting executable bit set on /home/jenkins (perms=drwxr-xr-x)
I0813 00:23:51.607194 716767 main.go:130] libmachine: (auto-20210813002105-679351) Creating domain...
I0813 00:23:51.634568 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | domain auto-20210813002105-679351 has defined MAC address 52:54:00:d0:d0:55 in network default
I0813 00:23:51.635205 716767 main.go:130] libmachine: (auto-20210813002105-679351) Ensuring networks are active...
I0813 00:23:51.635226 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | domain auto-20210813002105-679351 has defined MAC address 52:54:00:8e:56:53 in network mk-auto-20210813002105-679351
I0813 00:23:51.637544 716767 main.go:130] libmachine: (auto-20210813002105-679351) Ensuring network default is active
I0813 00:23:51.637899 716767 main.go:130] libmachine: (auto-20210813002105-679351) Ensuring network mk-auto-20210813002105-679351 is active
I0813 00:23:51.638587 716767 main.go:130] libmachine: (auto-20210813002105-679351) Getting domain xml...
I0813 00:23:51.640716 716767 main.go:130] libmachine: (auto-20210813002105-679351) Creating domain...
I0813 00:23:52.057654 716767 main.go:130] libmachine: (auto-20210813002105-679351) Waiting to get IP...
I0813 00:23:52.058806 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | domain auto-20210813002105-679351 has defined MAC address 52:54:00:8e:56:53 in network mk-auto-20210813002105-679351
I0813 00:23:52.059316 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | unable to find current IP address of domain auto-20210813002105-679351 in network mk-auto-20210813002105-679351
I0813 00:23:52.059346 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | I0813 00:23:52.059258 716910 retry.go:31] will retry after 263.082536ms: waiting for machine to come up
I0813 00:23:52.323639 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | domain auto-20210813002105-679351 has defined MAC address 52:54:00:8e:56:53 in network mk-auto-20210813002105-679351
I0813 00:23:52.324237 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | unable to find current IP address of domain auto-20210813002105-679351 in network mk-auto-20210813002105-679351
I0813 00:23:52.324335 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | I0813 00:23:52.324257 716910 retry.go:31] will retry after 381.329545ms: waiting for machine to come up
I0813 00:23:52.706916 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | domain auto-20210813002105-679351 has defined MAC address 52:54:00:8e:56:53 in network mk-auto-20210813002105-679351
I0813 00:23:52.707357 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | unable to find current IP address of domain auto-20210813002105-679351 in network mk-auto-20210813002105-679351
I0813 00:23:52.707381 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | I0813 00:23:52.707310 716910 retry.go:31] will retry after 422.765636ms: waiting for machine to come up
I0813 00:23:53.131665 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | domain auto-20210813002105-679351 has defined MAC address 52:54:00:8e:56:53 in network mk-auto-20210813002105-679351
I0813 00:23:53.132237 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | unable to find current IP address of domain auto-20210813002105-679351 in network mk-auto-20210813002105-679351
I0813 00:23:53.132270 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | I0813 00:23:53.132177 716910 retry.go:31] will retry after 473.074753ms: waiting for machine to come up
I0813 00:23:51.700287 716165 pod_ready.go:102] pod "kube-controller-manager-pause-20210813001951-679351" in "kube-system" namespace has status "Ready":"False"
I0813 00:23:54.233827 716165 pod_ready.go:102] pod "kube-controller-manager-pause-20210813001951-679351" in "kube-system" namespace has status "Ready":"False"
I0813 00:23:50.920865 716004 system_pods.go:59] 1 kube-system pods found
I0813 00:23:50.920912 716004 system_pods.go:61] "storage-provisioner" [baa2edae-fbcc-11eb-bfc1-52540060d0ea] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had taints that the pod didn't tolerate.)
I0813 00:23:50.920930 716004 retry.go:31] will retry after 597.243124ms: only 1 pod(s) have shown up
I0813 00:23:51.523438 716004 system_pods.go:59] 1 kube-system pods found
I0813 00:23:51.523482 716004 system_pods.go:61] "storage-provisioner" [baa2edae-fbcc-11eb-bfc1-52540060d0ea] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had taints that the pod didn't tolerate.)
I0813 00:23:51.523501 716004 retry.go:31] will retry after 789.889932ms: only 1 pod(s) have shown up
I0813 00:23:52.318585 716004 system_pods.go:59] 1 kube-system pods found
I0813 00:23:52.318621 716004 system_pods.go:61] "storage-provisioner" [baa2edae-fbcc-11eb-bfc1-52540060d0ea] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had taints that the pod didn't tolerate.)
I0813 00:23:52.318637 716004 retry.go:31] will retry after 951.868007ms: only 1 pod(s) have shown up
I0813 00:23:53.275480 716004 system_pods.go:59] 1 kube-system pods found
I0813 00:23:53.275516 716004 system_pods.go:61] "storage-provisioner" [baa2edae-fbcc-11eb-bfc1-52540060d0ea] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had taints that the pod didn't tolerate.)
I0813 00:23:53.275532 716004 retry.go:31] will retry after 1.341783893s: only 1 pod(s) have shown up
I0813 00:23:54.621716 716004 system_pods.go:59] 1 kube-system pods found
I0813 00:23:54.621756 716004 system_pods.go:61] "storage-provisioner" [baa2edae-fbcc-11eb-bfc1-52540060d0ea] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had taints that the pod didn't tolerate.)
I0813 00:23:54.621775 716004 retry.go:31] will retry after 1.876813009s: only 1 pod(s) have shown up
I0813 00:23:53.606587 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | domain auto-20210813002105-679351 has defined MAC address 52:54:00:8e:56:53 in network mk-auto-20210813002105-679351
I0813 00:23:53.607105 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | unable to find current IP address of domain auto-20210813002105-679351 in network mk-auto-20210813002105-679351
I0813 00:23:53.607132 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | I0813 00:23:53.607062 716910 retry.go:31] will retry after 587.352751ms: waiting for machine to come up
I0813 00:23:54.195821 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | domain auto-20210813002105-679351 has defined MAC address 52:54:00:8e:56:53 in network mk-auto-20210813002105-679351
I0813 00:23:54.196281 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | unable to find current IP address of domain auto-20210813002105-679351 in network mk-auto-20210813002105-679351
I0813 00:23:54.196305 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | I0813 00:23:54.196240 716910 retry.go:31] will retry after 834.206799ms: waiting for machine to come up
I0813 00:23:55.031715 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | domain auto-20210813002105-679351 has defined MAC address 52:54:00:8e:56:53 in network mk-auto-20210813002105-679351
I0813 00:23:55.032162 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | unable to find current IP address of domain auto-20210813002105-679351 in network mk-auto-20210813002105-679351
I0813 00:23:55.032197 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | I0813 00:23:55.032086 716910 retry.go:31] will retry after 746.553905ms: waiting for machine to come up
I0813 00:23:55.779981 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | domain auto-20210813002105-679351 has defined MAC address 52:54:00:8e:56:53 in network mk-auto-20210813002105-679351
I0813 00:23:55.780498 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | unable to find current IP address of domain auto-20210813002105-679351 in network mk-auto-20210813002105-679351
I0813 00:23:55.780523 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | I0813 00:23:55.780446 716910 retry.go:31] will retry after 987.362415ms: waiting for machine to come up
I0813 00:23:56.769757 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | domain auto-20210813002105-679351 has defined MAC address 52:54:00:8e:56:53 in network mk-auto-20210813002105-679351
I0813 00:23:56.770266 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | unable to find current IP address of domain auto-20210813002105-679351 in network mk-auto-20210813002105-679351
I0813 00:23:56.770300 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | I0813 00:23:56.770221 716910 retry.go:31] will retry after 1.189835008s: waiting for machine to come up
I0813 00:23:57.961367 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | domain auto-20210813002105-679351 has defined MAC address 52:54:00:8e:56:53 in network mk-auto-20210813002105-679351
I0813 00:23:57.962155 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | unable to find current IP address of domain auto-20210813002105-679351 in network mk-auto-20210813002105-679351
I0813 00:23:57.962187 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | I0813 00:23:57.962104 716910 retry.go:31] will retry after 1.677229867s: waiting for machine to come up
I0813 00:23:56.700526 716165 pod_ready.go:102] pod "kube-controller-manager-pause-20210813001951-679351" in "kube-system" namespace has status "Ready":"False"
I0813 00:23:59.203502 716165 pod_ready.go:92] pod "kube-controller-manager-pause-20210813001951-679351" in "kube-system" namespace has status "Ready":"True"
I0813 00:23:59.203528 716165 pod_ready.go:81] duration metric: took 23.531237761s waiting for pod "kube-controller-manager-pause-20210813001951-679351" in "kube-system" namespace to be "Ready" ...
I0813 00:23:59.203548 716165 pod_ready.go:78] waiting up to 6m0s for pod "kube-proxy-2mkpr" in "kube-system" namespace to be "Ready" ...
I0813 00:23:59.229946 716165 pod_ready.go:92] pod "kube-proxy-2mkpr" in "kube-system" namespace has status "Ready":"True"
I0813 00:23:59.229972 716165 pod_ready.go:81] duration metric: took 26.415102ms waiting for pod "kube-proxy-2mkpr" in "kube-system" namespace to be "Ready" ...
I0813 00:23:59.229988 716165 pod_ready.go:78] waiting up to 6m0s for pod "kube-scheduler-pause-20210813001951-679351" in "kube-system" namespace to be "Ready" ...
I0813 00:23:59.239104 716165 pod_ready.go:92] pod "kube-scheduler-pause-20210813001951-679351" in "kube-system" namespace has status "Ready":"True"
I0813 00:23:59.239127 716165 pod_ready.go:81] duration metric: took 9.130749ms waiting for pod "kube-scheduler-pause-20210813001951-679351" in "kube-system" namespace to be "Ready" ...
I0813 00:23:59.239136 716165 pod_ready.go:38] duration metric: took 30.618753935s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0813 00:23:59.239158 716165 api_server.go:50] waiting for apiserver process to appear ...
I0813 00:23:59.239214 716165 ssh_runner.go:149] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0813 00:23:59.256452 716165 api_server.go:70] duration metric: took 30.826512028s to wait for apiserver process to appear ...
I0813 00:23:59.256483 716165 api_server.go:86] waiting for apiserver healthz status ...
I0813 00:23:59.256496 716165 api_server.go:239] Checking apiserver healthz at https://192.168.127.196:8443/healthz ...
I0813 00:23:59.264912 716165 api_server.go:265] https://192.168.127.196:8443/healthz returned 200:
ok
I0813 00:23:59.268135 716165 api_server.go:139] control plane version: v1.21.3
I0813 00:23:59.268164 716165 api_server.go:129] duration metric: took 11.67345ms to wait for apiserver health ...
I0813 00:23:59.268175 716165 system_pods.go:43] waiting for kube-system pods to appear ...
I0813 00:23:59.276357 716165 system_pods.go:59] 7 kube-system pods found
I0813 00:23:59.276426 716165 system_pods.go:61] "coredns-558bd4d5db-xjmwl" [5897a243-0289-4042-882a-d25cb005813b] Running
I0813 00:23:59.276442 716165 system_pods.go:61] "etcd-pause-20210813001951-679351" [b35637c3-36e1-4d4b-b134-d8ff45654f0b] Running
I0813 00:23:59.276449 716165 system_pods.go:61] "kube-apiserver-pause-20210813001951-679351" [85ba09e9-c18c-45e3-b17e-91c22905e23d] Running
I0813 00:23:59.276456 716165 system_pods.go:61] "kube-controller-manager-pause-20210813001951-679351" [cecc2dae-a7a9-4055-804e-1b2eef4a2618] Running
I0813 00:23:59.276466 716165 system_pods.go:61] "kube-proxy-2mkpr" [59d9290e-34c7-4e80-a909-8d989552ec78] Running
I0813 00:23:59.276473 716165 system_pods.go:61] "kube-scheduler-pause-20210813001951-679351" [6b2abce1-c0bf-4c30-b455-05b50f0431fc] Running
I0813 00:23:59.276482 716165 system_pods.go:61] "storage-provisioner" [b781b362-9644-4c96-a463-4cb61bc5ab58] Running
I0813 00:23:59.276489 716165 system_pods.go:74] duration metric: took 8.308397ms to wait for pod list to return data ...
I0813 00:23:59.276506 716165 default_sa.go:34] waiting for default service account to be created ...
I0813 00:23:59.285171 716165 default_sa.go:45] found service account: "default"
I0813 00:23:59.285197 716165 default_sa.go:55] duration metric: took 8.683697ms for default service account to be created ...
I0813 00:23:59.285206 716165 system_pods.go:116] waiting for k8s-apps to be running ...
I0813 00:23:59.292591 716165 system_pods.go:86] 7 kube-system pods found
I0813 00:23:59.292661 716165 system_pods.go:89] "coredns-558bd4d5db-xjmwl" [5897a243-0289-4042-882a-d25cb005813b] Running
I0813 00:23:59.292679 716165 system_pods.go:89] "etcd-pause-20210813001951-679351" [b35637c3-36e1-4d4b-b134-d8ff45654f0b] Running
I0813 00:23:59.292686 716165 system_pods.go:89] "kube-apiserver-pause-20210813001951-679351" [85ba09e9-c18c-45e3-b17e-91c22905e23d] Running
I0813 00:23:59.292693 716165 system_pods.go:89] "kube-controller-manager-pause-20210813001951-679351" [cecc2dae-a7a9-4055-804e-1b2eef4a2618] Running
I0813 00:23:59.292699 716165 system_pods.go:89] "kube-proxy-2mkpr" [59d9290e-34c7-4e80-a909-8d989552ec78] Running
I0813 00:23:59.292705 716165 system_pods.go:89] "kube-scheduler-pause-20210813001951-679351" [6b2abce1-c0bf-4c30-b455-05b50f0431fc] Running
I0813 00:23:59.292710 716165 system_pods.go:89] "storage-provisioner" [b781b362-9644-4c96-a463-4cb61bc5ab58] Running
I0813 00:23:59.292718 716165 system_pods.go:126] duration metric: took 7.505768ms to wait for k8s-apps to be running ...
I0813 00:23:59.292732 716165 system_svc.go:44] waiting for kubelet service to be running ....
I0813 00:23:59.292784 716165 ssh_runner.go:149] Run: sudo systemctl is-active --quiet service kubelet
I0813 00:23:59.314742 716165 system_svc.go:56] duration metric: took 21.999634ms WaitForService to wait for kubelet.
I0813 00:23:59.314836 716165 kubeadm.go:547] duration metric: took 30.884900181s to wait for : map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] ...
I0813 00:23:59.314907 716165 node_conditions.go:102] verifying NodePressure condition ...
I0813 00:23:59.334947 716165 node_conditions.go:122] node storage ephemeral capacity is 17784752Ki
I0813 00:23:59.334986 716165 node_conditions.go:123] node cpu capacity is 2
I0813 00:23:59.335009 716165 node_conditions.go:105] duration metric: took 20.073824ms to run NodePressure ...
I0813 00:23:59.335022 716165 start.go:231] waiting for startup goroutines ...
I0813 00:23:59.414520 716165 start.go:462] kubectl: 1.20.5, cluster: 1.21.3 (minor skew: 1)
I0813 00:23:59.416641 716165 out.go:177] * Done! kubectl is now configured to use "pause-20210813001951-679351" cluster and "default" namespace by default
I0813 00:23:56.504125 716004 system_pods.go:59] 1 kube-system pods found
I0813 00:23:56.504172 716004 system_pods.go:61] "storage-provisioner" [baa2edae-fbcc-11eb-bfc1-52540060d0ea] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had taints that the pod didn't tolerate.)
I0813 00:23:56.504191 716004 retry.go:31] will retry after 2.6934314s: only 1 pod(s) have shown up
I0813 00:23:59.214548 716004 system_pods.go:59] 1 kube-system pods found
I0813 00:23:59.214588 716004 system_pods.go:61] "storage-provisioner" [baa2edae-fbcc-11eb-bfc1-52540060d0ea] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had taints that the pod didn't tolerate.)
I0813 00:23:59.214606 716004 retry.go:31] will retry after 2.494582248s: only 1 pod(s) have shown up
I0813 00:23:59.640905 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | domain auto-20210813002105-679351 has defined MAC address 52:54:00:8e:56:53 in network mk-auto-20210813002105-679351
I0813 00:23:59.641364 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | unable to find current IP address of domain auto-20210813002105-679351 in network mk-auto-20210813002105-679351
I0813 00:23:59.641402 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | I0813 00:23:59.641276 716910 retry.go:31] will retry after 2.346016261s: waiting for machine to come up
I0813 00:24:01.989042 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | domain auto-20210813002105-679351 has defined MAC address 52:54:00:8e:56:53 in network mk-auto-20210813002105-679351
I0813 00:24:01.989580 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | unable to find current IP address of domain auto-20210813002105-679351 in network mk-auto-20210813002105-679351
I0813 00:24:01.989606 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | I0813 00:24:01.989539 716910 retry.go:31] will retry after 3.36678925s: waiting for machine to come up
I0813 00:24:04.629229 716004 system_pods.go:59] 2 kube-system pods found
I0813 00:24:04.629270 716004 system_pods.go:61] "coredns-fb8b8dccf-stsv6" [c23d810d-fbcc-11eb-bfc1-52540060d0ea] Pending
I0813 00:24:04.629277 716004 system_pods.go:61] "storage-provisioner" [baa2edae-fbcc-11eb-bfc1-52540060d0ea] Pending
I0813 00:24:04.629286 716004 system_pods.go:74] duration metric: took 15.902582355s to wait for pod list to return data ...
I0813 00:24:04.629298 716004 kubeadm.go:547] duration metric: took 16.63831269s to wait for : map[apiserver:true system_pods:true] ...
I0813 00:24:04.629314 716004 node_conditions.go:102] verifying NodePressure condition ...
I0813 00:24:04.723771 716004 node_conditions.go:122] node storage ephemeral capacity is 17784752Ki
I0813 00:24:04.723817 716004 node_conditions.go:123] node cpu capacity is 2
I0813 00:24:04.723835 716004 node_conditions.go:105] duration metric: took 94.514701ms to run NodePressure ...
I0813 00:24:04.723848 716004 start.go:231] waiting for startup goroutines ...
I0813 00:24:04.803131 716004 start.go:462] kubectl: 1.20.5, cluster: 1.14.0 (minor skew: 6)
I0813 00:24:04.805595 716004 out.go:177]
W0813 00:24:04.805823 716004 out.go:242] ! /usr/local/bin/kubectl is version 1.20.5, which may have incompatibilites with Kubernetes 1.14.0.
I0813 00:24:04.807470 716004 out.go:177] - Want kubectl v1.14.0? Try 'minikube kubectl -- get pods -A'
I0813 00:24:04.809337 716004 out.go:177] * Done! kubectl is now configured to use "kubernetes-upgrade-20210813002240-679351" cluster and "default" namespace by default
I0813 00:24:05.357959 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | domain auto-20210813002105-679351 has defined MAC address 52:54:00:8e:56:53 in network mk-auto-20210813002105-679351
I0813 00:24:05.358449 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | unable to find current IP address of domain auto-20210813002105-679351 in network mk-auto-20210813002105-679351
I0813 00:24:05.358483 716767 main.go:130] libmachine: (auto-20210813002105-679351) DBG | I0813 00:24:05.358397 716910 retry.go:31] will retry after 3.11822781s: waiting for machine to come up
*
* ==> container status <==
* CONTAINER       IMAGE           CREATED          STATE     NAME                       ATTEMPT   POD ID
a4a4ec1132e56     bc2bb319a7038   25 seconds ago   Running   kube-controller-manager    2         a8ae77b235803
7899222a89a71     6e38f40d628db   40 seconds ago   Running   storage-provisioner        0         4a6e63928b11f
d30696c5405d8     296a6d5035e2d   52 seconds ago   Running   coredns                    1         f5e6ffa407fcd
6204b21ab9c2c     adb2816ea823a   54 seconds ago   Running   kube-proxy                 1         46f36163c53e1
705c524b7bd2d     0369cf4303ffd   54 seconds ago   Running   etcd                       1         accc3895e9638
64c935095bced     6be0dc1302e30   54 seconds ago   Running   kube-scheduler             1         2c9091a6bd8d7
85bd885bbae1e     3d174f00aa39e   55 seconds ago   Running   kube-apiserver             1         bb4e7930b2ada
9cb0b80b9734a     bc2bb319a7038   55 seconds ago   Exited    kube-controller-manager    1         a8ae77b235803
efb6b8992aa82     296a6d5035e2d   2 minutes ago    Exited    coredns                    0         36cd319da9139
c11b8a977685b     adb2816ea823a   2 minutes ago    Exited    kube-proxy                 0         7d0f0371d8768
2efebee19d7a6     0369cf4303ffd   2 minutes ago    Exited    etcd                       0         54509c8ec15d2
3874ae5baf2d8     3d174f00aa39e   2 minutes ago    Exited    kube-apiserver             0         c77d72f80a55b
3afc8c09f8286     6be0dc1302e30   2 minutes ago    Exited    kube-scheduler             0         1bd10dcb9a7ea
*
* ==> containerd <==
* -- Logs begin at Fri 2021-08-13 00:20:51 UTC, end at Fri 2021-08-13 00:24:10 UTC. --
Aug 13 00:23:18 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:18.421885649Z" level=info msg="StartContainer for \"6204b21ab9c2c6e799da39874e9eb93e39284c65231dd05603088db2fa6b8e6b\" returns successfully"
Aug 13 00:23:18 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:18.634895282Z" level=info msg="StartContainer for \"d30696c5405d8e3fbc2bfe7ef7e391b98c301d6056d08f1d32a9614f101edc6f\" returns successfully"
Aug 13 00:23:27 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:27.651258400Z" level=info msg="Finish piping stderr of container \"9cb0b80b9734ad17c461045a38b0d0a6b8d8686ef46bf8dd99a00d62b8cc67fe\""
Aug 13 00:23:27 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:27.652374051Z" level=info msg="Finish piping stdout of container \"9cb0b80b9734ad17c461045a38b0d0a6b8d8686ef46bf8dd99a00d62b8cc67fe\""
Aug 13 00:23:27 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:27.654645594Z" level=info msg="TaskExit event &TaskExit{ContainerID:9cb0b80b9734ad17c461045a38b0d0a6b8d8686ef46bf8dd99a00d62b8cc67fe,ID:9cb0b80b9734ad17c461045a38b0d0a6b8d8686ef46bf8dd99a00d62b8cc67fe,Pid:4197,ExitStatus:255,ExitedAt:2021-08-13 00:23:27.653997105 +0000 UTC,XXX_unrecognized:[],}"
Aug 13 00:23:27 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:27.765885334Z" level=info msg="shim disconnected" id=9cb0b80b9734ad17c461045a38b0d0a6b8d8686ef46bf8dd99a00d62b8cc67fe
Aug 13 00:23:27 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:27.766396530Z" level=error msg="copy shim log" error="read /proc/self/fd/58: file already closed"
Aug 13 00:23:28 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:28.010584031Z" level=info msg="RemoveContainer for \"112c2918d72a90f4b0bbe9d6e1b3134149bf89a77f20de44d4059a1ad6edeff4\""
Aug 13 00:23:28 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:28.021541263Z" level=info msg="RemoveContainer for \"112c2918d72a90f4b0bbe9d6e1b3134149bf89a77f20de44d4059a1ad6edeff4\" returns successfully"
Aug 13 00:23:29 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:29.821371568Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:storage-provisioner,Uid:b781b362-9644-4c96-a463-4cb61bc5ab58,Namespace:kube-system,Attempt:0,}"
Aug 13 00:23:29 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:29.875382976Z" level=info msg="starting signal loop" namespace=k8s.io path=/run/containerd/io.containerd.runtime.v2.task/k8s.io/4a6e63928b11fedafa692c25459f3246b10afd3d4379e021576f2665ed408565 pid=4636
Aug 13 00:23:30 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:30.445012017Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:storage-provisioner,Uid:b781b362-9644-4c96-a463-4cb61bc5ab58,Namespace:kube-system,Attempt:0,} returns sandbox id \"4a6e63928b11fedafa692c25459f3246b10afd3d4379e021576f2665ed408565\""
Aug 13 00:23:30 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:30.455717251Z" level=info msg="CreateContainer within sandbox \"4a6e63928b11fedafa692c25459f3246b10afd3d4379e021576f2665ed408565\" for container &ContainerMetadata{Name:storage-provisioner,Attempt:0,}"
Aug 13 00:23:30 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:30.529886160Z" level=info msg="CreateContainer within sandbox \"4a6e63928b11fedafa692c25459f3246b10afd3d4379e021576f2665ed408565\" for &ContainerMetadata{Name:storage-provisioner,Attempt:0,} returns container id \"7899222a89a71ffe4fb8d232a5b3799b5c94ea7e430406ce44654a4c80946ed9\""
Aug 13 00:23:30 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:30.531938735Z" level=info msg="StartContainer for \"7899222a89a71ffe4fb8d232a5b3799b5c94ea7e430406ce44654a4c80946ed9\""
Aug 13 00:23:30 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:30.785895579Z" level=info msg="StartContainer for \"7899222a89a71ffe4fb8d232a5b3799b5c94ea7e430406ce44654a4c80946ed9\" returns successfully"
Aug 13 00:23:45 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:45.128731300Z" level=info msg="CreateContainer within sandbox \"a8ae77b235803b4e19e0eb0a6e8e4d70a30100102282506be869694a0b95d264\" for container &ContainerMetadata{Name:kube-controller-manager,Attempt:2,}"
Aug 13 00:23:45 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:45.216530954Z" level=info msg="CreateContainer within sandbox \"a8ae77b235803b4e19e0eb0a6e8e4d70a30100102282506be869694a0b95d264\" for &ContainerMetadata{Name:kube-controller-manager,Attempt:2,} returns container id \"a4a4ec1132e56e93b77cf82333943e5daaab8efa48df65ced586f5a324cde37d\""
Aug 13 00:23:45 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:45.221615537Z" level=info msg="StartContainer for \"a4a4ec1132e56e93b77cf82333943e5daaab8efa48df65ced586f5a324cde37d\""
Aug 13 00:23:45 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:45.473360047Z" level=info msg="StartContainer for \"a4a4ec1132e56e93b77cf82333943e5daaab8efa48df65ced586f5a324cde37d\" returns successfully"
Aug 13 00:23:54 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:54.024224370Z" level=info msg="StopPodSandbox for \"010dd466bc9609853f86415ca26b64dbc3754ff0bcb704d6a8abdf03248fe11a\""
Aug 13 00:23:54 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:54.024332361Z" level=info msg="TearDown network for sandbox \"010dd466bc9609853f86415ca26b64dbc3754ff0bcb704d6a8abdf03248fe11a\" successfully"
Aug 13 00:23:54 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:54.024344869Z" level=info msg="StopPodSandbox for \"010dd466bc9609853f86415ca26b64dbc3754ff0bcb704d6a8abdf03248fe11a\" returns successfully"
Aug 13 00:23:54 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:54.026182269Z" level=info msg="RemovePodSandbox for \"010dd466bc9609853f86415ca26b64dbc3754ff0bcb704d6a8abdf03248fe11a\""
Aug 13 00:23:54 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:54.033764742Z" level=info msg="RemovePodSandbox \"010dd466bc9609853f86415ca26b64dbc3754ff0bcb704d6a8abdf03248fe11a\" returns successfully"
*
* ==> coredns [d30696c5405d8e3fbc2bfe7ef7e391b98c301d6056d08f1d32a9614f101edc6f] <==
* [INFO] plugin/ready: Still waiting on: "kubernetes"
.:53
[INFO] plugin/reload: Running configuration MD5 = 21fa5447a9370c672668c17fadc8028a
CoreDNS-1.8.0
linux/amd64, go1.15.3, 054c9ae
[INFO] plugin/ready: Still waiting on: "kubernetes"
*
* ==> coredns [efb6b8992aa826974c98985c6dbeb065b7a93d6ceeacacae98b06ec18bbfd5bb] <==
* I0813 00:22:38.694893 1 trace.go:205] Trace[2019727887]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.19.2/tools/cache/reflector.go:156 (13-Aug-2021 00:22:08.692) (total time: 30002ms):
Trace[2019727887]: [30.002199421s] [30.002199421s] END
E0813 00:22:38.695325 1 reflector.go:127] pkg/mod/k8s.io/client-go@v0.19.2/tools/cache/reflector.go:156: Failed to watch *v1.Namespace: failed to list *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
I0813 00:22:38.695485 1 trace.go:205] Trace[911902081]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.19.2/tools/cache/reflector.go:156 (13-Aug-2021 00:22:08.694) (total time: 30001ms):
Trace[911902081]: [30.001141967s] [30.001141967s] END
E0813 00:22:38.695755 1 reflector.go:127] pkg/mod/k8s.io/client-go@v0.19.2/tools/cache/reflector.go:156: Failed to watch *v1.Service: failed to list *v1.Service: Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
I0813 00:22:38.695034 1 trace.go:205] Trace[1427131847]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.19.2/tools/cache/reflector.go:156 (13-Aug-2021 00:22:08.694) (total time: 30000ms):
Trace[1427131847]: [30.000422913s] [30.000422913s] END
E0813 00:22:38.696466 1 reflector.go:127] pkg/mod/k8s.io/client-go@v0.19.2/tools/cache/reflector.go:156: Failed to watch *v1.Endpoints: failed to list *v1.Endpoints: Get "https://10.96.0.1:443/api/v1/endpoints?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[INFO] plugin/ready: Still waiting on: "kubernetes"
.:53
[INFO] plugin/reload: Running configuration MD5 = db32ca3650231d74073ff4cf814959a7
CoreDNS-1.8.0
linux/amd64, go1.15.3, 054c9ae
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] Reloading
[INFO] plugin/health: Going into lameduck mode for 5s
[INFO] plugin/reload: Running configuration MD5 = 21fa5447a9370c672668c17fadc8028a
[INFO] Reloading complete
*
* ==> describe nodes <==
* Name: pause-20210813001951-679351
Roles: control-plane,master
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=pause-20210813001951-679351
kubernetes.io/os=linux
minikube.k8s.io/commit=dc1c3ca26e9449ce488a773126b8450402c94a19
minikube.k8s.io/name=pause-20210813001951-679351
minikube.k8s.io/updated_at=2021_08_13T00_21_48_0700
minikube.k8s.io/version=v1.22.0
node-role.kubernetes.io/control-plane=
node-role.kubernetes.io/master=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: /run/containerd/containerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Fri, 13 Aug 2021 00:21:44 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: pause-20210813001951-679351
AcquireTime: <unset>
RenewTime: Fri, 13 Aug 2021 00:23:57 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Fri, 13 Aug 2021 00:21:57 +0000 Fri, 13 Aug 2021 00:21:38 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Fri, 13 Aug 2021 00:21:57 +0000 Fri, 13 Aug 2021 00:21:38 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Fri, 13 Aug 2021 00:21:57 +0000 Fri, 13 Aug 2021 00:21:38 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Fri, 13 Aug 2021 00:21:57 +0000 Fri, 13 Aug 2021 00:21:57 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.127.196
Hostname: pause-20210813001951-679351
Capacity:
cpu: 2
ephemeral-storage: 17784752Ki
hugepages-2Mi: 0
memory: 2033024Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 17784752Ki
hugepages-2Mi: 0
memory: 2033024Ki
pods: 110
System Info:
Machine ID: 9109403b29744bfb99029b25cc4f9da7
System UUID: 9109403b-2974-4bfb-9902-9b25cc4f9da7
Boot ID: 0d1f634d-bf20-456d-8420-8e644eba3e38
Kernel Version: 4.19.182
OS Image: Buildroot 2020.02.12
Operating System: linux
Architecture: amd64
Container Runtime Version: containerd://1.4.9
Kubelet Version: v1.21.3
Kube-Proxy Version: v1.21.3
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (7 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
kube-system coredns-558bd4d5db-xjmwl 100m (5%!)(MISSING) 0 (0%!)(MISSING) 70Mi (3%!)(MISSING) 170Mi (8%!)(MISSING) 2m6s
kube-system etcd-pause-20210813001951-679351 100m (5%!)(MISSING) 0 (0%!)(MISSING) 100Mi (5%!)(MISSING) 0 (0%!)(MISSING) 2m26s
kube-system kube-apiserver-pause-20210813001951-679351 250m (12%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 2m17s
kube-system kube-controller-manager-pause-20210813001951-679351 200m (10%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 2m17s
kube-system kube-proxy-2mkpr 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 2m6s
kube-system kube-scheduler-pause-20210813001951-679351 100m (5%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 2m17s
kube-system storage-provisioner 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 42s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 750m (37%!)(MISSING) 0 (0%!)(MISSING)
memory 170Mi (8%!)(MISSING) 170Mi (8%!)(MISSING)
ephemeral-storage 0 (0%!)(MISSING) 0 (0%!)(MISSING)
hugepages-2Mi 0 (0%!)(MISSING) 0 (0%!)(MISSING)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal NodeHasSufficientMemory 2m39s (x6 over 2m39s) kubelet Node pause-20210813001951-679351 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 2m39s (x6 over 2m39s) kubelet Node pause-20210813001951-679351 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 2m39s (x5 over 2m39s) kubelet Node pause-20210813001951-679351 status is now: NodeHasSufficientPID
Normal Starting 2m18s kubelet Starting kubelet.
Normal NodeHasSufficientMemory 2m17s kubelet Node pause-20210813001951-679351 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 2m17s kubelet Node pause-20210813001951-679351 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 2m17s kubelet Node pause-20210813001951-679351 status is now: NodeHasSufficientPID
Normal NodeAllocatableEnforced 2m17s kubelet Updated Node Allocatable limit across pods
Normal NodeReady 2m14s kubelet Node pause-20210813001951-679351 status is now: NodeReady
Normal Starting 2m3s kube-proxy Starting kube-proxy.
Normal Starting 44s kube-proxy Starting kube-proxy.
*
* ==> dmesg <==
* on the kernel command line
[ +0.000130] platform regulatory.0: Direct firmware load for regulatory.db failed with error -2
[ +4.671269] systemd-fstab-generator[1160]: Ignoring "noauto" for root device
[ +0.045892] systemd[1]: system-getty.slice: unit configures an IP firewall, but the local system does not support BPF/cgroup firewalling.
[ +0.000003] systemd[1]: (This warning is only shown for the first unit using IP firewalling.)
[ +1.192244] SELinux: unrecognized netlink message: protocol=0 nlmsg_type=106 sclass=netlink_route_socket pid=1717 comm=systemd-network
[ +2.439853] NFSD: the nfsdcld client tracking upcall will be removed in 3.10. Please transition to using nfsdcltrack.
[ +2.154553] vboxguest: loading out-of-tree module taints kernel.
[ +0.006032] vboxguest: PCI device not found, probably running on physical hardware.
[Aug13 00:21] systemd-fstab-generator[2094]: Ignoring "noauto" for root device
[ +0.270933] systemd-fstab-generator[2126]: Ignoring "noauto" for root device
[ +0.157086] systemd-fstab-generator[2141]: Ignoring "noauto" for root device
[ +0.205044] systemd-fstab-generator[2172]: Ignoring "noauto" for root device
[ +8.129227] systemd-fstab-generator[2377]: Ignoring "noauto" for root device
[ +21.150444] systemd-fstab-generator[2810]: Ignoring "noauto" for root device
[Aug13 00:22] kauditd_printk_skb: 38 callbacks suppressed
[ +41.503227] kauditd_printk_skb: 65 callbacks suppressed
[ +6.452067] NFSD: Unable to end grace period: -110
[Aug13 00:23] systemd-fstab-generator[3489]: Ignoring "noauto" for root device
[ +0.238108] systemd-fstab-generator[3502]: Ignoring "noauto" for root device
[ +0.253277] systemd-fstab-generator[3527]: Ignoring "noauto" for root device
[ +17.104028] kauditd_printk_skb: 29 callbacks suppressed
[ +32.355407] systemd-fstab-generator[4902]: Ignoring "noauto" for root device
[Aug13 00:24] systemd-fstab-generator[5070]: Ignoring "noauto" for root device
[ +3.299751] systemd-fstab-generator[5099]: Ignoring "noauto" for root device
*
* ==> etcd [2efebee19d7a6bd77fd1333dab2cc543c575e8c6babdae865b90a1cf0fa48744] <==
* 2021-08-13 00:22:07.230197 W | etcdserver: read-only range request "key:\"/registry/endpointslices/kube-system/kube-dns-5fjzs\" " with result "range_response_count:1 size:1013" took too long (671.993662ms) to execute
2021-08-13 00:22:07.230468 W | etcdserver: read-only range request "key:\"/registry/serviceaccounts/kube-system/coredns\" " with result "range_response_count:1 size:217" took too long (741.585441ms) to execute
2021-08-13 00:22:14.207544 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2021-08-13 00:22:24.208238 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2021-08-13 00:22:34.210334 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2021-08-13 00:22:45.207896 W | etcdserver/api/etcdhttp: /health error; QGET failed etcdserver: request timed out (status code 503)
2021-08-13 00:22:45.919806 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "error:context deadline exceeded" took too long (2.000045576s) to execute
2021-08-13 00:22:47.101521 W | wal: sync duration of 4.051774181s, expected less than 1s
2021-08-13 00:22:47.103383 W | etcdserver: read-only range request "key:\"/registry/pods/kube-system/coredns-558bd4d5db-xjmwl\" " with result "range_response_count:1 size:4746" took too long (3.677716425s) to execute
2021-08-13 00:22:47.104500 W | etcdserver: read-only range request "key:\"/registry/namespaces/kube-system\" " with result "range_response_count:1 size:351" took too long (2.60570306s) to execute
2021-08-13 00:22:47.104825 W | etcdserver: read-only range request "key:\"/registry/namespaces/default\" " with result "range_response_count:1 size:341" took too long (894.799494ms) to execute
2021-08-13 00:22:47.105989 W | etcdserver: read-only range request "key:\"/registry/pods/kube-system/coredns-558bd4d5db-xjmwl\" " with result "range_response_count:1 size:4746" took too long (1.568667767s) to execute
2021-08-13 00:22:47.106790 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "range_response_count:0 size:5" took too long (1.176235696s) to execute
2021-08-13 00:22:47.107462 W | etcdserver: read-only range request "key:\"/registry/flowschemas/exempt\" " with result "range_response_count:1 size:879" took too long (2.454270208s) to execute
2021-08-13 00:22:47.950458 W | etcdserver: request "header:<ID:12242045188531646344 username:\"kube-apiserver-etcd-client\" auth_revision:1 > lease_grant:<ttl:3660-second id:29e47b3ce2b56f87>" with result "size:41" took too long (470.318775ms) to execute
2021-08-13 00:22:47.951739 W | etcdserver: read-only range request "key:\"/registry/namespaces/kube-node-lease\" " with result "range_response_count:1 size:363" took too long (801.202969ms) to execute
2021-08-13 00:22:47.955016 W | etcdserver: read-only range request "key:\"/registry/prioritylevelconfigurations/exempt\" " with result "range_response_count:1 size:371" took too long (804.019176ms) to execute
2021-08-13 00:22:47.956311 W | etcdserver: read-only range request "key:\"/registry/pods/kube-system/coredns-558bd4d5db-xjmwl\" " with result "range_response_count:1 size:4568" took too long (530.617681ms) to execute
2021-08-13 00:22:47.958454 W | etcdserver: read-only range request "key:\"/registry/priorityclasses/\" range_end:\"/registry/priorityclasses0\" count_only:true " with result "range_response_count:0 size:7" took too long (309.016563ms) to execute
2021-08-13 00:22:48.801008 W | etcdserver: read-only range request "key:\"/registry/minions/pause-20210813001951-679351\" " with result "range_response_count:1 size:4776" took too long (768.144463ms) to execute
2021-08-13 00:22:48.802711 W | etcdserver: read-only range request "key:\"/registry/controllers/\" range_end:\"/registry/controllers0\" count_only:true " with result "range_response_count:0 size:5" took too long (347.906261ms) to execute
2021-08-13 00:22:49.019606 W | etcdserver: read-only range request "key:\"/registry/serviceaccounts/default/\" range_end:\"/registry/serviceaccounts/default0\" " with result "range_response_count:1 size:209" took too long (111.551006ms) to execute
2021-08-13 00:22:49.020521 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "range_response_count:0 size:5" took too long (100.648686ms) to execute
2021-08-13 00:22:54.217451 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2021-08-13 00:23:04.208424 I | etcdserver/api/etcdhttp: /health OK (status code 200)
*
* ==> etcd [705c524b7bd2d071e133ec74fb1f433cab624312145e8e0d2e4b19e7936be85b] <==
* 2021-08-13 00:23:25.998767 W | etcdserver: read-only range request "key:\"/registry/leases/kube-node-lease/pause-20210813001951-679351\" " with result "range_response_count:1 size:671" took too long (1.83123856s) to execute
2021-08-13 00:23:25.999024 W | etcdserver: read-only range request "key:\"/registry/pods/kube-system/kube-apiserver-pause-20210813001951-679351\" " with result "range_response_count:1 size:6537" took too long (1.83134851s) to execute
2021-08-13 00:23:25.999710 W | etcdserver: read-only range request "key:\"/registry/services/endpoints/default/kubernetes\" " with result "range_response_count:1 size:423" took too long (1.836183835s) to execute
2021-08-13 00:23:26.000315 W | etcdserver: read-only range request "key:\"/registry/serviceaccounts/kube-system/kube-proxy\" " with result "range_response_count:1 size:226" took too long (1.856850168s) to execute
2021-08-13 00:23:26.003298 W | etcdserver: read-only range request "key:\"/registry/prioritylevelconfigurations/exempt\" " with result "range_response_count:1 size:371" took too long (1.862496869s) to execute
2021-08-13 00:23:26.866787 W | etcdserver: read-only range request "key:\"/registry/clusterrolebindings/\" range_end:\"/registry/clusterrolebindings0\" " with result "range_response_count:50 size:37065" took too long (849.021882ms) to execute
2021-08-13 00:23:26.868169 W | etcdserver: request "header:<ID:12242045188557790365 username:\"kube-apiserver-etcd-client\" auth_revision:1 > txn:<compare:<target:MOD key:\"/registry/leases/kube-node-lease/pause-20210813001951-679351\" mod_revision:486 > success:<request_put:<key:\"/registry/leases/kube-node-lease/pause-20210813001951-679351\" value_size:587 >> failure:<request_range:<key:\"/registry/leases/kube-node-lease/pause-20210813001951-679351\" > >>" with result "size:16" took too long (533.569848ms) to execute
2021-08-13 00:23:26.875022 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "range_response_count:0 size:5" took too long (380.351682ms) to execute
2021-08-13 00:23:26.888255 W | etcdserver: read-only range request "key:\"/registry/flowschemas/exempt\" " with result "range_response_count:1 size:879" took too long (866.035216ms) to execute
2021-08-13 00:23:26.891763 W | etcdserver: read-only range request "key:\"/registry/minions/pause-20210813001951-679351\" " with result "range_response_count:1 size:4776" took too long (869.868576ms) to execute
2021-08-13 00:23:26.892452 W | etcdserver: read-only range request "key:\"/registry/masterleases/\" range_end:\"/registry/masterleases0\" " with result "range_response_count:0 size:5" took too long (870.451036ms) to execute
2021-08-13 00:23:26.892806 W | etcdserver: read-only range request "key:\"/registry/priorityclasses/system-cluster-critical\" " with result "range_response_count:1 size:476" took too long (870.772194ms) to execute
2021-08-13 00:23:26.893782 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "range_response_count:0 size:5" took too long (353.420112ms) to execute
2021-08-13 00:23:27.618510 W | etcdserver: read-only range request "key:\"/registry/events/kube-system/kube-proxy-2mkpr.169ab5d1b9f0872d\" " with result "range_response_count:1 size:826" took too long (312.125535ms) to execute
2021-08-13 00:23:27.618804 W | etcdserver: read-only range request "key:\"/registry/clusterroles/system:controller:certificate-controller\" " with result "range_response_count:1 size:1142" took too long (324.375348ms) to execute
2021-08-13 00:23:27.619230 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "range_response_count:0 size:5" took too long (251.508486ms) to execute
2021-08-13 00:23:35.057151 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2021-08-13 00:23:44.208010 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2021-08-13 00:23:54.207487 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2021-08-13 00:24:03.753011 W | etcdserver: read-only range request "key:\"/registry/services/endpoints/kube-system/k8s.io-minikube-hostpath\" " with result "range_response_count:1 size:1125" took too long (341.385574ms) to execute
WARNING: 2021/08/13 00:24:07 grpc: Server.processUnaryRPC failed to write status: connection error: desc = "transport is closing"
2021-08-13 00:24:09.122960 W | wal: sync duration of 3.322047764s, expected less than 1s
2021-08-13 00:24:09.124505 W | etcdserver: read-only range request "key:\"/registry/namespaces/default\" " with result "range_response_count:1 size:341" took too long (1.247717726s) to execute
2021-08-13 00:24:09.125866 W | etcdserver: read-only range request "key:\"/registry/services/endpoints/kube-system/k8s.io-minikube-hostpath\" " with result "range_response_count:1 size:1125" took too long (1.315223316s) to execute
2021-08-13 00:24:09.686753 W | etcdserver: request "header:<ID:12242045188557790983 username:\"kube-apiserver-etcd-client\" auth_revision:1 > txn:<compare:<target:MOD key:\"/registry/masterleases/192.168.127.196\" mod_revision:580 > success:<request_put:<key:\"/registry/masterleases/192.168.127.196\" value_size:70 lease:3018673151703015173 >> failure:<request_range:<key:\"/registry/masterleases/192.168.127.196\" > >>" with result "size:16" took too long (348.846917ms) to execute
*
* ==> kernel <==
* 00:24:11 up 3 min, 0 users, load average: 1.77, 0.97, 0.39
Linux pause-20210813001951-679351 4.19.182 #1 SMP Fri Aug 6 09:11:32 UTC 2021 x86_64 GNU/Linux
PRETTY_NAME="Buildroot 2020.02.12"
*
* ==> kube-apiserver [3874ae5baf2d856570fa5534f52778b464323afbd92eca54de5a983517dbbb65] <==
* Trace[265158055]: [552.827013ms] [552.827013ms] END
I0813 00:22:48.803967 1 trace.go:205] Trace[40150974]: "GuaranteedUpdate etcd3" type:*apps.Deployment (13-Aug-2021 00:22:48.035) (total time: 768ms):
Trace[40150974]: ---"Transaction committed" 766ms (00:22:00.803)
Trace[40150974]: [768.574416ms] [768.574416ms] END
I0813 00:22:48.805449 1 trace.go:205] Trace[38850226]: "Update" url:/apis/apps/v1/namespaces/kube-system/deployments/coredns/status,user-agent:kube-controller-manager/v1.21.3 (linux/amd64) kubernetes/ca643a4/system:serviceaccount:kube-system:deployment-controller,client:192.168.127.196,accept:application/vnd.kubernetes.protobuf, */*,protocol:HTTP/2.0 (13-Aug-2021 00:22:48.034) (total time: 770ms):
Trace[38850226]: ---"Object stored in database" 769ms (00:22:00.805)
Trace[38850226]: [770.627081ms] [770.627081ms] END
I0813 00:22:48.807911 1 trace.go:205] Trace[1153692894]: "GuaranteedUpdate etcd3" type:*discovery.EndpointSlice (13-Aug-2021 00:22:48.029) (total time: 777ms):
Trace[1153692894]: ---"Transaction committed" 776ms (00:22:00.807)
Trace[1153692894]: [777.942281ms] [777.942281ms] END
I0813 00:22:48.811870 1 trace.go:205] Trace[2055050687]: "Get" url:/api/v1/nodes/pause-20210813001951-679351,user-agent:minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format,client:192.168.127.1,accept:application/json, */*,protocol:HTTP/2.0 (13-Aug-2021 00:22:48.030) (total time: 780ms):
Trace[2055050687]: ---"About to write a response" 779ms (00:22:00.810)
Trace[2055050687]: [780.446725ms] [780.446725ms] END
I0813 00:22:48.815873 1 trace.go:205] Trace[1709682153]: "Update" url:/apis/discovery.k8s.io/v1/namespaces/kube-system/endpointslices/kube-dns-5fjzs,user-agent:kube-controller-manager/v1.21.3 (linux/amd64) kubernetes/ca643a4/system:serviceaccount:kube-system:endpointslice-controller,client:192.168.127.196,accept:application/vnd.kubernetes.protobuf, */*,protocol:HTTP/2.0 (13-Aug-2021 00:22:48.029) (total time: 786ms):
Trace[1709682153]: ---"Object stored in database" 785ms (00:22:00.815)
Trace[1709682153]: [786.028644ms] [786.028644ms] END
I0813 00:22:48.812641 1 trace.go:205] Trace[1460083222]: "GuaranteedUpdate etcd3" type:*core.Endpoints (13-Aug-2021 00:22:48.029) (total time: 783ms):
Trace[1460083222]: ---"Transaction committed" 782ms (00:22:00.812)
Trace[1460083222]: [783.305236ms] [783.305236ms] END
I0813 00:22:48.819982 1 trace.go:205] Trace[920420538]: "Update" url:/api/v1/namespaces/kube-system/endpoints/kube-dns,user-agent:kube-controller-manager/v1.21.3 (linux/amd64) kubernetes/ca643a4/system:serviceaccount:kube-system:endpoint-controller,client:192.168.127.196,accept:application/vnd.kubernetes.protobuf, */*,protocol:HTTP/2.0 (13-Aug-2021 00:22:48.029) (total time: 790ms):
Trace[920420538]: ---"Object stored in database" 790ms (00:22:00.819)
Trace[920420538]: [790.783982ms] [790.783982ms] END
I0813 00:23:04.050873 1 client.go:360] parsed scheme: "passthrough"
I0813 00:23:04.051445 1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379 <nil> 0 <nil>}] <nil> <nil>}
I0813 00:23:04.051754 1 clientconn.go:948] ClientConn switching balancer to "pick_first"
*
* ==> kube-apiserver [85bd885bbae1eb206d16fda70dde9e78726f0563495bc0c5f64cf2083b9a7bf7] <==
* I0813 00:23:26.905285 1 storage_scheduling.go:148] all system priority classes are created successfully or already exist.
I0813 00:23:29.331854 1 controller.go:611] quota admission added evaluator for: serviceaccounts
I0813 00:23:29.366533 1 controller.go:611] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I0813 00:23:29.384370 1 controller.go:611] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I0813 00:23:29.403160 1 controller.go:611] quota admission added evaluator for: endpoints
I0813 00:23:29.518332 1 controller.go:611] quota admission added evaluator for: events.events.k8s.io
I0813 00:23:57.987189 1 controller.go:611] quota admission added evaluator for: endpointslices.discovery.k8s.io
I0813 00:24:01.430291 1 client.go:360] parsed scheme: "passthrough"
I0813 00:24:01.430491 1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379 <nil> 0 <nil>}] <nil> <nil>}
I0813 00:24:01.430526 1 clientconn.go:948] ClientConn switching balancer to "pick_first"
E0813 00:24:07.259737 1 status.go:71] apiserver received an error that is not an metav1.Status: &errors.errorString{s:"context canceled"}: context canceled
E0813 00:24:07.259860 1 status.go:71] apiserver received an error that is not an metav1.Status: &errors.errorString{s:"client disconnected"}: client disconnected
E0813 00:24:07.263006 1 writers.go:117] apiserver was unable to write a JSON response: http: Handler timeout
E0813 00:24:07.263568 1 wrap.go:54] timeout or abort while handling: GET "/apis/storage.k8s.io/v1/csinodes/pause-20210813001951-679351"
E0813 00:24:07.265357 1 status.go:71] apiserver received an error that is not an metav1.Status: &errors.errorString{s:"http: Handler timeout"}: http: Handler timeout
E0813 00:24:07.268867 1 writers.go:130] apiserver was unable to write a fallback JSON response: http: Handler timeout
I0813 00:24:09.127818 1 trace.go:205] Trace[986017853]: "Get" url:/api/v1/namespaces/default,user-agent:kube-apiserver/v1.21.3 (linux/amd64) kubernetes/ca643a4,client:127.0.0.1,accept:application/vnd.kubernetes.protobuf, */*,protocol:HTTP/2.0 (13-Aug-2021 00:24:07.874) (total time: 1253ms):
Trace[986017853]: ---"About to write a response" 1253ms (00:24:00.127)
Trace[986017853]: [1.253446276s] [1.253446276s] END
I0813 00:24:09.129569 1 trace.go:205] Trace[1940889522]: "Get" url:/api/v1/namespaces/kube-system/endpoints/k8s.io-minikube-hostpath,user-agent:storage-provisioner/v0.0.0 (linux/amd64) kubernetes/$Format,client:192.168.127.196,accept:application/json, */*,protocol:HTTP/2.0 (13-Aug-2021 00:24:07.809) (total time: 1319ms):
Trace[1940889522]: ---"About to write a response" 1319ms (00:24:00.129)
Trace[1940889522]: [1.319757496s] [1.319757496s] END
I0813 00:24:09.688489 1 trace.go:205] Trace[1106331583]: "GuaranteedUpdate etcd3" type:*v1.Endpoints (13-Aug-2021 00:24:09.160) (total time: 527ms):
Trace[1106331583]: ---"Transaction committed" 523ms (00:24:00.688)
Trace[1106331583]: [527.566261ms] [527.566261ms] END
*
* ==> kube-controller-manager [9cb0b80b9734ad17c461045a38b0d0a6b8d8686ef46bf8dd99a00d62b8cc67fe] <==
* /usr/local/go/src/bytes/buffer.go:204 +0xbe
crypto/tls.(*Conn).readFromUntil(0xc000126e00, 0x500dda0, 0xc0008fa500, 0x5, 0xc0008fa500, 0x431)
/usr/local/go/src/crypto/tls/conn.go:798 +0xf3
crypto/tls.(*Conn).readRecordOrCCS(0xc000126e00, 0x0, 0x0, 0x0)
/usr/local/go/src/crypto/tls/conn.go:605 +0x115
crypto/tls.(*Conn).readRecord(...)
/usr/local/go/src/crypto/tls/conn.go:573
crypto/tls.(*Conn).Read(0xc000126e00, 0xc000be3000, 0x1000, 0x1000, 0x0, 0x0, 0x0)
/usr/local/go/src/crypto/tls/conn.go:1276 +0x165
bufio.(*Reader).Read(0xc0001c8180, 0xc0002631b8, 0x9, 0x9, 0x9b69eb, 0xc001059c78, 0x4071a5)
/usr/local/go/src/bufio/bufio.go:227 +0x222
io.ReadAtLeast(0x5007a00, 0xc0001c8180, 0xc0002631b8, 0x9, 0x9, 0x9, 0xc000c0fec0, 0x4c5c995259c000, 0xc000c0fec0)
/usr/local/go/src/io/io.go:328 +0x87
io.ReadFull(...)
/usr/local/go/src/io/io.go:347
k8s.io/kubernetes/vendor/golang.org/x/net/http2.readFrameHeader(0xc0002631b8, 0x9, 0x9, 0x5007a00, 0xc0001c8180, 0x0, 0x0, 0x0, 0x0)
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/golang.org/x/net/http2/frame.go:237 +0x89
k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Framer).ReadFrame(0xc000263180, 0xc00107d3b0, 0x0, 0x0, 0x0)
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/golang.org/x/net/http2/frame.go:492 +0xa5
k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*clientConnReadLoop).run(0xc001059fa8, 0x0, 0x0)
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/golang.org/x/net/http2/transport.go:1819 +0xd8
k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*ClientConn).readLoop(0xc000001200)
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/golang.org/x/net/http2/transport.go:1741 +0x6f
created by k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).newClientConn
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/golang.org/x/net/http2/transport.go:705 +0x6c5
*
* ==> kube-controller-manager [a4a4ec1132e56e93b77cf82333943e5daaab8efa48df65ced586f5a324cde37d] <==
* I0813 00:23:57.943260 1 shared_informer.go:247] Caches are synced for ReplicaSet
I0813 00:23:57.947953 1 shared_informer.go:247] Caches are synced for deployment
I0813 00:23:57.955167 1 shared_informer.go:247] Caches are synced for namespace
I0813 00:23:57.955304 1 shared_informer.go:247] Caches are synced for TTL
I0813 00:23:57.957499 1 shared_informer.go:247] Caches are synced for cronjob
I0813 00:23:57.961016 1 shared_informer.go:240] Waiting for caches to sync for garbage collector
I0813 00:23:57.962629 1 shared_informer.go:247] Caches are synced for HPA
I0813 00:23:57.965129 1 shared_informer.go:247] Caches are synced for job
I0813 00:23:57.967122 1 shared_informer.go:247] Caches are synced for certificate-csrsigning-kubelet-serving
I0813 00:23:57.971271 1 shared_informer.go:247] Caches are synced for certificate-csrsigning-kubelet-client
I0813 00:23:57.972709 1 shared_informer.go:247] Caches are synced for certificate-csrsigning-legacy-unknown
I0813 00:23:57.974698 1 shared_informer.go:247] Caches are synced for TTL after finished
I0813 00:23:57.982328 1 shared_informer.go:247] Caches are synced for certificate-csrapproving
I0813 00:23:57.982525 1 shared_informer.go:247] Caches are synced for certificate-csrsigning-kube-apiserver-client
I0813 00:23:57.982606 1 shared_informer.go:247] Caches are synced for stateful set
I0813 00:23:57.984923 1 shared_informer.go:247] Caches are synced for bootstrap_signer
I0813 00:23:57.989195 1 shared_informer.go:247] Caches are synced for persistent volume
I0813 00:23:57.993635 1 shared_informer.go:247] Caches are synced for crt configmap
I0813 00:23:58.072945 1 shared_informer.go:247] Caches are synced for disruption
I0813 00:23:58.073409 1 disruption.go:371] Sending events to api server.
I0813 00:23:58.151201 1 shared_informer.go:247] Caches are synced for resource quota
I0813 00:23:58.160353 1 shared_informer.go:247] Caches are synced for resource quota
I0813 00:23:58.659003 1 shared_informer.go:247] Caches are synced for garbage collector
I0813 00:23:58.659723 1 garbagecollector.go:151] Garbage collector: all resource monitors have synced. Proceeding to collect garbage
I0813 00:23:58.662160 1 shared_informer.go:247] Caches are synced for garbage collector
*
* ==> kube-proxy [6204b21ab9c2c6e799da39874e9eb93e39284c65231dd05603088db2fa6b8e6b] <==
* I0813 00:23:26.907583 1 node.go:172] Successfully retrieved node IP: 192.168.127.196
I0813 00:23:26.907779 1 server_others.go:140] Detected node IP 192.168.127.196
W0813 00:23:26.907836 1 server_others.go:598] Unknown proxy mode "", assuming iptables proxy
W0813 00:23:27.211577 1 server_others.go:197] No iptables support for IPv6: exit status 3
I0813 00:23:27.211713 1 server_others.go:208] kube-proxy running in single-stack IPv4 mode
I0813 00:23:27.211745 1 server_others.go:212] Using iptables Proxier.
I0813 00:23:27.212315 1 server.go:643] Version: v1.21.3
I0813 00:23:27.215676 1 config.go:315] Starting service config controller
I0813 00:23:27.215816 1 shared_informer.go:240] Waiting for caches to sync for service config
I0813 00:23:27.215853 1 config.go:224] Starting endpoint slice config controller
I0813 00:23:27.215859 1 shared_informer.go:240] Waiting for caches to sync for endpoint slice config
W0813 00:23:27.234006 1 warnings.go:70] discovery.k8s.io/v1beta1 EndpointSlice is deprecated in v1.21+, unavailable in v1.25+; use discovery.k8s.io/v1 EndpointSlice
W0813 00:23:27.237301 1 warnings.go:70] discovery.k8s.io/v1beta1 EndpointSlice is deprecated in v1.21+, unavailable in v1.25+; use discovery.k8s.io/v1 EndpointSlice
I0813 00:23:27.316802 1 shared_informer.go:247] Caches are synced for endpoint slice config
I0813 00:23:27.316986 1 shared_informer.go:247] Caches are synced for service config
*
* ==> kube-proxy [c11b8a977685bc2516a3a180b7d7e5a078649d5b9c68db67af64bdbf0438193c] <==
* I0813 00:22:08.526654 1 node.go:172] Successfully retrieved node IP: 192.168.127.196
I0813 00:22:08.527015 1 server_others.go:140] Detected node IP 192.168.127.196
W0813 00:22:08.527394 1 server_others.go:598] Unknown proxy mode "", assuming iptables proxy
W0813 00:22:08.644892 1 server_others.go:197] No iptables support for IPv6: exit status 3
I0813 00:22:08.644915 1 server_others.go:208] kube-proxy running in single-stack IPv4 mode
I0813 00:22:08.644934 1 server_others.go:212] Using iptables Proxier.
I0813 00:22:08.647847 1 server.go:643] Version: v1.21.3
I0813 00:22:08.651170 1 config.go:315] Starting service config controller
I0813 00:22:08.651751 1 shared_informer.go:240] Waiting for caches to sync for service config
I0813 00:22:08.657258 1 config.go:224] Starting endpoint slice config controller
I0813 00:22:08.657749 1 shared_informer.go:240] Waiting for caches to sync for endpoint slice config
W0813 00:22:08.659509 1 warnings.go:70] discovery.k8s.io/v1beta1 EndpointSlice is deprecated in v1.21+, unavailable in v1.25+; use discovery.k8s.io/v1 EndpointSlice
W0813 00:22:08.695660 1 warnings.go:70] discovery.k8s.io/v1beta1 EndpointSlice is deprecated in v1.21+, unavailable in v1.25+; use discovery.k8s.io/v1 EndpointSlice
I0813 00:22:08.752469 1 shared_informer.go:247] Caches are synced for service config
I0813 00:22:08.759025 1 shared_informer.go:247] Caches are synced for endpoint slice config
*
* ==> kube-scheduler [3afc8c09f828616463f8d4246cdb7a602c45569e04de078f3b507b5df49993e8] <==
* E0813 00:21:42.747204 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E0813 00:21:42.747540 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E0813 00:21:42.748267 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E0813 00:21:42.748530 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E0813 00:21:42.749032 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E0813 00:21:42.749616 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E0813 00:21:42.747557 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E0813 00:21:42.750598 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1beta1.CSIStorageCapacity: failed to list *v1beta1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E0813 00:21:42.751707 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E0813 00:21:43.586759 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E0813 00:21:43.586851 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E0813 00:21:43.611677 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E0813 00:21:43.619757 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E0813 00:21:43.758571 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1beta1.CSIStorageCapacity: failed to list *v1beta1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E0813 00:21:43.808629 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E0813 00:21:43.858463 1 reflector.go:138] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E0813 00:21:43.874015 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E0813 00:21:43.903878 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E0813 00:21:43.932330 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E0813 00:21:44.053954 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E0813 00:21:44.184579 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E0813 00:21:44.276152 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E0813 00:21:44.276696 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E0813 00:21:45.625656 1 reflector.go:138] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
I0813 00:21:51.218463 1 shared_informer.go:247] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
*
* ==> kube-scheduler [64c935095bced983323337af866a47ac732cfe3496f8dfd31387b8833f7cc6c0] <==
* I0813 00:23:17.339713 1 serving.go:347] Generated self-signed cert in-memory
W0813 00:23:24.103838 1 requestheader_controller.go:193] Unable to get configmap/extension-apiserver-authentication in kube-system. Usually fixed by 'kubectl create rolebinding -n kube-system ROLEBINDING_NAME --role=extension-apiserver-authentication-reader --serviceaccount=YOUR_NS:YOUR_SA'
W0813 00:23:24.103977 1 authentication.go:337] Error looking up in-cluster authentication configuration: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot get resource "configmaps" in API group "" in the namespace "kube-system"
W0813 00:23:24.103989 1 authentication.go:338] Continuing without authentication configuration. This may treat all requests as anonymous.
W0813 00:23:24.103997 1 authentication.go:339] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false
I0813 00:23:24.177616 1 secure_serving.go:197] Serving securely on 127.0.0.1:10259
I0813 00:23:24.178246 1 tlsconfig.go:240] Starting DynamicServingCertificateController
I0813 00:23:24.179347 1 configmap_cafile_content.go:202] Starting client-ca::kube-system::extension-apiserver-authentication::client-ca-file
I0813 00:23:24.196646 1 shared_informer.go:240] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
I0813 00:23:24.304977 1 shared_informer.go:247] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
*
* ==> kubelet <==
* -- Logs begin at Fri 2021-08-13 00:20:51 UTC, end at Fri 2021-08-13 00:24:11 UTC. --
Aug 13 00:24:06 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:06.907271 5078 server.go:660] "--cgroups-per-qos enabled, but --cgroup-root was not specified. defaulting to /"
Aug 13 00:24:06 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:06.907671 5078 container_manager_linux.go:278] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Aug 13 00:24:06 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:06.907807 5078 container_manager_linux.go:283] "Creating Container Manager object based on Node Config" nodeConfig={RuntimeCgroupsName: SystemCgroupsName: KubeletCgroupsName: ContainerRuntime:remote CgroupsPerQOS:true CgroupRoot:/ CgroupDriver:cgroupfs KubeletRootDir:/var/lib/kubelet ProtectKernelDefaults:false NodeAllocatableConfig:{KubeReservedCgroupName: SystemReservedCgroupName: ReservedSystemCPUs: EnforceNodeAllocatable:map[pods:{}] KubeReserved:map[] SystemReserved:map[] HardEvictionThresholds:[]} QOSReserved:map[] ExperimentalCPUManagerPolicy:none ExperimentalTopologyManagerScope:container ExperimentalCPUManagerReconcilePeriod:10s ExperimentalMemoryManagerPolicy:None ExperimentalMemoryManagerReservedMemory:[] ExperimentalPodPidsLimit:-1 EnforceCPULimits:true CPUCFSQuotaPeriod:100ms ExperimentalTopologyManagerPolicy:none}
Aug 13 00:24:06 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:06.907872 5078 topology_manager.go:120] "Creating topology manager with policy per scope" topologyPolicyName="none" topologyScopeName="container"
Aug 13 00:24:06 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:06.907885 5078 container_manager_linux.go:314] "Initializing Topology Manager" policy="none" scope="container"
Aug 13 00:24:06 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:06.907893 5078 container_manager_linux.go:319] "Creating device plugin manager" devicePluginEnabled=true
Aug 13 00:24:06 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:06.908017 5078 remote_runtime.go:62] parsed scheme: ""
Aug 13 00:24:06 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:06.908027 5078 remote_runtime.go:62] scheme "" not registered, fallback to default scheme
Aug 13 00:24:06 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:06.908151 5078 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{/run/containerd/containerd.sock <nil> 0 <nil>}] <nil> <nil>}
Aug 13 00:24:06 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:06.908169 5078 clientconn.go:948] ClientConn switching balancer to "pick_first"
Aug 13 00:24:06 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:06.908324 5078 remote_image.go:50] parsed scheme: ""
Aug 13 00:24:06 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:06.908333 5078 remote_image.go:50] scheme "" not registered, fallback to default scheme
Aug 13 00:24:06 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:06.908348 5078 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{/run/containerd/containerd.sock <nil> 0 <nil>}] <nil> <nil>}
Aug 13 00:24:06 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:06.908359 5078 clientconn.go:948] ClientConn switching balancer to "pick_first"
Aug 13 00:24:06 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:06.908467 5078 kubelet.go:404] "Attempting to sync node with API server"
Aug 13 00:24:06 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:06.908488 5078 kubelet.go:272] "Adding static pod path" path="/etc/kubernetes/manifests"
Aug 13 00:24:06 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:06.908521 5078 kubelet.go:283] "Adding apiserver pod source"
Aug 13 00:24:06 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:06.908555 5078 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Aug 13 00:24:06 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:06.909189 5078 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick
Aug 13 00:24:06 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:06.913695 5078 kuberuntime_manager.go:222] "Container runtime initialized" containerRuntime="containerd" version="v1.4.9" apiVersion="v1alpha2"
Aug 13 00:24:07 pause-20210813001951-679351 kubelet[5078]: E0813 00:24:07.237758 5078 aws_credentials.go:77] while getting AWS credentials NoCredentialProviders: no valid providers in chain. Deprecated.
Aug 13 00:24:07 pause-20210813001951-679351 kubelet[5078]: For verbose messaging see aws.Config.CredentialsChainVerboseErrors
Aug 13 00:24:07 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:07.244131 5078 server.go:1190] "Started kubelet"
Aug 13 00:24:07 pause-20210813001951-679351 systemd[1]: kubelet.service: Succeeded.
Aug 13 00:24:07 pause-20210813001951-679351 systemd[1]: Stopped kubelet: The Kubernetes Node Agent.
*
* ==> storage-provisioner [7899222a89a71ffe4fb8d232a5b3799b5c94ea7e430406ce44654a4c80946ed9] <==
* I0813 00:23:30.839269 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I0813 00:23:30.879440 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I0813 00:23:30.880848 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I0813 00:23:30.925896 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I0813 00:23:30.926952 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_pause-20210813001951-679351_90bbf71c-5385-47c0-853d-3e2fde5ecc99!
I0813 00:23:30.936151 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"44c00147-d6a4-4a55-bec0-85ce7cb56602", APIVersion:"v1", ResourceVersion:"544", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' pause-20210813001951-679351_90bbf71c-5385-47c0-853d-3e2fde5ecc99 became leader
I0813 00:23:31.046717 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_pause-20210813001951-679351_90bbf71c-5385-47c0-853d-3e2fde5ecc99!
-- /stdout --
helpers_test.go:255: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p pause-20210813001951-679351 -n pause-20210813001951-679351
helpers_test.go:255: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.APIServer}} -p pause-20210813001951-679351 -n pause-20210813001951-679351: exit status 2 (250.36369ms)
-- stdout --
Running
-- /stdout --
helpers_test.go:255: status error: exit status 2 (may be ok)
helpers_test.go:262: (dbg) Run: kubectl --context pause-20210813001951-679351 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:271: non-running pods:
helpers_test.go:273: ======> post-mortem[TestPause/serial/PauseAgain]: describe non-running pods <======
helpers_test.go:276: (dbg) Run: kubectl --context pause-20210813001951-679351 describe pod
helpers_test.go:276: (dbg) Non-zero exit: kubectl --context pause-20210813001951-679351 describe pod : exit status 1 (48.832134ms)
** stderr **
error: resource name may not be empty
** /stderr **
helpers_test.go:278: kubectl --context pause-20210813001951-679351 describe pod : exit status 1
helpers_test.go:223: -----------------------post-mortem--------------------------------
helpers_test.go:240: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p pause-20210813001951-679351 -n pause-20210813001951-679351
helpers_test.go:240: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Host}} -p pause-20210813001951-679351 -n pause-20210813001951-679351: exit status 2 (258.652091ms)
-- stdout --
Running
-- /stdout --
helpers_test.go:240: status error: exit status 2 (may be ok)
helpers_test.go:245: <<< TestPause/serial/PauseAgain FAILED: start of post-mortem logs <<<
helpers_test.go:246: ======> post-mortem[TestPause/serial/PauseAgain]: minikube logs <======
helpers_test.go:248: (dbg) Run: out/minikube-linux-amd64 -p pause-20210813001951-679351 logs -n 25
helpers_test.go:248: (dbg) Done: out/minikube-linux-amd64 -p pause-20210813001951-679351 logs -n 25: (1.601554483s)
helpers_test.go:253: TestPause/serial/PauseAgain logs:
-- stdout --
*
* ==> Audit <==
* |---------|------------------------------------------|------------------------------------------|---------|---------|-------------------------------|-------------------------------|
| Command | Args | Profile | User | Version | Start Time | End Time |
|---------|------------------------------------------|------------------------------------------|---------|---------|-------------------------------|-------------------------------|
| stop | -p | scheduled-stop-20210813001820-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:19:35 UTC | Fri, 13 Aug 2021 00:19:42 UTC |
| | scheduled-stop-20210813001820-679351 | | | | | |
| | --schedule 5s | | | | | |
| delete | -p | scheduled-stop-20210813001820-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:19:50 UTC | Fri, 13 Aug 2021 00:19:51 UTC |
| | scheduled-stop-20210813001820-679351 | | | | | |
| start | -p | force-systemd-env-20210813001951-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:19:51 UTC | Fri, 13 Aug 2021 00:21:03 UTC |
| | force-systemd-env-20210813001951-679351 | | | | | |
| | --memory=2048 --alsologtostderr | | | | | |
| | -v=5 --driver=kvm2 | | | | | |
| | --container-runtime=containerd | | | | | |
| -p | force-systemd-env-20210813001951-679351 | force-systemd-env-20210813001951-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:21:03 UTC | Fri, 13 Aug 2021 00:21:04 UTC |
| | ssh cat /etc/containerd/config.toml | | | | | |
| delete | -p | force-systemd-env-20210813001951-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:21:04 UTC | Fri, 13 Aug 2021 00:21:05 UTC |
| | force-systemd-env-20210813001951-679351 | | | | | |
| delete | -p | kubenet-20210813002105-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:21:05 UTC | Fri, 13 Aug 2021 00:21:05 UTC |
| | kubenet-20210813002105-679351 | | | | | |
| delete | -p false-20210813002105-679351 | false-20210813002105-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:21:05 UTC | Fri, 13 Aug 2021 00:21:05 UTC |
| start | -p | offline-containerd-20210813001951-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:19:51 UTC | Fri, 13 Aug 2021 00:22:10 UTC |
| | offline-containerd-20210813001951-679351 | | | | | |
| | --alsologtostderr -v=1 --memory=2048 | | | | | |
| | --wait=true --driver=kvm2 | | | | | |
| | --container-runtime=containerd | | | | | |
| delete | -p | offline-containerd-20210813001951-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:22:10 UTC | Fri, 13 Aug 2021 00:22:11 UTC |
| | offline-containerd-20210813001951-679351 | | | | | |
| start | -p | force-systemd-flag-20210813002108-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:21:08 UTC | Fri, 13 Aug 2021 00:22:38 UTC |
| | force-systemd-flag-20210813002108-679351 | | | | | |
| | --memory=2048 --force-systemd | | | | | |
| | --alsologtostderr -v=5 --driver=kvm2 | | | | | |
| | --container-runtime=containerd | | | | | |
| -p | force-systemd-flag-20210813002108-679351 | force-systemd-flag-20210813002108-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:22:38 UTC | Fri, 13 Aug 2021 00:22:38 UTC |
| | ssh cat /etc/containerd/config.toml | | | | | |
| delete | -p | force-systemd-flag-20210813002108-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:22:38 UTC | Fri, 13 Aug 2021 00:22:40 UTC |
| | force-systemd-flag-20210813002108-679351 | | | | | |
| start | -p pause-20210813001951-679351 | pause-20210813001951-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:19:51 UTC | Fri, 13 Aug 2021 00:22:49 UTC |
| | --memory=2048 | | | | | |
| | --install-addons=false | | | | | |
| | --wait=all --driver=kvm2 | | | | | |
| | --container-runtime=containerd | | | | | |
| start | -p | cert-options-20210813002211-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:22:11 UTC | Fri, 13 Aug 2021 00:23:29 UTC |
| | cert-options-20210813002211-679351 | | | | | |
| | --memory=2048 | | | | | |
| | --apiserver-ips=127.0.0.1 | | | | | |
| | --apiserver-ips=192.168.15.15 | | | | | |
| | --apiserver-names=localhost | | | | | |
| | --apiserver-names=www.google.com | | | | | |
| | --apiserver-port=8555 | | | | | |
| | --driver=kvm2 | | | | | |
| | --container-runtime=containerd | | | | | |
| -p | cert-options-20210813002211-679351 | cert-options-20210813002211-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:23:29 UTC | Fri, 13 Aug 2021 00:23:29 UTC |
| | ssh openssl x509 -text -noout -in | | | | | |
| | /var/lib/minikube/certs/apiserver.crt | | | | | |
| delete | -p | cert-options-20210813002211-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:23:30 UTC | Fri, 13 Aug 2021 00:23:31 UTC |
| | cert-options-20210813002211-679351 | | | | | |
| start | -p | stopped-upgrade-20210813001951-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:22:29 UTC | Fri, 13 Aug 2021 00:23:40 UTC |
| | stopped-upgrade-20210813001951-679351 | | | | | |
| | --memory=2200 --alsologtostderr | | | | | |
| | -v=1 --driver=kvm2 | | | | | |
| | --container-runtime=containerd | | | | | |
| logs | -p | stopped-upgrade-20210813001951-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:23:40 UTC | Fri, 13 Aug 2021 00:23:42 UTC |
| | stopped-upgrade-20210813001951-679351 | | | | | |
| delete | -p | stopped-upgrade-20210813001951-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:23:42 UTC | Fri, 13 Aug 2021 00:23:43 UTC |
| | stopped-upgrade-20210813001951-679351 | | | | | |
| start | -p pause-20210813001951-679351 | pause-20210813001951-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:22:49 UTC | Fri, 13 Aug 2021 00:23:59 UTC |
| | --alsologtostderr | | | | | |
| | -v=1 --driver=kvm2 | | | | | |
| | --container-runtime=containerd | | | | | |
| pause | -p pause-20210813001951-679351 | pause-20210813001951-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:23:59 UTC | Fri, 13 Aug 2021 00:24:00 UTC |
| | --alsologtostderr -v=5 | | | | | |
| unpause | -p pause-20210813001951-679351 | pause-20210813001951-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:24:00 UTC | Fri, 13 Aug 2021 00:24:03 UTC |
| | --alsologtostderr -v=5 | | | | | |
| start | -p | kubernetes-upgrade-20210813002240-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:22:40 UTC | Fri, 13 Aug 2021 00:24:04 UTC |
| | kubernetes-upgrade-20210813002240-679351 | | | | | |
| | --memory=2200 | | | | | |
| | --kubernetes-version=v1.14.0 | | | | | |
| | --alsologtostderr -v=1 --driver=kvm2 | | | | | |
| | --container-runtime=containerd | | | | | |
| stop | -p | kubernetes-upgrade-20210813002240-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:24:04 UTC | Fri, 13 Aug 2021 00:24:10 UTC |
| | kubernetes-upgrade-20210813002240-679351 | | | | | |
| -p | pause-20210813001951-679351 | pause-20210813001951-679351 | jenkins | v1.22.0 | Fri, 13 Aug 2021 00:24:10 UTC | Fri, 13 Aug 2021 00:24:11 UTC |
| | logs -n 25 | | | | | |
|---------|------------------------------------------|------------------------------------------|---------|---------|-------------------------------|-------------------------------|
*
* ==> Last Start <==
* Log file created at: 2021/08/13 00:24:11
Running on machine: debian-jenkins-agent-10
Binary: Built with gc go1.16.7 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0813 00:24:11.119018 717315 out.go:298] Setting OutFile to fd 1 ...
I0813 00:24:11.119125 717315 out.go:345] TERM=,COLORTERM=, which probably does not support color
I0813 00:24:11.119134 717315 out.go:311] Setting ErrFile to fd 2...
I0813 00:24:11.119138 717315 out.go:345] TERM=,COLORTERM=, which probably does not support color
I0813 00:24:11.119251 717315 root.go:313] Updating PATH: /home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/.minikube/bin
I0813 00:24:11.119487 717315 out.go:305] Setting JSON to false
I0813 00:24:11.155154 717315 start.go:111] hostinfo: {"hostname":"debian-jenkins-agent-10","uptime":14814,"bootTime":1628799437,"procs":204,"os":"linux","platform":"debian","platformFamily":"debian","platformVersion":"9.13","kernelVersion":"4.9.0-16-amd64","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"c29e0b88-ef83-6765-d2fa-208fdce1af32"}
I0813 00:24:11.155270 717315 start.go:121] virtualization: kvm guest
I0813 00:24:11.158258 717315 out.go:177] * [kubernetes-upgrade-20210813002240-679351] minikube v1.22.0 on Debian 9.13 (kvm/amd64)
I0813 00:24:11.159950 717315 out.go:177] - KUBECONFIG=/home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/kubeconfig
I0813 00:24:11.158423 717315 notify.go:169] Checking for updates...
I0813 00:24:11.161434 717315 out.go:177] - MINIKUBE_BIN=out/minikube-linux-amd64
I0813 00:24:11.162942 717315 out.go:177] - MINIKUBE_HOME=/home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/.minikube
I0813 00:24:11.164427 717315 out.go:177] - MINIKUBE_LOCATION=12230
I0813 00:24:11.165289 717315 main.go:130] libmachine: Found binary path at /home/jenkins/workspace/KVM_Linux_containerd_integration/out/docker-machine-driver-kvm2
I0813 00:24:11.165347 717315 main.go:130] libmachine: Launching plugin server for driver kvm2
I0813 00:24:11.177008 717315 main.go:130] libmachine: Plugin server listening at address 127.0.0.1:39705
I0813 00:24:11.177479 717315 main.go:130] libmachine: () Calling .GetVersion
I0813 00:24:11.178172 717315 main.go:130] libmachine: Using API Version 1
I0813 00:24:11.178199 717315 main.go:130] libmachine: () Calling .SetConfigRaw
I0813 00:24:11.185428 717315 main.go:130] libmachine: () Calling .GetMachineName
I0813 00:24:11.185769 717315 main.go:130] libmachine: (kubernetes-upgrade-20210813002240-679351) Calling .DriverName
I0813 00:24:11.186177 717315 driver.go:335] Setting default libvirt URI to qemu:///system
I0813 00:24:11.186609 717315 main.go:130] libmachine: Found binary path at /home/jenkins/workspace/KVM_Linux_containerd_integration/out/docker-machine-driver-kvm2
I0813 00:24:11.186656 717315 main.go:130] libmachine: Launching plugin server for driver kvm2
I0813 00:24:11.198110 717315 main.go:130] libmachine: Plugin server listening at address 127.0.0.1:36983
I0813 00:24:11.198547 717315 main.go:130] libmachine: () Calling .GetVersion
I0813 00:24:11.199530 717315 main.go:130] libmachine: Using API Version 1
I0813 00:24:11.199558 717315 main.go:130] libmachine: () Calling .SetConfigRaw
I0813 00:24:11.199935 717315 main.go:130] libmachine: () Calling .GetMachineName
I0813 00:24:11.200112 717315 main.go:130] libmachine: (kubernetes-upgrade-20210813002240-679351) Calling .DriverName
I0813 00:24:11.236730 717315 out.go:177] * Using the kvm2 driver based on existing profile
I0813 00:24:11.236757 717315 start.go:278] selected driver: kvm2
I0813 00:24:11.236763 717315 start.go:751] validating driver "kvm2" against &{Name:kubernetes-upgrade-20210813002240-679351 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/12122/minikube-v1.22.0-1628238775-12122.iso KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.25@sha256:6f936e3443b95cd918d77623bf7b595653bb382766e280290a02b4a349e88b79 Memory:2200 CPUs:2 DiskSize:20000 VMDriver: Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.99.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.14.0 ClusterName:kubernetes-upgrade-20210813002240-679351 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP:192.168.50.136 Port:8443 KubernetesVersion:v1.14.0 ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: MultiNodeRequested:false ExtraDisks:0}
I0813 00:24:11.236883 717315 start.go:762] status for kvm2: {Installed:true Healthy:true Running:true NeedsImprovement:false Error:<nil> Reason: Fix: Doc:}
I0813 00:24:11.237960 717315 install.go:52] acquiring lock: {Name:mk900956b073697a4aa6c80a27c6bb0742a99a53 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0813 00:24:11.238119 717315 install.go:117] Validating docker-machine-driver-kvm2, PATH=/home/jenkins/minikube-integration/linux-amd64-kvm2-containerd-12230-675919-1c76ff5cea01605c2d985c010644edf1e689d34b/.minikube/bin:/home/jenkins/workspace/KVM_Linux_containerd_integration/out/:/usr/local/bin:/usr/bin:/bin:/usr/local/games:/usr/games:/usr/local/go/bin:/home/jenkins/go/bin:/usr/local/bin/:/usr/local/go/bin/:/home/jenkins/go/bin
I0813 00:24:11.256957 717315 install.go:137] /home/jenkins/workspace/KVM_Linux_containerd_integration/out/docker-machine-driver-kvm2 version is 1.22.0
I0813 00:24:11.257394 717315 cni.go:93] Creating CNI manager for ""
I0813 00:24:11.257413 717315 cni.go:163] "kvm2" driver + containerd runtime found, recommending bridge
I0813 00:24:11.257425 717315 start_flags.go:277] config:
{Name:kubernetes-upgrade-20210813002240-679351 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/12122/minikube-v1.22.0-1628238775-12122.iso KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.25@sha256:6f936e3443b95cd918d77623bf7b595653bb382766e280290a02b4a349e88b79 Memory:2200 CPUs:2 DiskSize:20000 VMDriver: Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.99.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.22.0-rc.0 ClusterName:kubernetes-upgrade-20210813002240-679351 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP:192.168.50.136 Port:8443 KubernetesVersion:v1.14.0 ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: MultiNodeRequested:false ExtraDisks:0}
I0813 00:24:11.257652 717315 iso.go:123] acquiring lock: {Name:mke80f4e00d5590a17349e0875191e5cd211cb9b Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
*
* ==> container status <==
* CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID
a4a4ec1132e56 bc2bb319a7038 28 seconds ago Running kube-controller-manager 2 a8ae77b235803
7899222a89a71 6e38f40d628db 42 seconds ago Running storage-provisioner 0 4a6e63928b11f
d30696c5405d8 296a6d5035e2d 54 seconds ago Running coredns 1 f5e6ffa407fcd
6204b21ab9c2c adb2816ea823a 56 seconds ago Running kube-proxy 1 46f36163c53e1
705c524b7bd2d 0369cf4303ffd 56 seconds ago Running etcd 1 accc3895e9638
64c935095bced 6be0dc1302e30 57 seconds ago Running kube-scheduler 1 2c9091a6bd8d7
85bd885bbae1e 3d174f00aa39e 57 seconds ago Running kube-apiserver 1 bb4e7930b2ada
9cb0b80b9734a bc2bb319a7038 57 seconds ago Exited kube-controller-manager 1 a8ae77b235803
efb6b8992aa82 296a6d5035e2d 2 minutes ago Exited coredns 0 36cd319da9139
c11b8a977685b adb2816ea823a 2 minutes ago Exited kube-proxy 0 7d0f0371d8768
2efebee19d7a6 0369cf4303ffd 2 minutes ago Exited etcd 0 54509c8ec15d2
3874ae5baf2d8 3d174f00aa39e 2 minutes ago Exited kube-apiserver 0 c77d72f80a55b
3afc8c09f8286 6be0dc1302e30 2 minutes ago Exited kube-scheduler 0 1bd10dcb9a7ea
*
* ==> containerd <==
* -- Logs begin at Fri 2021-08-13 00:20:51 UTC, end at Fri 2021-08-13 00:24:13 UTC. --
Aug 13 00:23:18 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:18.421885649Z" level=info msg="StartContainer for \"6204b21ab9c2c6e799da39874e9eb93e39284c65231dd05603088db2fa6b8e6b\" returns successfully"
Aug 13 00:23:18 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:18.634895282Z" level=info msg="StartContainer for \"d30696c5405d8e3fbc2bfe7ef7e391b98c301d6056d08f1d32a9614f101edc6f\" returns successfully"
Aug 13 00:23:27 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:27.651258400Z" level=info msg="Finish piping stderr of container \"9cb0b80b9734ad17c461045a38b0d0a6b8d8686ef46bf8dd99a00d62b8cc67fe\""
Aug 13 00:23:27 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:27.652374051Z" level=info msg="Finish piping stdout of container \"9cb0b80b9734ad17c461045a38b0d0a6b8d8686ef46bf8dd99a00d62b8cc67fe\""
Aug 13 00:23:27 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:27.654645594Z" level=info msg="TaskExit event &TaskExit{ContainerID:9cb0b80b9734ad17c461045a38b0d0a6b8d8686ef46bf8dd99a00d62b8cc67fe,ID:9cb0b80b9734ad17c461045a38b0d0a6b8d8686ef46bf8dd99a00d62b8cc67fe,Pid:4197,ExitStatus:255,ExitedAt:2021-08-13 00:23:27.653997105 +0000 UTC,XXX_unrecognized:[],}"
Aug 13 00:23:27 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:27.765885334Z" level=info msg="shim disconnected" id=9cb0b80b9734ad17c461045a38b0d0a6b8d8686ef46bf8dd99a00d62b8cc67fe
Aug 13 00:23:27 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:27.766396530Z" level=error msg="copy shim log" error="read /proc/self/fd/58: file already closed"
Aug 13 00:23:28 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:28.010584031Z" level=info msg="RemoveContainer for \"112c2918d72a90f4b0bbe9d6e1b3134149bf89a77f20de44d4059a1ad6edeff4\""
Aug 13 00:23:28 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:28.021541263Z" level=info msg="RemoveContainer for \"112c2918d72a90f4b0bbe9d6e1b3134149bf89a77f20de44d4059a1ad6edeff4\" returns successfully"
Aug 13 00:23:29 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:29.821371568Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:storage-provisioner,Uid:b781b362-9644-4c96-a463-4cb61bc5ab58,Namespace:kube-system,Attempt:0,}"
Aug 13 00:23:29 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:29.875382976Z" level=info msg="starting signal loop" namespace=k8s.io path=/run/containerd/io.containerd.runtime.v2.task/k8s.io/4a6e63928b11fedafa692c25459f3246b10afd3d4379e021576f2665ed408565 pid=4636
Aug 13 00:23:30 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:30.445012017Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:storage-provisioner,Uid:b781b362-9644-4c96-a463-4cb61bc5ab58,Namespace:kube-system,Attempt:0,} returns sandbox id \"4a6e63928b11fedafa692c25459f3246b10afd3d4379e021576f2665ed408565\""
Aug 13 00:23:30 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:30.455717251Z" level=info msg="CreateContainer within sandbox \"4a6e63928b11fedafa692c25459f3246b10afd3d4379e021576f2665ed408565\" for container &ContainerMetadata{Name:storage-provisioner,Attempt:0,}"
Aug 13 00:23:30 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:30.529886160Z" level=info msg="CreateContainer within sandbox \"4a6e63928b11fedafa692c25459f3246b10afd3d4379e021576f2665ed408565\" for &ContainerMetadata{Name:storage-provisioner,Attempt:0,} returns container id \"7899222a89a71ffe4fb8d232a5b3799b5c94ea7e430406ce44654a4c80946ed9\""
Aug 13 00:23:30 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:30.531938735Z" level=info msg="StartContainer for \"7899222a89a71ffe4fb8d232a5b3799b5c94ea7e430406ce44654a4c80946ed9\""
Aug 13 00:23:30 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:30.785895579Z" level=info msg="StartContainer for \"7899222a89a71ffe4fb8d232a5b3799b5c94ea7e430406ce44654a4c80946ed9\" returns successfully"
Aug 13 00:23:45 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:45.128731300Z" level=info msg="CreateContainer within sandbox \"a8ae77b235803b4e19e0eb0a6e8e4d70a30100102282506be869694a0b95d264\" for container &ContainerMetadata{Name:kube-controller-manager,Attempt:2,}"
Aug 13 00:23:45 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:45.216530954Z" level=info msg="CreateContainer within sandbox \"a8ae77b235803b4e19e0eb0a6e8e4d70a30100102282506be869694a0b95d264\" for &ContainerMetadata{Name:kube-controller-manager,Attempt:2,} returns container id \"a4a4ec1132e56e93b77cf82333943e5daaab8efa48df65ced586f5a324cde37d\""
Aug 13 00:23:45 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:45.221615537Z" level=info msg="StartContainer for \"a4a4ec1132e56e93b77cf82333943e5daaab8efa48df65ced586f5a324cde37d\""
Aug 13 00:23:45 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:45.473360047Z" level=info msg="StartContainer for \"a4a4ec1132e56e93b77cf82333943e5daaab8efa48df65ced586f5a324cde37d\" returns successfully"
Aug 13 00:23:54 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:54.024224370Z" level=info msg="StopPodSandbox for \"010dd466bc9609853f86415ca26b64dbc3754ff0bcb704d6a8abdf03248fe11a\""
Aug 13 00:23:54 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:54.024332361Z" level=info msg="TearDown network for sandbox \"010dd466bc9609853f86415ca26b64dbc3754ff0bcb704d6a8abdf03248fe11a\" successfully"
Aug 13 00:23:54 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:54.024344869Z" level=info msg="StopPodSandbox for \"010dd466bc9609853f86415ca26b64dbc3754ff0bcb704d6a8abdf03248fe11a\" returns successfully"
Aug 13 00:23:54 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:54.026182269Z" level=info msg="RemovePodSandbox for \"010dd466bc9609853f86415ca26b64dbc3754ff0bcb704d6a8abdf03248fe11a\""
Aug 13 00:23:54 pause-20210813001951-679351 containerd[3537]: time="2021-08-13T00:23:54.033764742Z" level=info msg="RemovePodSandbox \"010dd466bc9609853f86415ca26b64dbc3754ff0bcb704d6a8abdf03248fe11a\" returns successfully"
*
* ==> coredns [d30696c5405d8e3fbc2bfe7ef7e391b98c301d6056d08f1d32a9614f101edc6f] <==
* [INFO] plugin/ready: Still waiting on: "kubernetes"
.:53
[INFO] plugin/reload: Running configuration MD5 = 21fa5447a9370c672668c17fadc8028a
CoreDNS-1.8.0
linux/amd64, go1.15.3, 054c9ae
[INFO] plugin/ready: Still waiting on: "kubernetes"
*
* ==> coredns [efb6b8992aa826974c98985c6dbeb065b7a93d6ceeacacae98b06ec18bbfd5bb] <==
* I0813 00:22:38.694893 1 trace.go:205] Trace[2019727887]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.19.2/tools/cache/reflector.go:156 (13-Aug-2021 00:22:08.692) (total time: 30002ms):
Trace[2019727887]: [30.002199421s] [30.002199421s] END
E0813 00:22:38.695325 1 reflector.go:127] pkg/mod/k8s.io/client-go@v0.19.2/tools/cache/reflector.go:156: Failed to watch *v1.Namespace: failed to list *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
I0813 00:22:38.695485 1 trace.go:205] Trace[911902081]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.19.2/tools/cache/reflector.go:156 (13-Aug-2021 00:22:08.694) (total time: 30001ms):
Trace[911902081]: [30.001141967s] [30.001141967s] END
E0813 00:22:38.695755 1 reflector.go:127] pkg/mod/k8s.io/client-go@v0.19.2/tools/cache/reflector.go:156: Failed to watch *v1.Service: failed to list *v1.Service: Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
I0813 00:22:38.695034 1 trace.go:205] Trace[1427131847]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.19.2/tools/cache/reflector.go:156 (13-Aug-2021 00:22:08.694) (total time: 30000ms):
Trace[1427131847]: [30.000422913s] [30.000422913s] END
E0813 00:22:38.696466 1 reflector.go:127] pkg/mod/k8s.io/client-go@v0.19.2/tools/cache/reflector.go:156: Failed to watch *v1.Endpoints: failed to list *v1.Endpoints: Get "https://10.96.0.1:443/api/v1/endpoints?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[INFO] plugin/ready: Still waiting on: "kubernetes"
.:53
[INFO] plugin/reload: Running configuration MD5 = db32ca3650231d74073ff4cf814959a7
CoreDNS-1.8.0
linux/amd64, go1.15.3, 054c9ae
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] Reloading
[INFO] plugin/health: Going into lameduck mode for 5s
[INFO] plugin/reload: Running configuration MD5 = 21fa5447a9370c672668c17fadc8028a
[INFO] Reloading complete
*
* ==> describe nodes <==
* Name: pause-20210813001951-679351
Roles: control-plane,master
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=pause-20210813001951-679351
kubernetes.io/os=linux
minikube.k8s.io/commit=dc1c3ca26e9449ce488a773126b8450402c94a19
minikube.k8s.io/name=pause-20210813001951-679351
minikube.k8s.io/updated_at=2021_08_13T00_21_48_0700
minikube.k8s.io/version=v1.22.0
node-role.kubernetes.io/control-plane=
node-role.kubernetes.io/master=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: /run/containerd/containerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Fri, 13 Aug 2021 00:21:44 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: pause-20210813001951-679351
AcquireTime: <unset>
RenewTime: Fri, 13 Aug 2021 00:23:57 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Fri, 13 Aug 2021 00:21:57 +0000 Fri, 13 Aug 2021 00:21:38 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Fri, 13 Aug 2021 00:21:57 +0000 Fri, 13 Aug 2021 00:21:38 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Fri, 13 Aug 2021 00:21:57 +0000 Fri, 13 Aug 2021 00:21:38 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Fri, 13 Aug 2021 00:21:57 +0000 Fri, 13 Aug 2021 00:21:57 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.127.196
Hostname: pause-20210813001951-679351
Capacity:
cpu: 2
ephemeral-storage: 17784752Ki
hugepages-2Mi: 0
memory: 2033024Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 17784752Ki
hugepages-2Mi: 0
memory: 2033024Ki
pods: 110
System Info:
Machine ID: 9109403b29744bfb99029b25cc4f9da7
System UUID: 9109403b-2974-4bfb-9902-9b25cc4f9da7
Boot ID: 0d1f634d-bf20-456d-8420-8e644eba3e38
Kernel Version: 4.19.182
OS Image: Buildroot 2020.02.12
Operating System: linux
Architecture: amd64
Container Runtime Version: containerd://1.4.9
Kubelet Version: v1.21.3
Kube-Proxy Version: v1.21.3
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (7 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
kube-system coredns-558bd4d5db-xjmwl 100m (5%) 0 (0%) 70Mi (3%) 170Mi (8%) 2m8s
kube-system etcd-pause-20210813001951-679351 100m (5%) 0 (0%) 100Mi (5%) 0 (0%) 2m28s
kube-system kube-apiserver-pause-20210813001951-679351 250m (12%) 0 (0%) 0 (0%) 0 (0%) 2m19s
kube-system kube-controller-manager-pause-20210813001951-679351 200m (10%) 0 (0%) 0 (0%) 0 (0%) 2m19s
kube-system kube-proxy-2mkpr 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2m8s
kube-system kube-scheduler-pause-20210813001951-679351 100m (5%) 0 (0%) 0 (0%) 0 (0%) 2m19s
kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 44s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 750m (37%) 0 (0%)
memory 170Mi (8%) 170Mi (8%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal NodeHasSufficientMemory 2m41s (x6 over 2m41s) kubelet Node pause-20210813001951-679351 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 2m41s (x6 over 2m41s) kubelet Node pause-20210813001951-679351 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 2m41s (x5 over 2m41s) kubelet Node pause-20210813001951-679351 status is now: NodeHasSufficientPID
Normal Starting 2m20s kubelet Starting kubelet.
Normal NodeHasSufficientMemory 2m19s kubelet Node pause-20210813001951-679351 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 2m19s kubelet Node pause-20210813001951-679351 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 2m19s kubelet Node pause-20210813001951-679351 status is now: NodeHasSufficientPID
Normal NodeAllocatableEnforced 2m19s kubelet Updated Node Allocatable limit across pods
Normal NodeReady 2m16s kubelet Node pause-20210813001951-679351 status is now: NodeReady
Normal Starting 2m5s kube-proxy Starting kube-proxy.
Normal Starting 46s kube-proxy Starting kube-proxy.
*
* ==> dmesg <==
* on the kernel command line
[ +0.000130] platform regulatory.0: Direct firmware load for regulatory.db failed with error -2
[ +4.671269] systemd-fstab-generator[1160]: Ignoring "noauto" for root device
[ +0.045892] systemd[1]: system-getty.slice: unit configures an IP firewall, but the local system does not support BPF/cgroup firewalling.
[ +0.000003] systemd[1]: (This warning is only shown for the first unit using IP firewalling.)
[ +1.192244] SELinux: unrecognized netlink message: protocol=0 nlmsg_type=106 sclass=netlink_route_socket pid=1717 comm=systemd-network
[ +2.439853] NFSD: the nfsdcld client tracking upcall will be removed in 3.10. Please transition to using nfsdcltrack.
[ +2.154553] vboxguest: loading out-of-tree module taints kernel.
[ +0.006032] vboxguest: PCI device not found, probably running on physical hardware.
[Aug13 00:21] systemd-fstab-generator[2094]: Ignoring "noauto" for root device
[ +0.270933] systemd-fstab-generator[2126]: Ignoring "noauto" for root device
[ +0.157086] systemd-fstab-generator[2141]: Ignoring "noauto" for root device
[ +0.205044] systemd-fstab-generator[2172]: Ignoring "noauto" for root device
[ +8.129227] systemd-fstab-generator[2377]: Ignoring "noauto" for root device
[ +21.150444] systemd-fstab-generator[2810]: Ignoring "noauto" for root device
[Aug13 00:22] kauditd_printk_skb: 38 callbacks suppressed
[ +41.503227] kauditd_printk_skb: 65 callbacks suppressed
[ +6.452067] NFSD: Unable to end grace period: -110
[Aug13 00:23] systemd-fstab-generator[3489]: Ignoring "noauto" for root device
[ +0.238108] systemd-fstab-generator[3502]: Ignoring "noauto" for root device
[ +0.253277] systemd-fstab-generator[3527]: Ignoring "noauto" for root device
[ +17.104028] kauditd_printk_skb: 29 callbacks suppressed
[ +32.355407] systemd-fstab-generator[4902]: Ignoring "noauto" for root device
[Aug13 00:24] systemd-fstab-generator[5070]: Ignoring "noauto" for root device
[ +3.299751] systemd-fstab-generator[5099]: Ignoring "noauto" for root device
*
* ==> etcd [2efebee19d7a6bd77fd1333dab2cc543c575e8c6babdae865b90a1cf0fa48744] <==
* 2021-08-13 00:22:07.230197 W | etcdserver: read-only range request "key:\"/registry/endpointslices/kube-system/kube-dns-5fjzs\" " with result "range_response_count:1 size:1013" took too long (671.993662ms) to execute
2021-08-13 00:22:07.230468 W | etcdserver: read-only range request "key:\"/registry/serviceaccounts/kube-system/coredns\" " with result "range_response_count:1 size:217" took too long (741.585441ms) to execute
2021-08-13 00:22:14.207544 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2021-08-13 00:22:24.208238 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2021-08-13 00:22:34.210334 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2021-08-13 00:22:45.207896 W | etcdserver/api/etcdhttp: /health error; QGET failed etcdserver: request timed out (status code 503)
2021-08-13 00:22:45.919806 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "error:context deadline exceeded" took too long (2.000045576s) to execute
2021-08-13 00:22:47.101521 W | wal: sync duration of 4.051774181s, expected less than 1s
2021-08-13 00:22:47.103383 W | etcdserver: read-only range request "key:\"/registry/pods/kube-system/coredns-558bd4d5db-xjmwl\" " with result "range_response_count:1 size:4746" took too long (3.677716425s) to execute
2021-08-13 00:22:47.104500 W | etcdserver: read-only range request "key:\"/registry/namespaces/kube-system\" " with result "range_response_count:1 size:351" took too long (2.60570306s) to execute
2021-08-13 00:22:47.104825 W | etcdserver: read-only range request "key:\"/registry/namespaces/default\" " with result "range_response_count:1 size:341" took too long (894.799494ms) to execute
2021-08-13 00:22:47.105989 W | etcdserver: read-only range request "key:\"/registry/pods/kube-system/coredns-558bd4d5db-xjmwl\" " with result "range_response_count:1 size:4746" took too long (1.568667767s) to execute
2021-08-13 00:22:47.106790 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "range_response_count:0 size:5" took too long (1.176235696s) to execute
2021-08-13 00:22:47.107462 W | etcdserver: read-only range request "key:\"/registry/flowschemas/exempt\" " with result "range_response_count:1 size:879" took too long (2.454270208s) to execute
2021-08-13 00:22:47.950458 W | etcdserver: request "header:<ID:12242045188531646344 username:\"kube-apiserver-etcd-client\" auth_revision:1 > lease_grant:<ttl:3660-second id:29e47b3ce2b56f87>" with result "size:41" took too long (470.318775ms) to execute
2021-08-13 00:22:47.951739 W | etcdserver: read-only range request "key:\"/registry/namespaces/kube-node-lease\" " with result "range_response_count:1 size:363" took too long (801.202969ms) to execute
2021-08-13 00:22:47.955016 W | etcdserver: read-only range request "key:\"/registry/prioritylevelconfigurations/exempt\" " with result "range_response_count:1 size:371" took too long (804.019176ms) to execute
2021-08-13 00:22:47.956311 W | etcdserver: read-only range request "key:\"/registry/pods/kube-system/coredns-558bd4d5db-xjmwl\" " with result "range_response_count:1 size:4568" took too long (530.617681ms) to execute
2021-08-13 00:22:47.958454 W | etcdserver: read-only range request "key:\"/registry/priorityclasses/\" range_end:\"/registry/priorityclasses0\" count_only:true " with result "range_response_count:0 size:7" took too long (309.016563ms) to execute
2021-08-13 00:22:48.801008 W | etcdserver: read-only range request "key:\"/registry/minions/pause-20210813001951-679351\" " with result "range_response_count:1 size:4776" took too long (768.144463ms) to execute
2021-08-13 00:22:48.802711 W | etcdserver: read-only range request "key:\"/registry/controllers/\" range_end:\"/registry/controllers0\" count_only:true " with result "range_response_count:0 size:5" took too long (347.906261ms) to execute
2021-08-13 00:22:49.019606 W | etcdserver: read-only range request "key:\"/registry/serviceaccounts/default/\" range_end:\"/registry/serviceaccounts/default0\" " with result "range_response_count:1 size:209" took too long (111.551006ms) to execute
2021-08-13 00:22:49.020521 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "range_response_count:0 size:5" took too long (100.648686ms) to execute
2021-08-13 00:22:54.217451 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2021-08-13 00:23:04.208424 I | etcdserver/api/etcdhttp: /health OK (status code 200)
*
* ==> etcd [705c524b7bd2d071e133ec74fb1f433cab624312145e8e0d2e4b19e7936be85b] <==
* 2021-08-13 00:23:25.998767 W | etcdserver: read-only range request "key:\"/registry/leases/kube-node-lease/pause-20210813001951-679351\" " with result "range_response_count:1 size:671" took too long (1.83123856s) to execute
2021-08-13 00:23:25.999024 W | etcdserver: read-only range request "key:\"/registry/pods/kube-system/kube-apiserver-pause-20210813001951-679351\" " with result "range_response_count:1 size:6537" took too long (1.83134851s) to execute
2021-08-13 00:23:25.999710 W | etcdserver: read-only range request "key:\"/registry/services/endpoints/default/kubernetes\" " with result "range_response_count:1 size:423" took too long (1.836183835s) to execute
2021-08-13 00:23:26.000315 W | etcdserver: read-only range request "key:\"/registry/serviceaccounts/kube-system/kube-proxy\" " with result "range_response_count:1 size:226" took too long (1.856850168s) to execute
2021-08-13 00:23:26.003298 W | etcdserver: read-only range request "key:\"/registry/prioritylevelconfigurations/exempt\" " with result "range_response_count:1 size:371" took too long (1.862496869s) to execute
2021-08-13 00:23:26.866787 W | etcdserver: read-only range request "key:\"/registry/clusterrolebindings/\" range_end:\"/registry/clusterrolebindings0\" " with result "range_response_count:50 size:37065" took too long (849.021882ms) to execute
2021-08-13 00:23:26.868169 W | etcdserver: request "header:<ID:12242045188557790365 username:\"kube-apiserver-etcd-client\" auth_revision:1 > txn:<compare:<target:MOD key:\"/registry/leases/kube-node-lease/pause-20210813001951-679351\" mod_revision:486 > success:<request_put:<key:\"/registry/leases/kube-node-lease/pause-20210813001951-679351\" value_size:587 >> failure:<request_range:<key:\"/registry/leases/kube-node-lease/pause-20210813001951-679351\" > >>" with result "size:16" took too long (533.569848ms) to execute
2021-08-13 00:23:26.875022 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "range_response_count:0 size:5" took too long (380.351682ms) to execute
2021-08-13 00:23:26.888255 W | etcdserver: read-only range request "key:\"/registry/flowschemas/exempt\" " with result "range_response_count:1 size:879" took too long (866.035216ms) to execute
2021-08-13 00:23:26.891763 W | etcdserver: read-only range request "key:\"/registry/minions/pause-20210813001951-679351\" " with result "range_response_count:1 size:4776" took too long (869.868576ms) to execute
2021-08-13 00:23:26.892452 W | etcdserver: read-only range request "key:\"/registry/masterleases/\" range_end:\"/registry/masterleases0\" " with result "range_response_count:0 size:5" took too long (870.451036ms) to execute
2021-08-13 00:23:26.892806 W | etcdserver: read-only range request "key:\"/registry/priorityclasses/system-cluster-critical\" " with result "range_response_count:1 size:476" took too long (870.772194ms) to execute
2021-08-13 00:23:26.893782 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "range_response_count:0 size:5" took too long (353.420112ms) to execute
2021-08-13 00:23:27.618510 W | etcdserver: read-only range request "key:\"/registry/events/kube-system/kube-proxy-2mkpr.169ab5d1b9f0872d\" " with result "range_response_count:1 size:826" took too long (312.125535ms) to execute
2021-08-13 00:23:27.618804 W | etcdserver: read-only range request "key:\"/registry/clusterroles/system:controller:certificate-controller\" " with result "range_response_count:1 size:1142" took too long (324.375348ms) to execute
2021-08-13 00:23:27.619230 W | etcdserver: read-only range request "key:\"/registry/health\" " with result "range_response_count:0 size:5" took too long (251.508486ms) to execute
2021-08-13 00:23:35.057151 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2021-08-13 00:23:44.208010 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2021-08-13 00:23:54.207487 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2021-08-13 00:24:03.753011 W | etcdserver: read-only range request "key:\"/registry/services/endpoints/kube-system/k8s.io-minikube-hostpath\" " with result "range_response_count:1 size:1125" took too long (341.385574ms) to execute
WARNING: 2021/08/13 00:24:07 grpc: Server.processUnaryRPC failed to write status: connection error: desc = "transport is closing"
2021-08-13 00:24:09.122960 W | wal: sync duration of 3.322047764s, expected less than 1s
2021-08-13 00:24:09.124505 W | etcdserver: read-only range request "key:\"/registry/namespaces/default\" " with result "range_response_count:1 size:341" took too long (1.247717726s) to execute
2021-08-13 00:24:09.125866 W | etcdserver: read-only range request "key:\"/registry/services/endpoints/kube-system/k8s.io-minikube-hostpath\" " with result "range_response_count:1 size:1125" took too long (1.315223316s) to execute
2021-08-13 00:24:09.686753 W | etcdserver: request "header:<ID:12242045188557790983 username:\"kube-apiserver-etcd-client\" auth_revision:1 > txn:<compare:<target:MOD key:\"/registry/masterleases/192.168.127.196\" mod_revision:580 > success:<request_put:<key:\"/registry/masterleases/192.168.127.196\" value_size:70 lease:3018673151703015173 >> failure:<request_range:<key:\"/registry/masterleases/192.168.127.196\" > >>" with result "size:16" took too long (348.846917ms) to execute
*
* ==> kernel <==
* 00:24:13 up 3 min, 0 users, load average: 1.63, 0.95, 0.39
Linux pause-20210813001951-679351 4.19.182 #1 SMP Fri Aug 6 09:11:32 UTC 2021 x86_64 GNU/Linux
PRETTY_NAME="Buildroot 2020.02.12"
*
* ==> kube-apiserver [3874ae5baf2d856570fa5534f52778b464323afbd92eca54de5a983517dbbb65] <==
* Trace[265158055]: [552.827013ms] [552.827013ms] END
I0813 00:22:48.803967 1 trace.go:205] Trace[40150974]: "GuaranteedUpdate etcd3" type:*apps.Deployment (13-Aug-2021 00:22:48.035) (total time: 768ms):
Trace[40150974]: ---"Transaction committed" 766ms (00:22:00.803)
Trace[40150974]: [768.574416ms] [768.574416ms] END
I0813 00:22:48.805449 1 trace.go:205] Trace[38850226]: "Update" url:/apis/apps/v1/namespaces/kube-system/deployments/coredns/status,user-agent:kube-controller-manager/v1.21.3 (linux/amd64) kubernetes/ca643a4/system:serviceaccount:kube-system:deployment-controller,client:192.168.127.196,accept:application/vnd.kubernetes.protobuf, */*,protocol:HTTP/2.0 (13-Aug-2021 00:22:48.034) (total time: 770ms):
Trace[38850226]: ---"Object stored in database" 769ms (00:22:00.805)
Trace[38850226]: [770.627081ms] [770.627081ms] END
I0813 00:22:48.807911 1 trace.go:205] Trace[1153692894]: "GuaranteedUpdate etcd3" type:*discovery.EndpointSlice (13-Aug-2021 00:22:48.029) (total time: 777ms):
Trace[1153692894]: ---"Transaction committed" 776ms (00:22:00.807)
Trace[1153692894]: [777.942281ms] [777.942281ms] END
I0813 00:22:48.811870 1 trace.go:205] Trace[2055050687]: "Get" url:/api/v1/nodes/pause-20210813001951-679351,user-agent:minikube-linux-amd64/v0.0.0 (linux/amd64) kubernetes/$Format,client:192.168.127.1,accept:application/json, */*,protocol:HTTP/2.0 (13-Aug-2021 00:22:48.030) (total time: 780ms):
Trace[2055050687]: ---"About to write a response" 779ms (00:22:00.810)
Trace[2055050687]: [780.446725ms] [780.446725ms] END
I0813 00:22:48.815873 1 trace.go:205] Trace[1709682153]: "Update" url:/apis/discovery.k8s.io/v1/namespaces/kube-system/endpointslices/kube-dns-5fjzs,user-agent:kube-controller-manager/v1.21.3 (linux/amd64) kubernetes/ca643a4/system:serviceaccount:kube-system:endpointslice-controller,client:192.168.127.196,accept:application/vnd.kubernetes.protobuf, */*,protocol:HTTP/2.0 (13-Aug-2021 00:22:48.029) (total time: 786ms):
Trace[1709682153]: ---"Object stored in database" 785ms (00:22:00.815)
Trace[1709682153]: [786.028644ms] [786.028644ms] END
I0813 00:22:48.812641 1 trace.go:205] Trace[1460083222]: "GuaranteedUpdate etcd3" type:*core.Endpoints (13-Aug-2021 00:22:48.029) (total time: 783ms):
Trace[1460083222]: ---"Transaction committed" 782ms (00:22:00.812)
Trace[1460083222]: [783.305236ms] [783.305236ms] END
I0813 00:22:48.819982 1 trace.go:205] Trace[920420538]: "Update" url:/api/v1/namespaces/kube-system/endpoints/kube-dns,user-agent:kube-controller-manager/v1.21.3 (linux/amd64) kubernetes/ca643a4/system:serviceaccount:kube-system:endpoint-controller,client:192.168.127.196,accept:application/vnd.kubernetes.protobuf, */*,protocol:HTTP/2.0 (13-Aug-2021 00:22:48.029) (total time: 790ms):
Trace[920420538]: ---"Object stored in database" 790ms (00:22:00.819)
Trace[920420538]: [790.783982ms] [790.783982ms] END
I0813 00:23:04.050873 1 client.go:360] parsed scheme: "passthrough"
I0813 00:23:04.051445 1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379 <nil> 0 <nil>}] <nil> <nil>}
I0813 00:23:04.051754 1 clientconn.go:948] ClientConn switching balancer to "pick_first"
*
* ==> kube-apiserver [85bd885bbae1eb206d16fda70dde9e78726f0563495bc0c5f64cf2083b9a7bf7] <==
* I0813 00:23:26.905285 1 storage_scheduling.go:148] all system priority classes are created successfully or already exist.
I0813 00:23:29.331854 1 controller.go:611] quota admission added evaluator for: serviceaccounts
I0813 00:23:29.366533 1 controller.go:611] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I0813 00:23:29.384370 1 controller.go:611] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I0813 00:23:29.403160 1 controller.go:611] quota admission added evaluator for: endpoints
I0813 00:23:29.518332 1 controller.go:611] quota admission added evaluator for: events.events.k8s.io
I0813 00:23:57.987189 1 controller.go:611] quota admission added evaluator for: endpointslices.discovery.k8s.io
I0813 00:24:01.430291 1 client.go:360] parsed scheme: "passthrough"
I0813 00:24:01.430491 1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379 <nil> 0 <nil>}] <nil> <nil>}
I0813 00:24:01.430526 1 clientconn.go:948] ClientConn switching balancer to "pick_first"
E0813 00:24:07.259737 1 status.go:71] apiserver received an error that is not an metav1.Status: &errors.errorString{s:"context canceled"}: context canceled
E0813 00:24:07.259860 1 status.go:71] apiserver received an error that is not an metav1.Status: &errors.errorString{s:"client disconnected"}: client disconnected
E0813 00:24:07.263006 1 writers.go:117] apiserver was unable to write a JSON response: http: Handler timeout
E0813 00:24:07.263568 1 wrap.go:54] timeout or abort while handling: GET "/apis/storage.k8s.io/v1/csinodes/pause-20210813001951-679351"
E0813 00:24:07.265357 1 status.go:71] apiserver received an error that is not an metav1.Status: &errors.errorString{s:"http: Handler timeout"}: http: Handler timeout
E0813 00:24:07.268867 1 writers.go:130] apiserver was unable to write a fallback JSON response: http: Handler timeout
I0813 00:24:09.127818 1 trace.go:205] Trace[986017853]: "Get" url:/api/v1/namespaces/default,user-agent:kube-apiserver/v1.21.3 (linux/amd64) kubernetes/ca643a4,client:127.0.0.1,accept:application/vnd.kubernetes.protobuf, */*,protocol:HTTP/2.0 (13-Aug-2021 00:24:07.874) (total time: 1253ms):
Trace[986017853]: ---"About to write a response" 1253ms (00:24:00.127)
Trace[986017853]: [1.253446276s] [1.253446276s] END
I0813 00:24:09.129569 1 trace.go:205] Trace[1940889522]: "Get" url:/api/v1/namespaces/kube-system/endpoints/k8s.io-minikube-hostpath,user-agent:storage-provisioner/v0.0.0 (linux/amd64) kubernetes/$Format,client:192.168.127.196,accept:application/json, */*,protocol:HTTP/2.0 (13-Aug-2021 00:24:07.809) (total time: 1319ms):
Trace[1940889522]: ---"About to write a response" 1319ms (00:24:00.129)
Trace[1940889522]: [1.319757496s] [1.319757496s] END
I0813 00:24:09.688489 1 trace.go:205] Trace[1106331583]: "GuaranteedUpdate etcd3" type:*v1.Endpoints (13-Aug-2021 00:24:09.160) (total time: 527ms):
Trace[1106331583]: ---"Transaction committed" 523ms (00:24:00.688)
Trace[1106331583]: [527.566261ms] [527.566261ms] END
*
* ==> kube-controller-manager [9cb0b80b9734ad17c461045a38b0d0a6b8d8686ef46bf8dd99a00d62b8cc67fe] <==
* /usr/local/go/src/bytes/buffer.go:204 +0xbe
crypto/tls.(*Conn).readFromUntil(0xc000126e00, 0x500dda0, 0xc0008fa500, 0x5, 0xc0008fa500, 0x431)
/usr/local/go/src/crypto/tls/conn.go:798 +0xf3
crypto/tls.(*Conn).readRecordOrCCS(0xc000126e00, 0x0, 0x0, 0x0)
/usr/local/go/src/crypto/tls/conn.go:605 +0x115
crypto/tls.(*Conn).readRecord(...)
/usr/local/go/src/crypto/tls/conn.go:573
crypto/tls.(*Conn).Read(0xc000126e00, 0xc000be3000, 0x1000, 0x1000, 0x0, 0x0, 0x0)
/usr/local/go/src/crypto/tls/conn.go:1276 +0x165
bufio.(*Reader).Read(0xc0001c8180, 0xc0002631b8, 0x9, 0x9, 0x9b69eb, 0xc001059c78, 0x4071a5)
/usr/local/go/src/bufio/bufio.go:227 +0x222
io.ReadAtLeast(0x5007a00, 0xc0001c8180, 0xc0002631b8, 0x9, 0x9, 0x9, 0xc000c0fec0, 0x4c5c995259c000, 0xc000c0fec0)
/usr/local/go/src/io/io.go:328 +0x87
io.ReadFull(...)
/usr/local/go/src/io/io.go:347
k8s.io/kubernetes/vendor/golang.org/x/net/http2.readFrameHeader(0xc0002631b8, 0x9, 0x9, 0x5007a00, 0xc0001c8180, 0x0, 0x0, 0x0, 0x0)
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/golang.org/x/net/http2/frame.go:237 +0x89
k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Framer).ReadFrame(0xc000263180, 0xc00107d3b0, 0x0, 0x0, 0x0)
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/golang.org/x/net/http2/frame.go:492 +0xa5
k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*clientConnReadLoop).run(0xc001059fa8, 0x0, 0x0)
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/golang.org/x/net/http2/transport.go:1819 +0xd8
k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*ClientConn).readLoop(0xc000001200)
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/golang.org/x/net/http2/transport.go:1741 +0x6f
created by k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).newClientConn
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/golang.org/x/net/http2/transport.go:705 +0x6c5
*
* ==> kube-controller-manager [a4a4ec1132e56e93b77cf82333943e5daaab8efa48df65ced586f5a324cde37d] <==
* I0813 00:23:57.943260 1 shared_informer.go:247] Caches are synced for ReplicaSet
I0813 00:23:57.947953 1 shared_informer.go:247] Caches are synced for deployment
I0813 00:23:57.955167 1 shared_informer.go:247] Caches are synced for namespace
I0813 00:23:57.955304 1 shared_informer.go:247] Caches are synced for TTL
I0813 00:23:57.957499 1 shared_informer.go:247] Caches are synced for cronjob
I0813 00:23:57.961016 1 shared_informer.go:240] Waiting for caches to sync for garbage collector
I0813 00:23:57.962629 1 shared_informer.go:247] Caches are synced for HPA
I0813 00:23:57.965129 1 shared_informer.go:247] Caches are synced for job
I0813 00:23:57.967122 1 shared_informer.go:247] Caches are synced for certificate-csrsigning-kubelet-serving
I0813 00:23:57.971271 1 shared_informer.go:247] Caches are synced for certificate-csrsigning-kubelet-client
I0813 00:23:57.972709 1 shared_informer.go:247] Caches are synced for certificate-csrsigning-legacy-unknown
I0813 00:23:57.974698 1 shared_informer.go:247] Caches are synced for TTL after finished
I0813 00:23:57.982328 1 shared_informer.go:247] Caches are synced for certificate-csrapproving
I0813 00:23:57.982525 1 shared_informer.go:247] Caches are synced for certificate-csrsigning-kube-apiserver-client
I0813 00:23:57.982606 1 shared_informer.go:247] Caches are synced for stateful set
I0813 00:23:57.984923 1 shared_informer.go:247] Caches are synced for bootstrap_signer
I0813 00:23:57.989195 1 shared_informer.go:247] Caches are synced for persistent volume
I0813 00:23:57.993635 1 shared_informer.go:247] Caches are synced for crt configmap
I0813 00:23:58.072945 1 shared_informer.go:247] Caches are synced for disruption
I0813 00:23:58.073409 1 disruption.go:371] Sending events to api server.
I0813 00:23:58.151201 1 shared_informer.go:247] Caches are synced for resource quota
I0813 00:23:58.160353 1 shared_informer.go:247] Caches are synced for resource quota
I0813 00:23:58.659003 1 shared_informer.go:247] Caches are synced for garbage collector
I0813 00:23:58.659723 1 garbagecollector.go:151] Garbage collector: all resource monitors have synced. Proceeding to collect garbage
I0813 00:23:58.662160 1 shared_informer.go:247] Caches are synced for garbage collector
*
* ==> kube-proxy [6204b21ab9c2c6e799da39874e9eb93e39284c65231dd05603088db2fa6b8e6b] <==
* I0813 00:23:26.907583 1 node.go:172] Successfully retrieved node IP: 192.168.127.196
I0813 00:23:26.907779 1 server_others.go:140] Detected node IP 192.168.127.196
W0813 00:23:26.907836 1 server_others.go:598] Unknown proxy mode "", assuming iptables proxy
W0813 00:23:27.211577 1 server_others.go:197] No iptables support for IPv6: exit status 3
I0813 00:23:27.211713 1 server_others.go:208] kube-proxy running in single-stack IPv4 mode
I0813 00:23:27.211745 1 server_others.go:212] Using iptables Proxier.
I0813 00:23:27.212315 1 server.go:643] Version: v1.21.3
I0813 00:23:27.215676 1 config.go:315] Starting service config controller
I0813 00:23:27.215816 1 shared_informer.go:240] Waiting for caches to sync for service config
I0813 00:23:27.215853 1 config.go:224] Starting endpoint slice config controller
I0813 00:23:27.215859 1 shared_informer.go:240] Waiting for caches to sync for endpoint slice config
W0813 00:23:27.234006 1 warnings.go:70] discovery.k8s.io/v1beta1 EndpointSlice is deprecated in v1.21+, unavailable in v1.25+; use discovery.k8s.io/v1 EndpointSlice
W0813 00:23:27.237301 1 warnings.go:70] discovery.k8s.io/v1beta1 EndpointSlice is deprecated in v1.21+, unavailable in v1.25+; use discovery.k8s.io/v1 EndpointSlice
I0813 00:23:27.316802 1 shared_informer.go:247] Caches are synced for endpoint slice config
I0813 00:23:27.316986 1 shared_informer.go:247] Caches are synced for service config
*
* ==> kube-proxy [c11b8a977685bc2516a3a180b7d7e5a078649d5b9c68db67af64bdbf0438193c] <==
* I0813 00:22:08.526654 1 node.go:172] Successfully retrieved node IP: 192.168.127.196
I0813 00:22:08.527015 1 server_others.go:140] Detected node IP 192.168.127.196
W0813 00:22:08.527394 1 server_others.go:598] Unknown proxy mode "", assuming iptables proxy
W0813 00:22:08.644892 1 server_others.go:197] No iptables support for IPv6: exit status 3
I0813 00:22:08.644915 1 server_others.go:208] kube-proxy running in single-stack IPv4 mode
I0813 00:22:08.644934 1 server_others.go:212] Using iptables Proxier.
I0813 00:22:08.647847 1 server.go:643] Version: v1.21.3
I0813 00:22:08.651170 1 config.go:315] Starting service config controller
I0813 00:22:08.651751 1 shared_informer.go:240] Waiting for caches to sync for service config
I0813 00:22:08.657258 1 config.go:224] Starting endpoint slice config controller
I0813 00:22:08.657749 1 shared_informer.go:240] Waiting for caches to sync for endpoint slice config
W0813 00:22:08.659509 1 warnings.go:70] discovery.k8s.io/v1beta1 EndpointSlice is deprecated in v1.21+, unavailable in v1.25+; use discovery.k8s.io/v1 EndpointSlice
W0813 00:22:08.695660 1 warnings.go:70] discovery.k8s.io/v1beta1 EndpointSlice is deprecated in v1.21+, unavailable in v1.25+; use discovery.k8s.io/v1 EndpointSlice
I0813 00:22:08.752469 1 shared_informer.go:247] Caches are synced for service config
I0813 00:22:08.759025 1 shared_informer.go:247] Caches are synced for endpoint slice config
*
* ==> kube-scheduler [3afc8c09f828616463f8d4246cdb7a602c45569e04de078f3b507b5df49993e8] <==
* E0813 00:21:42.747204 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E0813 00:21:42.747540 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E0813 00:21:42.748267 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E0813 00:21:42.748530 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E0813 00:21:42.749032 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E0813 00:21:42.749616 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E0813 00:21:42.747557 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E0813 00:21:42.750598 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1beta1.CSIStorageCapacity: failed to list *v1beta1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E0813 00:21:42.751707 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E0813 00:21:43.586759 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E0813 00:21:43.586851 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E0813 00:21:43.611677 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E0813 00:21:43.619757 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E0813 00:21:43.758571 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1beta1.CSIStorageCapacity: failed to list *v1beta1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E0813 00:21:43.808629 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E0813 00:21:43.858463 1 reflector.go:138] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E0813 00:21:43.874015 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E0813 00:21:43.903878 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E0813 00:21:43.932330 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E0813 00:21:44.053954 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E0813 00:21:44.184579 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E0813 00:21:44.276152 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E0813 00:21:44.276696 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E0813 00:21:45.625656 1 reflector.go:138] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
I0813 00:21:51.218463 1 shared_informer.go:247] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
*
* ==> kube-scheduler [64c935095bced983323337af866a47ac732cfe3496f8dfd31387b8833f7cc6c0] <==
* I0813 00:23:17.339713 1 serving.go:347] Generated self-signed cert in-memory
W0813 00:23:24.103838 1 requestheader_controller.go:193] Unable to get configmap/extension-apiserver-authentication in kube-system. Usually fixed by 'kubectl create rolebinding -n kube-system ROLEBINDING_NAME --role=extension-apiserver-authentication-reader --serviceaccount=YOUR_NS:YOUR_SA'
W0813 00:23:24.103977 1 authentication.go:337] Error looking up in-cluster authentication configuration: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot get resource "configmaps" in API group "" in the namespace "kube-system"
W0813 00:23:24.103989 1 authentication.go:338] Continuing without authentication configuration. This may treat all requests as anonymous.
W0813 00:23:24.103997 1 authentication.go:339] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false
I0813 00:23:24.177616 1 secure_serving.go:197] Serving securely on 127.0.0.1:10259
I0813 00:23:24.178246 1 tlsconfig.go:240] Starting DynamicServingCertificateController
I0813 00:23:24.179347 1 configmap_cafile_content.go:202] Starting client-ca::kube-system::extension-apiserver-authentication::client-ca-file
I0813 00:23:24.196646 1 shared_informer.go:240] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
I0813 00:23:24.304977 1 shared_informer.go:247] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
*
* ==> kubelet <==
* -- Logs begin at Fri 2021-08-13 00:20:51 UTC, end at Fri 2021-08-13 00:24:14 UTC. --
Aug 13 00:24:06 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:06.907271 5078 server.go:660] "--cgroups-per-qos enabled, but --cgroup-root was not specified. defaulting to /"
Aug 13 00:24:06 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:06.907671 5078 container_manager_linux.go:278] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Aug 13 00:24:06 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:06.907807 5078 container_manager_linux.go:283] "Creating Container Manager object based on Node Config" nodeConfig={RuntimeCgroupsName: SystemCgroupsName: KubeletCgroupsName: ContainerRuntime:remote CgroupsPerQOS:true CgroupRoot:/ CgroupDriver:cgroupfs KubeletRootDir:/var/lib/kubelet ProtectKernelDefaults:false NodeAllocatableConfig:{KubeReservedCgroupName: SystemReservedCgroupName: ReservedSystemCPUs: EnforceNodeAllocatable:map[pods:{}] KubeReserved:map[] SystemReserved:map[] HardEvictionThresholds:[]} QOSReserved:map[] ExperimentalCPUManagerPolicy:none ExperimentalTopologyManagerScope:container ExperimentalCPUManagerReconcilePeriod:10s ExperimentalMemoryManagerPolicy:None ExperimentalMemoryManagerReservedMemory:[] ExperimentalPodPidsLimit:-1 EnforceCPULimits:true CPUCFSQuotaPeriod:100ms ExperimentalTopologyManagerPolicy:none}
Aug 13 00:24:06 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:06.907872 5078 topology_manager.go:120] "Creating topology manager with policy per scope" topologyPolicyName="none" topologyScopeName="container"
Aug 13 00:24:06 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:06.907885 5078 container_manager_linux.go:314] "Initializing Topology Manager" policy="none" scope="container"
Aug 13 00:24:06 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:06.907893 5078 container_manager_linux.go:319] "Creating device plugin manager" devicePluginEnabled=true
Aug 13 00:24:06 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:06.908017 5078 remote_runtime.go:62] parsed scheme: ""
Aug 13 00:24:06 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:06.908027 5078 remote_runtime.go:62] scheme "" not registered, fallback to default scheme
Aug 13 00:24:06 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:06.908151 5078 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{/run/containerd/containerd.sock <nil> 0 <nil>}] <nil> <nil>}
Aug 13 00:24:06 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:06.908169 5078 clientconn.go:948] ClientConn switching balancer to "pick_first"
Aug 13 00:24:06 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:06.908324 5078 remote_image.go:50] parsed scheme: ""
Aug 13 00:24:06 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:06.908333 5078 remote_image.go:50] scheme "" not registered, fallback to default scheme
Aug 13 00:24:06 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:06.908348 5078 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{/run/containerd/containerd.sock <nil> 0 <nil>}] <nil> <nil>}
Aug 13 00:24:06 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:06.908359 5078 clientconn.go:948] ClientConn switching balancer to "pick_first"
Aug 13 00:24:06 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:06.908467 5078 kubelet.go:404] "Attempting to sync node with API server"
Aug 13 00:24:06 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:06.908488 5078 kubelet.go:272] "Adding static pod path" path="/etc/kubernetes/manifests"
Aug 13 00:24:06 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:06.908521 5078 kubelet.go:283] "Adding apiserver pod source"
Aug 13 00:24:06 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:06.908555 5078 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Aug 13 00:24:06 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:06.909189 5078 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick
Aug 13 00:24:06 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:06.913695 5078 kuberuntime_manager.go:222] "Container runtime initialized" containerRuntime="containerd" version="v1.4.9" apiVersion="v1alpha2"
Aug 13 00:24:07 pause-20210813001951-679351 kubelet[5078]: E0813 00:24:07.237758 5078 aws_credentials.go:77] while getting AWS credentials NoCredentialProviders: no valid providers in chain. Deprecated.
Aug 13 00:24:07 pause-20210813001951-679351 kubelet[5078]: For verbose messaging see aws.Config.CredentialsChainVerboseErrors
Aug 13 00:24:07 pause-20210813001951-679351 kubelet[5078]: I0813 00:24:07.244131 5078 server.go:1190] "Started kubelet"
Aug 13 00:24:07 pause-20210813001951-679351 systemd[1]: kubelet.service: Succeeded.
Aug 13 00:24:07 pause-20210813001951-679351 systemd[1]: Stopped kubelet: The Kubernetes Node Agent.
*
* ==> storage-provisioner [7899222a89a71ffe4fb8d232a5b3799b5c94ea7e430406ce44654a4c80946ed9] <==
* I0813 00:23:30.839269 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I0813 00:23:30.879440 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I0813 00:23:30.880848 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I0813 00:23:30.925896 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I0813 00:23:30.926952 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_pause-20210813001951-679351_90bbf71c-5385-47c0-853d-3e2fde5ecc99!
I0813 00:23:30.936151 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"44c00147-d6a4-4a55-bec0-85ce7cb56602", APIVersion:"v1", ResourceVersion:"544", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' pause-20210813001951-679351_90bbf71c-5385-47c0-853d-3e2fde5ecc99 became leader
I0813 00:23:31.046717 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_pause-20210813001951-679351_90bbf71c-5385-47c0-853d-3e2fde5ecc99!
-- /stdout --
helpers_test.go:255: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p pause-20210813001951-679351 -n pause-20210813001951-679351
helpers_test.go:255: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.APIServer}} -p pause-20210813001951-679351 -n pause-20210813001951-679351: exit status 2 (294.338092ms)
-- stdout --
Running
-- /stdout --
helpers_test.go:255: status error: exit status 2 (may be ok)
helpers_test.go:262: (dbg) Run: kubectl --context pause-20210813001951-679351 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:271: non-running pods:
helpers_test.go:273: ======> post-mortem[TestPause/serial/PauseAgain]: describe non-running pods <======
helpers_test.go:276: (dbg) Run: kubectl --context pause-20210813001951-679351 describe pod
helpers_test.go:276: (dbg) Non-zero exit: kubectl --context pause-20210813001951-679351 describe pod : exit status 1 (61.416172ms)
** stderr **
error: resource name may not be empty
** /stderr **
helpers_test.go:278: kubectl --context pause-20210813001951-679351 describe pod : exit status 1
--- FAIL: TestPause/serial/PauseAgain (10.84s)