Test Report: Docker_Linux_docker_arm64 19452

667295c6870455ef3392c60a87bf7f5fdc211f00:2024-08-16:35803

Test fail (1/343)

Order | Failed test                                             | Duration (s)
372   | TestStartStop/group/old-k8s-version/serial/SecondStart | 380.63
TestStartStop/group/old-k8s-version/serial/SecondStart (380.63s)

=== RUN   TestStartStop/group/old-k8s-version/serial/SecondStart
start_stop_delete_test.go:256: (dbg) Run:  out/minikube-linux-arm64 start -p old-k8s-version-894472 --memory=2200 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker  --container-runtime=docker --kubernetes-version=v1.20.0
E0816 00:06:56.721477 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kindnet-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:06:58.933005 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/bridge-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:07:00.736599 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/enable-default-cni-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:07:02.579305 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/skaffold-624546/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:07:06.632105 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kubenet-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:07:11.226303 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:07:20.772464 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/auto-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:07:38.929482 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:07:39.894447 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/bridge-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:07:47.593569 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kubenet-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:07:48.474932 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/auto-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:07:59.075288 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/custom-flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:07:59.081858 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/custom-flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:07:59.093269 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/custom-flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:07:59.115058 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/custom-flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:07:59.156453 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/custom-flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:07:59.237844 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/custom-flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:07:59.399328 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/custom-flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:07:59.721213 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/custom-flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:08:00.362560 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/custom-flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:08:01.643956 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/custom-flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:08:04.205372 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/custom-flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:08:09.327758 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/custom-flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:08:18.345918 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/calico-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:08:18.352364 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/calico-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:08:18.363830 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/calico-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:08:18.385200 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/calico-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:08:18.426791 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/calico-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:08:18.508227 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/calico-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:08:18.669737 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/calico-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:08:18.991737 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/calico-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:08:19.569814 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/custom-flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:08:19.633157 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/calico-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:08:20.915444 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/calico-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:08:23.477800 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/calico-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:08:28.599427 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/calico-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:08:38.841468 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/calico-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:08:40.051519 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/custom-flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:08:59.323625 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/calico-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:01.815887 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/bridge-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:09.515839 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kubenet-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:12.859789 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kindnet-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:16.870102 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/enable-default-cni-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:21.012928 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/custom-flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:36.182576 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/addons-083442/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:36.765431 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/false-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:36.771775 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/false-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:36.783139 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/false-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:36.804479 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/false-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:36.846090 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/false-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:36.927585 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/false-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:37.088864 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/false-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:37.410538 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/false-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:38.052785 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/false-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:39.334345 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/false-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:40.285312 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/calico-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:40.563190 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kindnet-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:41.895903 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/false-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:44.578867 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/enable-default-cni-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:47.017932 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/false-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:57.259388 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/false-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:10:17.740749 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/false-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:10:39.510524 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/skaffold-624546/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:10:42.934443 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/custom-flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:10:43.596401 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/functional-300199/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:10:58.702168 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/false-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:11:00.530856 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/functional-300199/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:11:02.206658 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/calico-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:11:17.956762 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/bridge-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:11:25.654975 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kubenet-055531/client.crt: no such file or directory" logger="UnhandledError"
start_stop_delete_test.go:256: (dbg) Non-zero exit: out/minikube-linux-arm64 start -p old-k8s-version-894472 --memory=2200 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker  --container-runtime=docker --kubernetes-version=v1.20.0: exit status 102 (6m18.124373157s)

-- stdout --
	* [old-k8s-version-894472] minikube v1.33.1 on Ubuntu 20.04 (arm64)
	  - MINIKUBE_LOCATION=19452
	  - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	  - KUBECONFIG=/home/jenkins/minikube-integration/19452-2026001/kubeconfig
	  - MINIKUBE_HOME=/home/jenkins/minikube-integration/19452-2026001/.minikube
	  - MINIKUBE_BIN=out/minikube-linux-arm64
	  - MINIKUBE_FORCE_SYSTEMD=
	* Kubernetes 1.31.0 is now available. If you would like to upgrade, specify: --kubernetes-version=v1.31.0
	* Using the docker driver based on existing profile
	* Starting "old-k8s-version-894472" primary control-plane node in "old-k8s-version-894472" cluster
	* Pulling base image v0.0.44-1723740748-19452 ...
	* Restarting existing docker container for "old-k8s-version-894472" ...
	* Preparing Kubernetes v1.20.0 on Docker 27.1.2 ...
	* Verifying Kubernetes components...
	  - Using image gcr.io/k8s-minikube/storage-provisioner:v5
	  - Using image fake.domain/registry.k8s.io/echoserver:1.4
	  - Using image docker.io/kubernetesui/dashboard:v2.7.0
	  - Using image registry.k8s.io/echoserver:1.4
	* Some dashboard features require the metrics-server addon. To enable all features please run:
	
		minikube -p old-k8s-version-894472 addons enable metrics-server
	
	* Enabled addons: default-storageclass, metrics-server, storage-provisioner, dashboard
	
	

-- /stdout --
** stderr ** 
	I0816 00:06:56.478341 2407112 out.go:345] Setting OutFile to fd 1 ...
	I0816 00:06:56.478559 2407112 out.go:392] TERM=,COLORTERM=, which probably does not support color
	I0816 00:06:56.478582 2407112 out.go:358] Setting ErrFile to fd 2...
	I0816 00:06:56.478601 2407112 out.go:392] TERM=,COLORTERM=, which probably does not support color
	I0816 00:06:56.478891 2407112 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19452-2026001/.minikube/bin
	I0816 00:06:56.479286 2407112 out.go:352] Setting JSON to false
	I0816 00:06:56.480283 2407112 start.go:129] hostinfo: {"hostname":"ip-172-31-29-130","uptime":31761,"bootTime":1723735056,"procs":208,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1067-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"36adf542-ef4f-4e2d-a0c8-6868d1383ff9"}
	I0816 00:06:56.480386 2407112 start.go:139] virtualization:  
	I0816 00:06:56.484771 2407112 out.go:177] * [old-k8s-version-894472] minikube v1.33.1 on Ubuntu 20.04 (arm64)
	I0816 00:06:56.489078 2407112 out.go:177]   - MINIKUBE_LOCATION=19452
	I0816 00:06:56.489146 2407112 notify.go:220] Checking for updates...
	I0816 00:06:56.495252 2407112 out.go:177]   - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I0816 00:06:56.497537 2407112 out.go:177]   - KUBECONFIG=/home/jenkins/minikube-integration/19452-2026001/kubeconfig
	I0816 00:06:56.499696 2407112 out.go:177]   - MINIKUBE_HOME=/home/jenkins/minikube-integration/19452-2026001/.minikube
	I0816 00:06:56.502149 2407112 out.go:177]   - MINIKUBE_BIN=out/minikube-linux-arm64
	I0816 00:06:56.503818 2407112 out.go:177]   - MINIKUBE_FORCE_SYSTEMD=
	I0816 00:06:56.506603 2407112 config.go:182] Loaded profile config "old-k8s-version-894472": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.20.0
	I0816 00:06:56.509199 2407112 out.go:177] * Kubernetes 1.31.0 is now available. If you would like to upgrade, specify: --kubernetes-version=v1.31.0
	I0816 00:06:56.510758 2407112 driver.go:392] Setting default libvirt URI to qemu:///system
	I0816 00:06:56.542297 2407112 docker.go:123] docker version: linux-27.1.2:Docker Engine - Community
	I0816 00:06:56.542421 2407112 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0816 00:06:56.622084 2407112 info.go:266] docker info: {ID:U5VK:ZNT5:35M3:FHLW:Q7TL:ELFX:BNAG:AV4T:UD2H:SK5L:SEJV:SJJL Containers:2 ContainersRunning:1 ContainersPaused:0 ContainersStopped:1 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:34 OomKillDisable:true NGoroutines:53 SystemTime:2024-08-16 00:06:56.612263818 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1067-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aar
ch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214900736 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-29-130 Labels:[] ExperimentalBuild:false ServerVersion:27.1.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:8fc6bcff51318944179630522a095cc9dbf9f353 Expected:8fc6bcff51318944179630522a095cc9dbf9f353} RuncCommit:{ID:v1.1.13-0-g58aa920 Expected:v1.1.13-0-g58aa920} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErro
rs:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.16.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.29.1]] Warnings:<nil>}}
	I0816 00:06:56.622193 2407112 docker.go:307] overlay module found
	I0816 00:06:56.624971 2407112 out.go:177] * Using the docker driver based on existing profile
	I0816 00:06:56.627017 2407112 start.go:297] selected driver: docker
	I0816 00:06:56.627040 2407112 start.go:901] validating driver "docker" against &{Name:old-k8s-version-894472 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.20.0 ClusterName:old-k8s-version-894472 Namespace:default APIServerHAVIP: AP
IServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin: FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.20.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountSt
ring:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I0816 00:06:56.627160 2407112 start.go:912] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
	I0816 00:06:56.627741 2407112 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0816 00:06:56.712650 2407112 info.go:266] docker info: {ID:U5VK:ZNT5:35M3:FHLW:Q7TL:ELFX:BNAG:AV4T:UD2H:SK5L:SEJV:SJJL Containers:2 ContainersRunning:1 ContainersPaused:0 ContainersStopped:1 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:34 OomKillDisable:true NGoroutines:53 SystemTime:2024-08-16 00:06:56.703145608 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1067-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aar
ch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214900736 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-29-130 Labels:[] ExperimentalBuild:false ServerVersion:27.1.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:8fc6bcff51318944179630522a095cc9dbf9f353 Expected:8fc6bcff51318944179630522a095cc9dbf9f353} RuncCommit:{ID:v1.1.13-0-g58aa920 Expected:v1.1.13-0-g58aa920} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErro
rs:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.16.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.29.1]] Warnings:<nil>}}
	I0816 00:06:56.713018 2407112 start_flags.go:947] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
	I0816 00:06:56.713048 2407112 cni.go:84] Creating CNI manager for ""
	I0816 00:06:56.713068 2407112 cni.go:162] CNI unnecessary in this configuration, recommending no CNI
	I0816 00:06:56.713112 2407112 start.go:340] cluster config:
	{Name:old-k8s-version-894472 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.20.0 ClusterName:old-k8s-version-894472 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local
ContainerRuntime:docker CRISocket: NetworkPlugin: FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.20.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountI
P: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I0816 00:06:56.716540 2407112 out.go:177] * Starting "old-k8s-version-894472" primary control-plane node in "old-k8s-version-894472" cluster
	I0816 00:06:56.718269 2407112 cache.go:121] Beginning downloading kic base image for docker with docker
	I0816 00:06:56.720160 2407112 out.go:177] * Pulling base image v0.0.44-1723740748-19452 ...
	I0816 00:06:56.721981 2407112 preload.go:131] Checking if preload exists for k8s version v1.20.0 and runtime docker
	I0816 00:06:56.722039 2407112 preload.go:146] Found local preload: /home/jenkins/minikube-integration/19452-2026001/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.20.0-docker-overlay2-arm64.tar.lz4
	I0816 00:06:56.722052 2407112 cache.go:56] Caching tarball of preloaded images
	I0816 00:06:56.722141 2407112 preload.go:172] Found /home/jenkins/minikube-integration/19452-2026001/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.20.0-docker-overlay2-arm64.tar.lz4 in cache, skipping download
	I0816 00:06:56.722155 2407112 cache.go:59] Finished verifying existence of preloaded tar for v1.20.0 on docker
	I0816 00:06:56.722340 2407112 image.go:79] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d in local docker daemon
	I0816 00:06:56.722549 2407112 profile.go:143] Saving config to /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/old-k8s-version-894472/config.json ...
	W0816 00:06:56.744934 2407112 image.go:95] image gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d is of wrong architecture
	I0816 00:06:56.744952 2407112 cache.go:149] Downloading gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d to local cache
	I0816 00:06:56.745027 2407112 image.go:63] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d in local cache directory
	I0816 00:06:56.745044 2407112 image.go:66] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d in local cache directory, skipping pull
	I0816 00:06:56.745049 2407112 image.go:135] gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d exists in cache, skipping pull
	I0816 00:06:56.745057 2407112 cache.go:152] successfully saved gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d as a tarball
	I0816 00:06:56.745063 2407112 cache.go:162] Loading gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d from local cache
	I0816 00:06:56.872136 2407112 cache.go:164] successfully loaded and using gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d from cached tarball
	I0816 00:06:56.872178 2407112 cache.go:194] Successfully downloaded all kic artifacts
	I0816 00:06:56.872221 2407112 start.go:360] acquireMachinesLock for old-k8s-version-894472: {Name:mkc65b883f793322a5198592ea6258fdb5d12c1e Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I0816 00:06:56.872296 2407112 start.go:364] duration metric: took 41.902µs to acquireMachinesLock for "old-k8s-version-894472"
	I0816 00:06:56.872323 2407112 start.go:96] Skipping create...Using existing machine configuration
	I0816 00:06:56.872335 2407112 fix.go:54] fixHost starting: 
	I0816 00:06:56.872625 2407112 cli_runner.go:164] Run: docker container inspect old-k8s-version-894472 --format={{.State.Status}}
	I0816 00:06:56.889006 2407112 fix.go:112] recreateIfNeeded on old-k8s-version-894472: state=Stopped err=<nil>
	W0816 00:06:56.889034 2407112 fix.go:138] unexpected machine state, will restart: <nil>
	I0816 00:06:56.891153 2407112 out.go:177] * Restarting existing docker container for "old-k8s-version-894472" ...
	I0816 00:06:56.893031 2407112 cli_runner.go:164] Run: docker start old-k8s-version-894472
	I0816 00:06:57.203439 2407112 cli_runner.go:164] Run: docker container inspect old-k8s-version-894472 --format={{.State.Status}}
	I0816 00:06:57.229792 2407112 kic.go:430] container "old-k8s-version-894472" state is running.
	I0816 00:06:57.231986 2407112 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-894472
	I0816 00:06:57.253055 2407112 profile.go:143] Saving config to /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/old-k8s-version-894472/config.json ...
	I0816 00:06:57.253286 2407112 machine.go:93] provisionDockerMachine start ...
	I0816 00:06:57.253348 2407112 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-894472
	I0816 00:06:57.277267 2407112 main.go:141] libmachine: Using SSH client type: native
	I0816 00:06:57.277532 2407112 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e49d0] 0x3e7230 <nil>  [] 0s} 127.0.0.1 35084 <nil> <nil>}
	I0816 00:06:57.277547 2407112 main.go:141] libmachine: About to run SSH command:
	hostname
	I0816 00:06:57.278662 2407112 main.go:141] libmachine: Error dialing TCP: ssh: handshake failed: EOF
	I0816 00:07:00.417721 2407112 main.go:141] libmachine: SSH cmd err, output: <nil>: old-k8s-version-894472
	
	I0816 00:07:00.417744 2407112 ubuntu.go:169] provisioning hostname "old-k8s-version-894472"
	I0816 00:07:00.417828 2407112 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-894472
	I0816 00:07:00.437465 2407112 main.go:141] libmachine: Using SSH client type: native
	I0816 00:07:00.437790 2407112 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e49d0] 0x3e7230 <nil>  [] 0s} 127.0.0.1 35084 <nil> <nil>}
	I0816 00:07:00.437812 2407112 main.go:141] libmachine: About to run SSH command:
	sudo hostname old-k8s-version-894472 && echo "old-k8s-version-894472" | sudo tee /etc/hostname
	I0816 00:07:00.596905 2407112 main.go:141] libmachine: SSH cmd err, output: <nil>: old-k8s-version-894472
	
	I0816 00:07:00.596993 2407112 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-894472
	I0816 00:07:00.614217 2407112 main.go:141] libmachine: Using SSH client type: native
	I0816 00:07:00.614480 2407112 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e49d0] 0x3e7230 <nil>  [] 0s} 127.0.0.1 35084 <nil> <nil>}
	I0816 00:07:00.614502 2407112 main.go:141] libmachine: About to run SSH command:
	
			if ! grep -xq '.*\sold-k8s-version-894472' /etc/hosts; then
				if grep -xq '127.0.1.1\s.*' /etc/hosts; then
					sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 old-k8s-version-894472/g' /etc/hosts;
				else 
					echo '127.0.1.1 old-k8s-version-894472' | sudo tee -a /etc/hosts; 
				fi
			fi
	I0816 00:07:00.755319 2407112 main.go:141] libmachine: SSH cmd err, output: <nil>: 
	I0816 00:07:00.755351 2407112 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/19452-2026001/.minikube CaCertPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/19452-2026001/.minikube}
	I0816 00:07:00.755372 2407112 ubuntu.go:177] setting up certificates
	I0816 00:07:00.755382 2407112 provision.go:84] configureAuth start
	I0816 00:07:00.755449 2407112 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-894472
	I0816 00:07:00.774680 2407112 provision.go:143] copyHostCerts
	I0816 00:07:00.774750 2407112 exec_runner.go:144] found /home/jenkins/minikube-integration/19452-2026001/.minikube/ca.pem, removing ...
	I0816 00:07:00.774759 2407112 exec_runner.go:203] rm: /home/jenkins/minikube-integration/19452-2026001/.minikube/ca.pem
	I0816 00:07:00.774822 2407112 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/19452-2026001/.minikube/ca.pem (1082 bytes)
	I0816 00:07:00.774914 2407112 exec_runner.go:144] found /home/jenkins/minikube-integration/19452-2026001/.minikube/cert.pem, removing ...
	I0816 00:07:00.774920 2407112 exec_runner.go:203] rm: /home/jenkins/minikube-integration/19452-2026001/.minikube/cert.pem
	I0816 00:07:00.774939 2407112 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/19452-2026001/.minikube/cert.pem (1123 bytes)
	I0816 00:07:00.774989 2407112 exec_runner.go:144] found /home/jenkins/minikube-integration/19452-2026001/.minikube/key.pem, removing ...
	I0816 00:07:00.774993 2407112 exec_runner.go:203] rm: /home/jenkins/minikube-integration/19452-2026001/.minikube/key.pem
	I0816 00:07:00.775011 2407112 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/19452-2026001/.minikube/key.pem (1675 bytes)
	I0816 00:07:00.775055 2407112 provision.go:117] generating server cert: /home/jenkins/minikube-integration/19452-2026001/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/19452-2026001/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/19452-2026001/.minikube/certs/ca-key.pem org=jenkins.old-k8s-version-894472 san=[127.0.0.1 192.168.85.2 localhost minikube old-k8s-version-894472]
	I0816 00:07:01.005265 2407112 provision.go:177] copyRemoteCerts
	I0816 00:07:01.005372 2407112 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
	I0816 00:07:01.005471 2407112 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-894472
	I0816 00:07:01.025729 2407112 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35084 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/old-k8s-version-894472/id_rsa Username:docker}
	I0816 00:07:01.128226 2407112 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
	I0816 00:07:01.157884 2407112 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/machines/server.pem --> /etc/docker/server.pem (1233 bytes)
	I0816 00:07:01.188711 2407112 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
	I0816 00:07:01.239191 2407112 provision.go:87] duration metric: took 483.794576ms to configureAuth
	I0816 00:07:01.239218 2407112 ubuntu.go:193] setting minikube options for container-runtime
	I0816 00:07:01.239421 2407112 config.go:182] Loaded profile config "old-k8s-version-894472": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.20.0
	I0816 00:07:01.239489 2407112 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-894472
	I0816 00:07:01.258647 2407112 main.go:141] libmachine: Using SSH client type: native
	I0816 00:07:01.258917 2407112 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e49d0] 0x3e7230 <nil>  [] 0s} 127.0.0.1 35084 <nil> <nil>}
	I0816 00:07:01.258928 2407112 main.go:141] libmachine: About to run SSH command:
	df --output=fstype / | tail -n 1
	I0816 00:07:01.394503 2407112 main.go:141] libmachine: SSH cmd err, output: <nil>: overlay
	
	I0816 00:07:01.394523 2407112 ubuntu.go:71] root file system type: overlay
	I0816 00:07:01.394635 2407112 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
	I0816 00:07:01.394702 2407112 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-894472
	I0816 00:07:01.421992 2407112 main.go:141] libmachine: Using SSH client type: native
	I0816 00:07:01.422240 2407112 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e49d0] 0x3e7230 <nil>  [] 0s} 127.0.0.1 35084 <nil> <nil>}
	I0816 00:07:01.422315 2407112 main.go:141] libmachine: About to run SSH command:
	sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
	Description=Docker Application Container Engine
	Documentation=https://docs.docker.com
	BindsTo=containerd.service
	After=network-online.target firewalld.service containerd.service
	Wants=network-online.target
	Requires=docker.socket
	StartLimitBurst=3
	StartLimitIntervalSec=60
	
	[Service]
	Type=notify
	Restart=on-failure
	
	
	
	# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
	# The base configuration already specifies an 'ExecStart=...' command. The first directive
	# here is to clear out that command inherited from the base configuration. Without this,
	# the command from the base configuration and the command specified here are treated as
	# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
	# will catch this invalid input and refuse to start the service with an error like:
	#  Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
	
	# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
	# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
	ExecStart=
	ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 
	ExecReload=/bin/kill -s HUP \$MAINPID
	
	# Having non-zero Limit*s causes performance problems due to accounting overhead
	# in the kernel. We recommend using cgroups to do container-local accounting.
	LimitNOFILE=infinity
	LimitNPROC=infinity
	LimitCORE=infinity
	
	# Uncomment TasksMax if your systemd version supports it.
	# Only systemd 226 and above support this version.
	TasksMax=infinity
	TimeoutStartSec=0
	
	# set delegate yes so that systemd does not reset the cgroups of docker containers
	Delegate=yes
	
	# kill only the docker process, not all processes in the cgroup
	KillMode=process
	
	[Install]
	WantedBy=multi-user.target
	" | sudo tee /lib/systemd/system/docker.service.new
	I0816 00:07:01.592994 2407112 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
	Description=Docker Application Container Engine
	Documentation=https://docs.docker.com
	BindsTo=containerd.service
	After=network-online.target firewalld.service containerd.service
	Wants=network-online.target
	Requires=docker.socket
	StartLimitBurst=3
	StartLimitIntervalSec=60
	
	[Service]
	Type=notify
	Restart=on-failure
	
	
	
	# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
	# The base configuration already specifies an 'ExecStart=...' command. The first directive
	# here is to clear out that command inherited from the base configuration. Without this,
	# the command from the base configuration and the command specified here are treated as
	# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
	# will catch this invalid input and refuse to start the service with an error like:
	#  Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
	
	# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
	# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
	ExecStart=
	ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 
	ExecReload=/bin/kill -s HUP $MAINPID
	
	# Having non-zero Limit*s causes performance problems due to accounting overhead
	# in the kernel. We recommend using cgroups to do container-local accounting.
	LimitNOFILE=infinity
	LimitNPROC=infinity
	LimitCORE=infinity
	
	# Uncomment TasksMax if your systemd version supports it.
	# Only systemd 226 and above support this version.
	TasksMax=infinity
	TimeoutStartSec=0
	
	# set delegate yes so that systemd does not reset the cgroups of docker containers
	Delegate=yes
	
	# kill only the docker process, not all processes in the cgroup
	KillMode=process
	
	[Install]
	WantedBy=multi-user.target
	
	I0816 00:07:01.593262 2407112 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-894472
	I0816 00:07:01.632745 2407112 main.go:141] libmachine: Using SSH client type: native
	I0816 00:07:01.633003 2407112 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e49d0] 0x3e7230 <nil>  [] 0s} 127.0.0.1 35084 <nil> <nil>}
	I0816 00:07:01.633021 2407112 main.go:141] libmachine: About to run SSH command:
	sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
	I0816 00:07:01.808217 2407112 main.go:141] libmachine: SSH cmd err, output: <nil>: 
	I0816 00:07:01.808256 2407112 machine.go:96] duration metric: took 4.554960543s to provisionDockerMachine
	I0816 00:07:01.808270 2407112 start.go:293] postStartSetup for "old-k8s-version-894472" (driver="docker")
	I0816 00:07:01.808286 2407112 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
	I0816 00:07:01.808433 2407112 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
	I0816 00:07:01.808497 2407112 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-894472
	I0816 00:07:01.829776 2407112 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35084 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/old-k8s-version-894472/id_rsa Username:docker}
	I0816 00:07:01.932858 2407112 ssh_runner.go:195] Run: cat /etc/os-release
	I0816 00:07:01.938971 2407112 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
	I0816 00:07:01.939136 2407112 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
	I0816 00:07:01.939248 2407112 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
	I0816 00:07:01.939316 2407112 info.go:137] Remote host: Ubuntu 22.04.4 LTS
	I0816 00:07:01.939351 2407112 filesync.go:126] Scanning /home/jenkins/minikube-integration/19452-2026001/.minikube/addons for local assets ...
	I0816 00:07:01.939510 2407112 filesync.go:126] Scanning /home/jenkins/minikube-integration/19452-2026001/.minikube/files for local assets ...
	I0816 00:07:01.939680 2407112 filesync.go:149] local asset: /home/jenkins/minikube-integration/19452-2026001/.minikube/files/etc/ssl/certs/20313962.pem -> 20313962.pem in /etc/ssl/certs
	I0816 00:07:01.939860 2407112 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
	I0816 00:07:01.952332 2407112 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/files/etc/ssl/certs/20313962.pem --> /etc/ssl/certs/20313962.pem (1708 bytes)
	I0816 00:07:01.988689 2407112 start.go:296] duration metric: took 180.394597ms for postStartSetup
	I0816 00:07:01.988871 2407112 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I0816 00:07:01.988958 2407112 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-894472
	I0816 00:07:02.013667 2407112 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35084 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/old-k8s-version-894472/id_rsa Username:docker}
	I0816 00:07:02.116008 2407112 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
	I0816 00:07:02.124115 2407112 fix.go:56] duration metric: took 5.251770675s for fixHost
	I0816 00:07:02.124196 2407112 start.go:83] releasing machines lock for "old-k8s-version-894472", held for 5.251886275s
	I0816 00:07:02.124306 2407112 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-894472
	I0816 00:07:02.145566 2407112 ssh_runner.go:195] Run: cat /version.json
	I0816 00:07:02.145654 2407112 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-894472
	I0816 00:07:02.145861 2407112 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
	I0816 00:07:02.145940 2407112 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-894472
	I0816 00:07:02.165393 2407112 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35084 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/old-k8s-version-894472/id_rsa Username:docker}
	I0816 00:07:02.191438 2407112 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35084 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/old-k8s-version-894472/id_rsa Username:docker}
	I0816 00:07:02.406009 2407112 ssh_runner.go:195] Run: systemctl --version
	I0816 00:07:02.410660 2407112 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
	I0816 00:07:02.415242 2407112 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
	I0816 00:07:02.436603 2407112 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
	I0816 00:07:02.436768 2407112 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *bridge* -not -name *podman* -not -name *.mk_disabled -printf "%p, " -exec sh -c "sudo sed -i -r -e '/"dst": ".*:.*"/d' -e 's|^(.*)"dst": (.*)[,*]$|\1"dst": \2|g' -e '/"subnet": ".*:.*"/d' -e 's|^(.*)"subnet": ".*"(.*)[,*]$|\1"subnet": "10.244.0.0/16"\2|g' {}" ;
	I0816 00:07:02.460074 2407112 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *podman* -not -name *.mk_disabled -printf "%p, " -exec sh -c "sudo sed -i -r -e 's|^(.*)"subnet": ".*"(.*)$|\1"subnet": "10.244.0.0/16"\2|g' -e 's|^(.*)"gateway": ".*"(.*)$|\1"gateway": "10.244.0.1"\2|g' {}" ;
	I0816 00:07:02.480751 2407112 cni.go:308] configured [/etc/cni/net.d/100-crio-bridge.conf, /etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
	I0816 00:07:02.480823 2407112 start.go:495] detecting cgroup driver to use...
	I0816 00:07:02.480871 2407112 detect.go:187] detected "cgroupfs" cgroup driver on host os
	I0816 00:07:02.480999 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
	" | sudo tee /etc/crictl.yaml"
	I0816 00:07:02.503673 2407112 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.2"|' /etc/containerd/config.toml"
	I0816 00:07:02.515774 2407112 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
	I0816 00:07:02.527763 2407112 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
	I0816 00:07:02.527880 2407112 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
	I0816 00:07:02.540061 2407112 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
	I0816 00:07:02.551481 2407112 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
	I0816 00:07:02.562500 2407112 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
	I0816 00:07:02.574034 2407112 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
	I0816 00:07:02.586375 2407112 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
	I0816 00:07:02.598054 2407112 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
	I0816 00:07:02.608932 2407112 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
	I0816 00:07:02.622182 2407112 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0816 00:07:02.742535 2407112 ssh_runner.go:195] Run: sudo systemctl restart containerd
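For context on the sed edits above: they pin containerd to the cgroupfs driver by forcing SystemdCgroup = false in /etc/containerd/config.toml before the restart. A minimal standalone Go sketch (not minikube code) that reports which value ended up in that file:

package main

import (
	"fmt"
	"os"
	"regexp"
)

func main() {
	// Same file the log edits; adjust the path if containerd is configured elsewhere.
	data, err := os.ReadFile("/etc/containerd/config.toml")
	if err != nil {
		fmt.Fprintln(os.Stderr, "read config:", err)
		os.Exit(1)
	}
	re := regexp.MustCompile(`(?m)^\s*SystemdCgroup\s*=\s*(true|false)`)
	if m := re.FindSubmatch(data); m != nil {
		fmt.Printf("SystemdCgroup = %s\n", m[1])
	} else {
		fmt.Println("SystemdCgroup not set; containerd's default applies")
	}
}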
	I0816 00:07:02.903514 2407112 start.go:495] detecting cgroup driver to use...
	I0816 00:07:02.903563 2407112 detect.go:187] detected "cgroupfs" cgroup driver on host os
	I0816 00:07:02.903623 2407112 ssh_runner.go:195] Run: sudo systemctl cat docker.service
	I0816 00:07:02.938058 2407112 cruntime.go:279] skipping containerd shutdown because we are bound to it
	I0816 00:07:02.938150 2407112 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
	I0816 00:07:02.955198 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/dockershim.sock
	" | sudo tee /etc/crictl.yaml"
	I0816 00:07:02.985653 2407112 ssh_runner.go:195] Run: which cri-dockerd
	I0816 00:07:02.996916 2407112 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
	I0816 00:07:03.017552 2407112 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (189 bytes)
	I0816 00:07:03.051778 2407112 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
	I0816 00:07:03.213417 2407112 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
	I0816 00:07:03.398809 2407112 docker.go:574] configuring docker to use "cgroupfs" as cgroup driver...
	I0816 00:07:03.398972 2407112 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
	I0816 00:07:03.430030 2407112 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0816 00:07:03.595160 2407112 ssh_runner.go:195] Run: sudo systemctl restart docker
	I0816 00:07:04.243426 2407112 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
	I0816 00:07:04.274355 2407112 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
	I0816 00:07:04.314372 2407112 out.go:235] * Preparing Kubernetes v1.20.0 on Docker 27.1.2 ...
	I0816 00:07:04.314465 2407112 cli_runner.go:164] Run: docker network inspect old-k8s-version-894472 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
	I0816 00:07:04.338351 2407112 ssh_runner.go:195] Run: grep 192.168.85.1	host.minikube.internal$ /etc/hosts
	I0816 00:07:04.342439 2407112 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.85.1	host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
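The grep/echo pipeline above removes any stale host.minikube.internal line from /etc/hosts and appends a fresh one. A rough Go equivalent of that upsert, for illustration only (minikube itself does this through the shell command shown):

package main

import (
	"fmt"
	"os"
	"strings"
)

// upsertHostsEntry drops any existing line ending in "\t<host>" and appends
// "<ip>\t<host>", mirroring the grep -v / echo / cp sequence in the log.
func upsertHostsEntry(path, ip, host string) error {
	data, err := os.ReadFile(path)
	if err != nil {
		return err
	}
	var kept []string
	for _, line := range strings.Split(strings.TrimRight(string(data), "\n"), "\n") {
		if strings.HasSuffix(line, "\t"+host) {
			continue
		}
		kept = append(kept, line)
	}
	kept = append(kept, ip+"\t"+host)
	return os.WriteFile(path, []byte(strings.Join(kept, "\n")+"\n"), 0644)
}

func main() {
	if err := upsertHostsEntry("/etc/hosts", "192.168.85.1", "host.minikube.internal"); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}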
	I0816 00:07:04.354667 2407112 kubeadm.go:883] updating cluster {Name:old-k8s-version-894472 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.20.0 ClusterName:old-k8s-version-894472 Namespace:default APIServerHAVIP: APIServerName:minik
ubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin: FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.20.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkin
s:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
	I0816 00:07:04.354805 2407112 preload.go:131] Checking if preload exists for k8s version v1.20.0 and runtime docker
	I0816 00:07:04.354861 2407112 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
	I0816 00:07:04.374314 2407112 docker.go:685] Got preloaded images: -- stdout --
	gcr.io/k8s-minikube/storage-provisioner:v5
	k8s.gcr.io/kube-proxy:v1.20.0
	registry.k8s.io/kube-proxy:v1.20.0
	k8s.gcr.io/kube-controller-manager:v1.20.0
	registry.k8s.io/kube-controller-manager:v1.20.0
	k8s.gcr.io/kube-apiserver:v1.20.0
	registry.k8s.io/kube-apiserver:v1.20.0
	k8s.gcr.io/kube-scheduler:v1.20.0
	registry.k8s.io/kube-scheduler:v1.20.0
	k8s.gcr.io/etcd:3.4.13-0
	registry.k8s.io/etcd:3.4.13-0
	k8s.gcr.io/coredns:1.7.0
	registry.k8s.io/coredns:1.7.0
	k8s.gcr.io/pause:3.2
	registry.k8s.io/pause:3.2
	gcr.io/k8s-minikube/busybox:1.28.4-glibc
	
	-- /stdout --
	I0816 00:07:04.374337 2407112 docker.go:615] Images already preloaded, skipping extraction
	I0816 00:07:04.374398 2407112 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
	I0816 00:07:04.393054 2407112 docker.go:685] Got preloaded images: -- stdout --
	gcr.io/k8s-minikube/storage-provisioner:v5
	k8s.gcr.io/kube-proxy:v1.20.0
	registry.k8s.io/kube-proxy:v1.20.0
	k8s.gcr.io/kube-apiserver:v1.20.0
	registry.k8s.io/kube-apiserver:v1.20.0
	k8s.gcr.io/kube-controller-manager:v1.20.0
	registry.k8s.io/kube-controller-manager:v1.20.0
	k8s.gcr.io/kube-scheduler:v1.20.0
	registry.k8s.io/kube-scheduler:v1.20.0
	k8s.gcr.io/etcd:3.4.13-0
	registry.k8s.io/etcd:3.4.13-0
	k8s.gcr.io/coredns:1.7.0
	registry.k8s.io/coredns:1.7.0
	k8s.gcr.io/pause:3.2
	registry.k8s.io/pause:3.2
	gcr.io/k8s-minikube/busybox:1.28.4-glibc
	
	-- /stdout --
	I0816 00:07:04.393081 2407112 cache_images.go:84] Images are preloaded, skipping loading
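The `docker images --format {{.Repository}}:{{.Tag}}` runs above are how the preload check works: the cached tarball is only extracted if a required image is missing from the runtime. A small Go sketch of that comparison; the required list here is a hand-picked subset for illustration, not minikube's actual manifest:

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	required := []string{
		"registry.k8s.io/kube-apiserver:v1.20.0",
		"registry.k8s.io/etcd:3.4.13-0",
		"registry.k8s.io/pause:3.2",
	}
	out, err := exec.Command("docker", "images", "--format", "{{.Repository}}:{{.Tag}}").Output()
	if err != nil {
		fmt.Println("docker images failed:", err)
		return
	}
	have := map[string]bool{}
	for _, line := range strings.Split(strings.TrimSpace(string(out)), "\n") {
		have[line] = true
	}
	for _, img := range required {
		if !have[img] {
			fmt.Println("missing, would trigger preload extraction:", img)
		}
	}
}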
	I0816 00:07:04.393093 2407112 kubeadm.go:934] updating node { 192.168.85.2 8443 v1.20.0 docker true true} ...
	I0816 00:07:04.393221 2407112 kubeadm.go:946] kubelet [Unit]
	Wants=docker.socket
	
	[Service]
	ExecStart=
	ExecStart=/var/lib/minikube/binaries/v1.20.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --container-runtime=docker --hostname-override=old-k8s-version-894472 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.85.2
	
	[Install]
	 config:
	{KubernetesVersion:v1.20.0 ClusterName:old-k8s-version-894472 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin: FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
	I0816 00:07:04.393302 2407112 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
	I0816 00:07:04.488139 2407112 cni.go:84] Creating CNI manager for ""
	I0816 00:07:04.488170 2407112 cni.go:162] CNI unnecessary in this configuration, recommending no CNI
	I0816 00:07:04.488179 2407112 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
	I0816 00:07:04.488199 2407112 kubeadm.go:181] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.85.2 APIServerPort:8443 KubernetesVersion:v1.20.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:old-k8s-version-894472 NodeName:old-k8s-version-894472 DNSDomain:cluster.local CRISocket:/var/run/dockershim.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.85.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.85.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticP
odPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:false}
	I0816 00:07:04.488353 2407112 kubeadm.go:187] kubeadm config:
	apiVersion: kubeadm.k8s.io/v1beta2
	kind: InitConfiguration
	localAPIEndpoint:
	  advertiseAddress: 192.168.85.2
	  bindPort: 8443
	bootstrapTokens:
	  - groups:
	      - system:bootstrappers:kubeadm:default-node-token
	    ttl: 24h0m0s
	    usages:
	      - signing
	      - authentication
	nodeRegistration:
	  criSocket: /var/run/dockershim.sock
	  name: "old-k8s-version-894472"
	  kubeletExtraArgs:
	    node-ip: 192.168.85.2
	  taints: []
	---
	apiVersion: kubeadm.k8s.io/v1beta2
	kind: ClusterConfiguration
	apiServer:
	  certSANs: ["127.0.0.1", "localhost", "192.168.85.2"]
	  extraArgs:
	    enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
	controllerManager:
	  extraArgs:
	    allocate-node-cidrs: "true"
	    leader-elect: "false"
	scheduler:
	  extraArgs:
	    leader-elect: "false"
	certificatesDir: /var/lib/minikube/certs
	clusterName: mk
	controlPlaneEndpoint: control-plane.minikube.internal:8443
	dns:
	  type: CoreDNS
	etcd:
	  local:
	    dataDir: /var/lib/minikube/etcd
	    extraArgs:
	      proxy-refresh-interval: "70000"
	kubernetesVersion: v1.20.0
	networking:
	  dnsDomain: cluster.local
	  podSubnet: "10.244.0.0/16"
	  serviceSubnet: 10.96.0.0/12
	---
	apiVersion: kubelet.config.k8s.io/v1beta1
	kind: KubeletConfiguration
	authentication:
	  x509:
	    clientCAFile: /var/lib/minikube/certs/ca.crt
	cgroupDriver: cgroupfs
	hairpinMode: hairpin-veth
	runtimeRequestTimeout: 15m
	clusterDomain: "cluster.local"
	# disable disk resource management by default
	imageGCHighThresholdPercent: 100
	evictionHard:
	  nodefs.available: "0%"
	  nodefs.inodesFree: "0%"
	  imagefs.available: "0%"
	failSwapOn: false
	staticPodPath: /etc/kubernetes/manifests
	---
	apiVersion: kubeproxy.config.k8s.io/v1alpha1
	kind: KubeProxyConfiguration
	clusterCIDR: "10.244.0.0/16"
	metricsBindAddress: 0.0.0.0:10249
	conntrack:
	  maxPerCore: 0
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
	  tcpEstablishedTimeout: 0s
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
	  tcpCloseWaitTimeout: 0s
	
	I0816 00:07:04.488426 2407112 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.20.0
	I0816 00:07:04.502377 2407112 binaries.go:44] Found k8s binaries, skipping transfer
	I0816 00:07:04.502449 2407112 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
	I0816 00:07:04.512344 2407112 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (348 bytes)
	I0816 00:07:04.534450 2407112 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
	I0816 00:07:04.559341 2407112 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2118 bytes)
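The kubeadm.yaml above is rendered from parameters such as ClusterName, NodeIP and KubernetesVersion and then copied to /var/tmp/minikube/kubeadm.yaml.new. A cut-down sketch of that kind of templating in Go; the template below covers only a few of the fields shown in the log and is not minikube's real template:

package main

import (
	"fmt"
	"os"
	"text/template"
)

const clusterConfig = `apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
clusterName: {{.ClusterName}}
kubernetesVersion: {{.KubernetesVersion}}
controlPlaneEndpoint: control-plane.minikube.internal:8443
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
`

func main() {
	t := template.Must(template.New("kubeadm").Parse(clusterConfig))
	// Values taken from the log; everything else stays at the template's literals.
	params := struct {
		ClusterName       string
		KubernetesVersion string
	}{"mk", "v1.20.0"}
	if err := t.Execute(os.Stdout, params); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}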
	I0816 00:07:04.581498 2407112 ssh_runner.go:195] Run: grep 192.168.85.2	control-plane.minikube.internal$ /etc/hosts
	I0816 00:07:04.585190 2407112 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.85.2	control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
	I0816 00:07:04.596978 2407112 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0816 00:07:04.712358 2407112 ssh_runner.go:195] Run: sudo systemctl start kubelet
	I0816 00:07:04.728930 2407112 certs.go:68] Setting up /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/old-k8s-version-894472 for IP: 192.168.85.2
	I0816 00:07:04.728956 2407112 certs.go:194] generating shared ca certs ...
	I0816 00:07:04.728973 2407112 certs.go:226] acquiring lock for ca certs: {Name:mkddf294a5c2bc6874920ab9b3e5ac4767302c25 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0816 00:07:04.729115 2407112 certs.go:235] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/19452-2026001/.minikube/ca.key
	I0816 00:07:04.729163 2407112 certs.go:235] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/19452-2026001/.minikube/proxy-client-ca.key
	I0816 00:07:04.729174 2407112 certs.go:256] generating profile certs ...
	I0816 00:07:04.729258 2407112 certs.go:359] skipping valid signed profile cert regeneration for "minikube-user": /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/old-k8s-version-894472/client.key
	I0816 00:07:04.729331 2407112 certs.go:359] skipping valid signed profile cert regeneration for "minikube": /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/old-k8s-version-894472/apiserver.key.8284e299
	I0816 00:07:04.729376 2407112 certs.go:359] skipping valid signed profile cert regeneration for "aggregator": /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/old-k8s-version-894472/proxy-client.key
	I0816 00:07:04.729502 2407112 certs.go:484] found cert: /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/2031396.pem (1338 bytes)
	W0816 00:07:04.729537 2407112 certs.go:480] ignoring /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/2031396_empty.pem, impossibly tiny 0 bytes
	I0816 00:07:04.729550 2407112 certs.go:484] found cert: /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/ca-key.pem (1679 bytes)
	I0816 00:07:04.729576 2407112 certs.go:484] found cert: /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/ca.pem (1082 bytes)
	I0816 00:07:04.729685 2407112 certs.go:484] found cert: /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/cert.pem (1123 bytes)
	I0816 00:07:04.729715 2407112 certs.go:484] found cert: /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/key.pem (1675 bytes)
	I0816 00:07:04.729767 2407112 certs.go:484] found cert: /home/jenkins/minikube-integration/19452-2026001/.minikube/files/etc/ssl/certs/20313962.pem (1708 bytes)
	I0816 00:07:04.730391 2407112 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
	I0816 00:07:04.790545 2407112 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
	I0816 00:07:04.872879 2407112 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
	I0816 00:07:04.962973 2407112 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
	I0816 00:07:05.054747 2407112 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/old-k8s-version-894472/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1432 bytes)
	I0816 00:07:05.189932 2407112 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/old-k8s-version-894472/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
	I0816 00:07:05.279413 2407112 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/old-k8s-version-894472/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
	I0816 00:07:05.312601 2407112 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/old-k8s-version-894472/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
	I0816 00:07:05.348127 2407112 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
	I0816 00:07:05.383332 2407112 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/2031396.pem --> /usr/share/ca-certificates/2031396.pem (1338 bytes)
	I0816 00:07:05.416757 2407112 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/files/etc/ssl/certs/20313962.pem --> /usr/share/ca-certificates/20313962.pem (1708 bytes)
	I0816 00:07:05.456103 2407112 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
	I0816 00:07:05.491819 2407112 ssh_runner.go:195] Run: openssl version
	I0816 00:07:05.498791 2407112 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
	I0816 00:07:05.514242 2407112 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
	I0816 00:07:05.520526 2407112 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Aug 15 23:06 /usr/share/ca-certificates/minikubeCA.pem
	I0816 00:07:05.520604 2407112 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
	I0816 00:07:05.535660 2407112 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
	I0816 00:07:05.551416 2407112 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/2031396.pem && ln -fs /usr/share/ca-certificates/2031396.pem /etc/ssl/certs/2031396.pem"
	I0816 00:07:05.562633 2407112 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/2031396.pem
	I0816 00:07:05.572066 2407112 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Aug 15 23:13 /usr/share/ca-certificates/2031396.pem
	I0816 00:07:05.572134 2407112 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/2031396.pem
	I0816 00:07:05.582682 2407112 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/2031396.pem /etc/ssl/certs/51391683.0"
	I0816 00:07:05.593525 2407112 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/20313962.pem && ln -fs /usr/share/ca-certificates/20313962.pem /etc/ssl/certs/20313962.pem"
	I0816 00:07:05.607678 2407112 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/20313962.pem
	I0816 00:07:05.611743 2407112 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Aug 15 23:13 /usr/share/ca-certificates/20313962.pem
	I0816 00:07:05.611810 2407112 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/20313962.pem
	I0816 00:07:05.624432 2407112 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/20313962.pem /etc/ssl/certs/3ec20f2e.0"
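Each `openssl x509 -hash` / `ln -fs` pair above installs a CA into the system trust store: the link name under /etc/ssl/certs is the certificate's subject hash plus ".0". A Go sketch of one such step; the helper and the chosen cert path are illustrative:

package main

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
)

// linkCACert asks openssl for the certificate's subject hash and points
// /etc/ssl/certs/<hash>.0 at it, like the hash/ln -fs pairs in the log.
func linkCACert(certPath string) error {
	out, err := exec.Command("openssl", "x509", "-hash", "-noout", "-in", certPath).Output()
	if err != nil {
		return err
	}
	link := filepath.Join("/etc/ssl/certs", strings.TrimSpace(string(out))+".0")
	os.Remove(link) // replace a stale link, as ln -fs would
	return os.Symlink(certPath, link)
}

func main() {
	if err := linkCACert("/usr/share/ca-certificates/minikubeCA.pem"); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}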
	I0816 00:07:05.638734 2407112 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
	I0816 00:07:05.642770 2407112 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-etcd-client.crt -checkend 86400
	I0816 00:07:05.651056 2407112 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-kubelet-client.crt -checkend 86400
	I0816 00:07:05.661346 2407112 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/server.crt -checkend 86400
	I0816 00:07:05.670401 2407112 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/healthcheck-client.crt -checkend 86400
	I0816 00:07:05.680326 2407112 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/peer.crt -checkend 86400
	I0816 00:07:05.691198 2407112 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/front-proxy-client.crt -checkend 86400
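Each `openssl x509 ... -checkend 86400` run above simply asks whether the certificate stays valid for another 24 hours. The same check in plain Go with the standard library, using one of the cert paths from the log:

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	data, err := os.ReadFile("/var/lib/minikube/certs/apiserver-kubelet-client.crt")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		fmt.Fprintln(os.Stderr, "no PEM block in file")
		os.Exit(1)
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	if time.Until(cert.NotAfter) < 24*time.Hour {
		fmt.Println("certificate expires within 24h")
		os.Exit(1)
	}
	fmt.Println("certificate valid for at least another 24h")
}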
	I0816 00:07:05.703145 2407112 kubeadm.go:392] StartCluster: {Name:old-k8s-version-894472 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.20.0 ClusterName:old-k8s-version-894472 Namespace:default APIServerHAVIP: APIServerName:minikube
CA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin: FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.20.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/
minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I0816 00:07:05.703342 2407112 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
	I0816 00:07:05.738014 2407112 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
	I0816 00:07:05.748400 2407112 kubeadm.go:408] found existing configuration files, will attempt cluster restart
	I0816 00:07:05.748423 2407112 kubeadm.go:593] restartPrimaryControlPlane start ...
	I0816 00:07:05.748492 2407112 ssh_runner.go:195] Run: sudo test -d /data/minikube
	I0816 00:07:05.761251 2407112 kubeadm.go:130] /data/minikube skipping compat symlinks: sudo test -d /data/minikube: Process exited with status 1
	stdout:
	
	stderr:
	I0816 00:07:05.761923 2407112 kubeconfig.go:47] verify endpoint returned: get endpoint: "old-k8s-version-894472" does not appear in /home/jenkins/minikube-integration/19452-2026001/kubeconfig
	I0816 00:07:05.762213 2407112 kubeconfig.go:62] /home/jenkins/minikube-integration/19452-2026001/kubeconfig needs updating (will repair): [kubeconfig missing "old-k8s-version-894472" cluster setting kubeconfig missing "old-k8s-version-894472" context setting]
	I0816 00:07:05.762733 2407112 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19452-2026001/kubeconfig: {Name:mkb1a4d12f06c0f193e7cb7c118eeb997c3969bc Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0816 00:07:05.764582 2407112 ssh_runner.go:195] Run: sudo diff -u /var/tmp/minikube/kubeadm.yaml /var/tmp/minikube/kubeadm.yaml.new
	I0816 00:07:05.775566 2407112 kubeadm.go:630] The running cluster does not require reconfiguration: 192.168.85.2
	I0816 00:07:05.775599 2407112 kubeadm.go:597] duration metric: took 27.169035ms to restartPrimaryControlPlane
	I0816 00:07:05.775608 2407112 kubeadm.go:394] duration metric: took 72.496933ms to StartCluster
	I0816 00:07:05.775631 2407112 settings.go:142] acquiring lock: {Name:mkd932093f6b6db884e5d5f97d2ea9be134ab309 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0816 00:07:05.775691 2407112 settings.go:150] Updating kubeconfig:  /home/jenkins/minikube-integration/19452-2026001/kubeconfig
	I0816 00:07:05.776766 2407112 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19452-2026001/kubeconfig: {Name:mkb1a4d12f06c0f193e7cb7c118eeb997c3969bc Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0816 00:07:05.776976 2407112 start.go:235] Will wait 6m0s for node &{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.20.0 ContainerRuntime:docker ControlPlane:true Worker:true}
	I0816 00:07:05.777314 2407112 addons.go:507] enable addons start: toEnable=map[ambassador:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:true default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false helm-tiller:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:true nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
	I0816 00:07:05.777409 2407112 addons.go:69] Setting storage-provisioner=true in profile "old-k8s-version-894472"
	I0816 00:07:05.777436 2407112 addons.go:234] Setting addon storage-provisioner=true in "old-k8s-version-894472"
	W0816 00:07:05.777444 2407112 addons.go:243] addon storage-provisioner should already be in state true
	I0816 00:07:05.777468 2407112 host.go:66] Checking if "old-k8s-version-894472" exists ...
	I0816 00:07:05.778242 2407112 cli_runner.go:164] Run: docker container inspect old-k8s-version-894472 --format={{.State.Status}}
	I0816 00:07:05.778448 2407112 config.go:182] Loaded profile config "old-k8s-version-894472": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.20.0
	I0816 00:07:05.778494 2407112 addons.go:69] Setting default-storageclass=true in profile "old-k8s-version-894472"
	I0816 00:07:05.778527 2407112 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "old-k8s-version-894472"
	I0816 00:07:05.778768 2407112 cli_runner.go:164] Run: docker container inspect old-k8s-version-894472 --format={{.State.Status}}
	I0816 00:07:05.779142 2407112 addons.go:69] Setting metrics-server=true in profile "old-k8s-version-894472"
	I0816 00:07:05.779169 2407112 addons.go:234] Setting addon metrics-server=true in "old-k8s-version-894472"
	W0816 00:07:05.779178 2407112 addons.go:243] addon metrics-server should already be in state true
	I0816 00:07:05.779199 2407112 host.go:66] Checking if "old-k8s-version-894472" exists ...
	I0816 00:07:05.779588 2407112 cli_runner.go:164] Run: docker container inspect old-k8s-version-894472 --format={{.State.Status}}
	I0816 00:07:05.780076 2407112 addons.go:69] Setting dashboard=true in profile "old-k8s-version-894472"
	I0816 00:07:05.780103 2407112 addons.go:234] Setting addon dashboard=true in "old-k8s-version-894472"
	W0816 00:07:05.780110 2407112 addons.go:243] addon dashboard should already be in state true
	I0816 00:07:05.780132 2407112 host.go:66] Checking if "old-k8s-version-894472" exists ...
	I0816 00:07:05.780521 2407112 cli_runner.go:164] Run: docker container inspect old-k8s-version-894472 --format={{.State.Status}}
	I0816 00:07:05.781340 2407112 out.go:177] * Verifying Kubernetes components...
	I0816 00:07:05.786896 2407112 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0816 00:07:05.846736 2407112 out.go:177]   - Using image gcr.io/k8s-minikube/storage-provisioner:v5
	I0816 00:07:05.848963 2407112 addons.go:431] installing /etc/kubernetes/addons/storage-provisioner.yaml
	I0816 00:07:05.848982 2407112 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
	I0816 00:07:05.849044 2407112 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-894472
	I0816 00:07:05.849811 2407112 out.go:177]   - Using image fake.domain/registry.k8s.io/echoserver:1.4
	I0816 00:07:05.852051 2407112 out.go:177]   - Using image docker.io/kubernetesui/dashboard:v2.7.0
	I0816 00:07:05.854285 2407112 out.go:177]   - Using image registry.k8s.io/echoserver:1.4
	I0816 00:07:05.854342 2407112 addons.go:431] installing /etc/kubernetes/addons/metrics-apiservice.yaml
	I0816 00:07:05.854362 2407112 ssh_runner.go:362] scp metrics-server/metrics-apiservice.yaml --> /etc/kubernetes/addons/metrics-apiservice.yaml (424 bytes)
	I0816 00:07:05.854426 2407112 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-894472
	I0816 00:07:05.855926 2407112 addons.go:431] installing /etc/kubernetes/addons/dashboard-ns.yaml
	I0816 00:07:05.855950 2407112 ssh_runner.go:362] scp dashboard/dashboard-ns.yaml --> /etc/kubernetes/addons/dashboard-ns.yaml (759 bytes)
	I0816 00:07:05.856011 2407112 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-894472
	I0816 00:07:05.862793 2407112 addons.go:234] Setting addon default-storageclass=true in "old-k8s-version-894472"
	W0816 00:07:05.862818 2407112 addons.go:243] addon default-storageclass should already be in state true
	I0816 00:07:05.862845 2407112 host.go:66] Checking if "old-k8s-version-894472" exists ...
	I0816 00:07:05.863265 2407112 cli_runner.go:164] Run: docker container inspect old-k8s-version-894472 --format={{.State.Status}}
	I0816 00:07:05.937765 2407112 addons.go:431] installing /etc/kubernetes/addons/storageclass.yaml
	I0816 00:07:05.937786 2407112 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
	I0816 00:07:05.937849 2407112 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-894472
	I0816 00:07:05.938903 2407112 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35084 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/old-k8s-version-894472/id_rsa Username:docker}
	I0816 00:07:05.939799 2407112 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35084 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/old-k8s-version-894472/id_rsa Username:docker}
	I0816 00:07:05.949373 2407112 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35084 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/old-k8s-version-894472/id_rsa Username:docker}
	I0816 00:07:05.981218 2407112 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35084 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/old-k8s-version-894472/id_rsa Username:docker}
	I0816 00:07:06.080695 2407112 ssh_runner.go:195] Run: sudo systemctl start kubelet
	I0816 00:07:06.150247 2407112 node_ready.go:35] waiting up to 6m0s for node "old-k8s-version-894472" to be "Ready" ...
	I0816 00:07:06.191728 2407112 addons.go:431] installing /etc/kubernetes/addons/metrics-server-deployment.yaml
	I0816 00:07:06.191762 2407112 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-deployment.yaml (1825 bytes)
	I0816 00:07:06.239892 2407112 addons.go:431] installing /etc/kubernetes/addons/dashboard-clusterrole.yaml
	I0816 00:07:06.239922 2407112 ssh_runner.go:362] scp dashboard/dashboard-clusterrole.yaml --> /etc/kubernetes/addons/dashboard-clusterrole.yaml (1001 bytes)
	I0816 00:07:06.258370 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
	I0816 00:07:06.270746 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
	I0816 00:07:06.301914 2407112 addons.go:431] installing /etc/kubernetes/addons/metrics-server-rbac.yaml
	I0816 00:07:06.301941 2407112 ssh_runner.go:362] scp metrics-server/metrics-server-rbac.yaml --> /etc/kubernetes/addons/metrics-server-rbac.yaml (2175 bytes)
	I0816 00:07:06.363677 2407112 addons.go:431] installing /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml
	I0816 00:07:06.363704 2407112 ssh_runner.go:362] scp dashboard/dashboard-clusterrolebinding.yaml --> /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml (1018 bytes)
	I0816 00:07:06.391527 2407112 addons.go:431] installing /etc/kubernetes/addons/metrics-server-service.yaml
	I0816 00:07:06.391554 2407112 ssh_runner.go:362] scp metrics-server/metrics-server-service.yaml --> /etc/kubernetes/addons/metrics-server-service.yaml (446 bytes)
	I0816 00:07:06.484518 2407112 addons.go:431] installing /etc/kubernetes/addons/dashboard-configmap.yaml
	I0816 00:07:06.484554 2407112 ssh_runner.go:362] scp dashboard/dashboard-configmap.yaml --> /etc/kubernetes/addons/dashboard-configmap.yaml (837 bytes)
	I0816 00:07:06.548418 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
	I0816 00:07:06.627810 2407112 addons.go:431] installing /etc/kubernetes/addons/dashboard-dp.yaml
	I0816 00:07:06.627839 2407112 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/dashboard-dp.yaml (4201 bytes)
	W0816 00:07:06.641267 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0816 00:07:06.641316 2407112 retry.go:31] will retry after 127.065326ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	W0816 00:07:06.641386 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0816 00:07:06.641400 2407112 retry.go:31] will retry after 277.783153ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0816 00:07:06.701939 2407112 addons.go:431] installing /etc/kubernetes/addons/dashboard-role.yaml
	I0816 00:07:06.701977 2407112 ssh_runner.go:362] scp dashboard/dashboard-role.yaml --> /etc/kubernetes/addons/dashboard-role.yaml (1724 bytes)
	I0816 00:07:06.750955 2407112 addons.go:431] installing /etc/kubernetes/addons/dashboard-rolebinding.yaml
	I0816 00:07:06.750997 2407112 ssh_runner.go:362] scp dashboard/dashboard-rolebinding.yaml --> /etc/kubernetes/addons/dashboard-rolebinding.yaml (1046 bytes)
	I0816 00:07:06.769342 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml
	W0816 00:07:06.813658 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0816 00:07:06.813693 2407112 retry.go:31] will retry after 153.642197ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0816 00:07:06.836149 2407112 addons.go:431] installing /etc/kubernetes/addons/dashboard-sa.yaml
	I0816 00:07:06.836175 2407112 ssh_runner.go:362] scp dashboard/dashboard-sa.yaml --> /etc/kubernetes/addons/dashboard-sa.yaml (837 bytes)
	I0816 00:07:06.911117 2407112 addons.go:431] installing /etc/kubernetes/addons/dashboard-secret.yaml
	I0816 00:07:06.911159 2407112 ssh_runner.go:362] scp dashboard/dashboard-secret.yaml --> /etc/kubernetes/addons/dashboard-secret.yaml (1389 bytes)
	I0816 00:07:06.919341 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml
	I0816 00:07:06.967699 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
	I0816 00:07:07.025039 2407112 addons.go:431] installing /etc/kubernetes/addons/dashboard-svc.yaml
	I0816 00:07:07.025069 2407112 ssh_runner.go:362] scp dashboard/dashboard-svc.yaml --> /etc/kubernetes/addons/dashboard-svc.yaml (1294 bytes)
	W0816 00:07:07.029064 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0816 00:07:07.029102 2407112 retry.go:31] will retry after 509.569838ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0816 00:07:07.088653 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
	W0816 00:07:07.308033 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0816 00:07:07.308110 2407112 retry.go:31] will retry after 539.39958ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	W0816 00:07:07.373234 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0816 00:07:07.373320 2407112 retry.go:31] will retry after 400.225326ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	W0816 00:07:07.402944 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0816 00:07:07.403044 2407112 retry.go:31] will retry after 342.699121ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0816 00:07:07.539189 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml
	W0816 00:07:07.655104 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0816 00:07:07.655181 2407112 retry.go:31] will retry after 696.953315ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0816 00:07:07.746537 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
	I0816 00:07:07.773936 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
	I0816 00:07:07.848437 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml
	W0816 00:07:07.926149 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0816 00:07:07.926231 2407112 retry.go:31] will retry after 332.717558ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	W0816 00:07:08.022145 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0816 00:07:08.022230 2407112 retry.go:31] will retry after 610.563639ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	W0816 00:07:08.082574 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0816 00:07:08.082667 2407112 retry.go:31] will retry after 378.792115ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0816 00:07:08.151166 2407112 node_ready.go:53] error getting node "old-k8s-version-894472": Get "https://192.168.85.2:8443/api/v1/nodes/old-k8s-version-894472": dial tcp 192.168.85.2:8443: connect: connection refused
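The connection-refused error above is expected while the apiserver container is still coming back up; the node_ready loop keeps polling until its 6m0s deadline. A simplified sketch of that kind of wait against the apiserver's /healthz endpoint; the endpoint, timing, and TLS handling here are assumptions, not minikube's exact probe:

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{
		Timeout: 2 * time.Second,
		// The apiserver's serving cert is not trusted by this host, so skip
		// verification for this liveness probe only.
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}
	deadline := time.Now().Add(6 * time.Minute)
	for time.Now().Before(deadline) {
		resp, err := client.Get("https://192.168.85.2:8443/healthz")
		if err == nil {
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				fmt.Println("apiserver is answering")
				return
			}
		}
		time.Sleep(2 * time.Second) // connection refused just means "retry"
	}
	fmt.Println("gave up waiting for the apiserver")
}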
	I0816 00:07:08.259415 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
	I0816 00:07:08.352929 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml
	W0816 00:07:08.404103 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0816 00:07:08.404189 2407112 retry.go:31] will retry after 403.549215ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0816 00:07:08.462482 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml
	W0816 00:07:08.522695 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0816 00:07:08.522775 2407112 retry.go:31] will retry after 935.790118ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0816 00:07:08.633975 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
	W0816 00:07:08.775232 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0816 00:07:08.775313 2407112 retry.go:31] will retry after 819.876638ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0816 00:07:08.808618 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
	W0816 00:07:09.055251 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0816 00:07:09.055334 2407112 retry.go:31] will retry after 1.191679901s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	W0816 00:07:09.175621 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0816 00:07:09.175714 2407112 retry.go:31] will retry after 632.221449ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0816 00:07:09.459073 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml
	I0816 00:07:09.595794 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml
	W0816 00:07:09.645174 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0816 00:07:09.645258 2407112 retry.go:31] will retry after 932.634043ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0816 00:07:09.808967 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
	W0816 00:07:09.950456 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0816 00:07:09.950487 2407112 retry.go:31] will retry after 1.49646314s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	W0816 00:07:10.197148 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0816 00:07:10.197180 2407112 retry.go:31] will retry after 1.403935272s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0816 00:07:10.247495 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
	W0816 00:07:10.555903 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0816 00:07:10.555934 2407112 retry.go:31] will retry after 1.661575233s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0816 00:07:10.578252 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml
	I0816 00:07:10.650868 2407112 node_ready.go:53] error getting node "old-k8s-version-894472": Get "https://192.168.85.2:8443/api/v1/nodes/old-k8s-version-894472": dial tcp 192.168.85.2:8443: connect: connection refused
	W0816 00:07:10.893554 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0816 00:07:10.893582 2407112 retry.go:31] will retry after 2.548067216s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
	stdout:
	
	stderr:
	The connection to the server localhost:8443 was refused - did you specify the right host or port?
	I0816 00:07:11.447619 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml
	I0816 00:07:11.601312 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
	I0816 00:07:12.218552 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
	I0816 00:07:13.442147 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml
	I0816 00:07:21.651926 2407112 node_ready.go:53] error getting node "old-k8s-version-894472": Get "https://192.168.85.2:8443/api/v1/nodes/old-k8s-version-894472": net/http: TLS handshake timeout
	I0816 00:07:22.623851 2407112 node_ready.go:49] node "old-k8s-version-894472" has status "Ready":"True"
	I0816 00:07:22.623879 2407112 node_ready.go:38] duration metric: took 16.473596765s for node "old-k8s-version-894472" to be "Ready" ...
	I0816 00:07:22.623890 2407112 pod_ready.go:36] extra waiting up to 6m0s for all system-critical pods including labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
	I0816 00:07:22.762212 2407112 pod_ready.go:79] waiting up to 6m0s for pod "coredns-74ff55c5b-9l4p8" in "kube-system" namespace to be "Ready" ...
	I0816 00:07:23.293536 2407112 pod_ready.go:98] error getting pod "coredns-74ff55c5b-9l4p8" in "kube-system" namespace (skipping!): pods "coredns-74ff55c5b-9l4p8" not found
	I0816 00:07:23.293634 2407112 pod_ready.go:82] duration metric: took 531.341411ms for pod "coredns-74ff55c5b-9l4p8" in "kube-system" namespace to be "Ready" ...
	E0816 00:07:23.293662 2407112 pod_ready.go:67] WaitExtra: waitPodCondition: error getting pod "coredns-74ff55c5b-9l4p8" in "kube-system" namespace (skipping!): pods "coredns-74ff55c5b-9l4p8" not found
	I0816 00:07:23.293703 2407112 pod_ready.go:79] waiting up to 6m0s for pod "coredns-74ff55c5b-jrtq9" in "kube-system" namespace to be "Ready" ...
	I0816 00:07:24.108036 2407112 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: (12.660384019s)
	I0816 00:07:24.481961 2407112 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: (12.263370982s)
	I0816 00:07:24.482049 2407112 addons.go:475] Verifying addon metrics-server=true in "old-k8s-version-894472"
	I0816 00:07:24.482056 2407112 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: (12.880707038s)
	I0816 00:07:24.482026 2407112 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: (11.039847175s)
	I0816 00:07:24.484468 2407112 out.go:177] * Some dashboard features require the metrics-server addon. To enable all features please run:
	
		minikube -p old-k8s-version-894472 addons enable metrics-server
	
	I0816 00:07:24.486656 2407112 out.go:177] * Enabled addons: default-storageclass, metrics-server, storage-provisioner, dashboard
	I0816 00:07:24.488770 2407112 addons.go:510] duration metric: took 18.711448659s for enable addons: enabled=[default-storageclass metrics-server storage-provisioner dashboard]
	I0816 00:07:25.310415 2407112 pod_ready.go:103] pod "coredns-74ff55c5b-jrtq9" in "kube-system" namespace has status "Ready":"False"
	I0816 00:07:27.799471 2407112 pod_ready.go:103] pod "coredns-74ff55c5b-jrtq9" in "kube-system" namespace has status "Ready":"False"
	I0816 00:07:29.799679 2407112 pod_ready.go:103] pod "coredns-74ff55c5b-jrtq9" in "kube-system" namespace has status "Ready":"False"
	I0816 00:07:31.800888 2407112 pod_ready.go:103] pod "coredns-74ff55c5b-jrtq9" in "kube-system" namespace has status "Ready":"False"
	I0816 00:07:33.799943 2407112 pod_ready.go:93] pod "coredns-74ff55c5b-jrtq9" in "kube-system" namespace has status "Ready":"True"
	I0816 00:07:33.799972 2407112 pod_ready.go:82] duration metric: took 10.506239661s for pod "coredns-74ff55c5b-jrtq9" in "kube-system" namespace to be "Ready" ...
	I0816 00:07:33.799984 2407112 pod_ready.go:79] waiting up to 6m0s for pod "etcd-old-k8s-version-894472" in "kube-system" namespace to be "Ready" ...
	I0816 00:07:35.806471 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
	I0816 00:07:37.806695 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
	I0816 00:07:40.306706 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
	I0816 00:07:42.357347 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
	I0816 00:07:44.814869 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
	I0816 00:07:47.307295 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
	I0816 00:07:49.307405 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
	I0816 00:07:51.806545 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
	I0816 00:07:54.308419 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
	I0816 00:07:56.805976 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
	I0816 00:07:58.806677 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
	I0816 00:08:00.806924 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
	I0816 00:08:03.307232 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
	I0816 00:08:05.806416 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
	I0816 00:08:07.806957 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
	I0816 00:08:10.317170 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
	I0816 00:08:12.806238 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
	I0816 00:08:14.806345 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
	I0816 00:08:16.806766 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
	I0816 00:08:18.807296 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
	I0816 00:08:21.306239 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
	I0816 00:08:23.307345 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
	I0816 00:08:25.806390 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
	I0816 00:08:28.306821 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
	I0816 00:08:30.307617 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
	I0816 00:08:32.806490 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
	I0816 00:08:34.807202 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
	I0816 00:08:37.305527 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
	I0816 00:08:39.306419 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
	I0816 00:08:41.306562 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
	I0816 00:08:43.306649 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
	I0816 00:08:45.806375 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
	I0816 00:08:47.810884 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
	I0816 00:08:50.307666 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
	I0816 00:08:51.306835 2407112 pod_ready.go:93] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"True"
	I0816 00:08:51.306858 2407112 pod_ready.go:82] duration metric: took 1m17.506866593s for pod "etcd-old-k8s-version-894472" in "kube-system" namespace to be "Ready" ...
	I0816 00:08:51.306869 2407112 pod_ready.go:79] waiting up to 6m0s for pod "kube-apiserver-old-k8s-version-894472" in "kube-system" namespace to be "Ready" ...
	I0816 00:08:51.312013 2407112 pod_ready.go:93] pod "kube-apiserver-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"True"
	I0816 00:08:51.312039 2407112 pod_ready.go:82] duration metric: took 5.161937ms for pod "kube-apiserver-old-k8s-version-894472" in "kube-system" namespace to be "Ready" ...
	I0816 00:08:51.312050 2407112 pod_ready.go:79] waiting up to 6m0s for pod "kube-controller-manager-old-k8s-version-894472" in "kube-system" namespace to be "Ready" ...
	I0816 00:08:51.317635 2407112 pod_ready.go:93] pod "kube-controller-manager-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"True"
	I0816 00:08:51.317709 2407112 pod_ready.go:82] duration metric: took 5.650024ms for pod "kube-controller-manager-old-k8s-version-894472" in "kube-system" namespace to be "Ready" ...
	I0816 00:08:51.317737 2407112 pod_ready.go:79] waiting up to 6m0s for pod "kube-proxy-4n8ls" in "kube-system" namespace to be "Ready" ...
	I0816 00:08:51.324382 2407112 pod_ready.go:93] pod "kube-proxy-4n8ls" in "kube-system" namespace has status "Ready":"True"
	I0816 00:08:51.324409 2407112 pod_ready.go:82] duration metric: took 6.647801ms for pod "kube-proxy-4n8ls" in "kube-system" namespace to be "Ready" ...
	I0816 00:08:51.324422 2407112 pod_ready.go:79] waiting up to 6m0s for pod "kube-scheduler-old-k8s-version-894472" in "kube-system" namespace to be "Ready" ...
	I0816 00:08:52.332136 2407112 pod_ready.go:93] pod "kube-scheduler-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"True"
	I0816 00:08:52.332161 2407112 pod_ready.go:82] duration metric: took 1.007730969s for pod "kube-scheduler-old-k8s-version-894472" in "kube-system" namespace to be "Ready" ...
	I0816 00:08:52.332173 2407112 pod_ready.go:79] waiting up to 6m0s for pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace to be "Ready" ...
	I0816 00:08:54.338045 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:08:56.839222 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:08:59.339056 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:09:01.842610 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:09:04.338232 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:09:06.338527 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:09:08.839007 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:09:10.843561 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:09:13.338661 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:09:15.339174 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:09:17.340960 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:09:19.838262 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:09:21.839210 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:09:24.340073 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:09:26.839863 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:09:29.337558 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:09:31.338827 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:09:33.340313 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:09:35.838874 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:09:38.338237 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:09:40.840120 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:09:43.337461 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:09:45.339730 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:09:47.838705 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:09:50.345545 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:09:52.838982 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:09:55.338624 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:09:57.838361 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:09:59.839809 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:10:02.339011 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:10:04.838809 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:10:07.337842 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:10:09.340016 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:10:11.839360 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:10:13.843229 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:10:16.338914 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:10:18.340058 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:10:20.837772 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:10:22.838039 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:10:24.838979 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:10:27.337589 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:10:29.337725 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:10:31.338409 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:10:33.339134 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:10:35.838881 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:10:37.842939 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:10:40.338883 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:10:42.837905 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:10:44.842032 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:10:47.338504 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:10:49.839334 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:10:52.339057 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:10:54.838761 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:10:57.338096 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:10:59.338957 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:11:01.839308 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:11:03.841056 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:11:06.338040 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:11:08.339556 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:11:10.839459 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:11:13.338749 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:11:15.839574 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:11:18.337929 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:11:20.338790 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:11:22.339116 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:11:24.840378 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:11:27.338241 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:11:29.852230 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:11:32.338799 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:11:34.838985 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:11:37.345046 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:11:39.838474 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:11:42.338170 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:11:44.362656 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:11:46.838875 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:11:48.841654 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:11:51.338832 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:11:53.343372 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:11:55.838560 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:11:57.838907 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:11:59.839494 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:01.841503 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:04.338013 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:06.340432 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:08.847475 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:11.339958 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:13.341238 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:15.358241 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:17.842997 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:20.343831 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:22.839620 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:25.338275 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:27.338503 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:29.338983 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:31.839009 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:34.338847 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:36.339743 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:38.839318 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:41.337738 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:43.338933 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:45.838487 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:47.840406 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:50.337729 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:52.337948 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:52.337979 2407112 pod_ready.go:82] duration metric: took 4m0.00579824s for pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace to be "Ready" ...
	E0816 00:12:52.337989 2407112 pod_ready.go:67] WaitExtra: waitPodCondition: context deadline exceeded
	I0816 00:12:52.337996 2407112 pod_ready.go:39] duration metric: took 5m29.714095578s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
	I0816 00:12:52.338015 2407112 api_server.go:52] waiting for apiserver process to appear ...
	I0816 00:12:52.338092 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-apiserver --format={{.ID}}
	I0816 00:12:52.356755 2407112 logs.go:276] 2 containers: [682baec10b08 3d14903eaff5]
	I0816 00:12:52.356831 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_etcd --format={{.ID}}
	I0816 00:12:52.374920 2407112 logs.go:276] 2 containers: [5aacba0afc73 15f34ed96b2b]
	I0816 00:12:52.375012 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_coredns --format={{.ID}}
	I0816 00:12:52.394135 2407112 logs.go:276] 2 containers: [0646646e2348 f583e4715841]
	I0816 00:12:52.394219 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-scheduler --format={{.ID}}
	I0816 00:12:52.411860 2407112 logs.go:276] 2 containers: [67be7ec054c6 003fa784026a]
	I0816 00:12:52.411939 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-proxy --format={{.ID}}
	I0816 00:12:52.430627 2407112 logs.go:276] 2 containers: [c5eeddd51e95 1e79f4e5d490]
	I0816 00:12:52.430718 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-controller-manager --format={{.ID}}
	I0816 00:12:52.448774 2407112 logs.go:276] 2 containers: [cc3ceefdfcf9 821653363c67]
	I0816 00:12:52.448883 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kindnet --format={{.ID}}
	I0816 00:12:52.467313 2407112 logs.go:276] 0 containers: []
	W0816 00:12:52.467337 2407112 logs.go:278] No container was found matching "kindnet"
	I0816 00:12:52.467406 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_storage-provisioner --format={{.ID}}
	I0816 00:12:52.488664 2407112 logs.go:276] 2 containers: [a6efcfb5cb17 de096650c620]
	I0816 00:12:52.488759 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kubernetes-dashboard --format={{.ID}}
	I0816 00:12:52.508042 2407112 logs.go:276] 1 containers: [3ef1e388df06]
	I0816 00:12:52.508089 2407112 logs.go:123] Gathering logs for kube-apiserver [3d14903eaff5] ...
	I0816 00:12:52.508101 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 3d14903eaff5"
	I0816 00:12:52.575393 2407112 logs.go:123] Gathering logs for etcd [15f34ed96b2b] ...
	I0816 00:12:52.575429 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 15f34ed96b2b"
	I0816 00:12:52.609501 2407112 logs.go:123] Gathering logs for kube-proxy [1e79f4e5d490] ...
	I0816 00:12:52.609527 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 1e79f4e5d490"
	I0816 00:12:52.632173 2407112 logs.go:123] Gathering logs for kube-controller-manager [cc3ceefdfcf9] ...
	I0816 00:12:52.632251 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 cc3ceefdfcf9"
	I0816 00:12:52.680493 2407112 logs.go:123] Gathering logs for Docker ...
	I0816 00:12:52.680527 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u docker -u cri-docker -n 400"
	I0816 00:12:52.708209 2407112 logs.go:123] Gathering logs for dmesg ...
	I0816 00:12:52.708242 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
	I0816 00:12:52.725031 2407112 logs.go:123] Gathering logs for describe nodes ...
	I0816 00:12:52.725068 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.20.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
	I0816 00:12:52.888699 2407112 logs.go:123] Gathering logs for kube-apiserver [682baec10b08] ...
	I0816 00:12:52.888730 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 682baec10b08"
	I0816 00:12:52.947378 2407112 logs.go:123] Gathering logs for etcd [5aacba0afc73] ...
	I0816 00:12:52.947415 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 5aacba0afc73"
	I0816 00:12:52.984633 2407112 logs.go:123] Gathering logs for kube-controller-manager [821653363c67] ...
	I0816 00:12:52.984685 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 821653363c67"
	I0816 00:12:53.041565 2407112 logs.go:123] Gathering logs for kubernetes-dashboard [3ef1e388df06] ...
	I0816 00:12:53.041610 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 3ef1e388df06"
	I0816 00:12:53.063527 2407112 logs.go:123] Gathering logs for kubelet ...
	I0816 00:12:53.063556 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
	W0816 00:12:53.119952 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.595590    1361 reflector.go:138] object-"kube-system"/"kube-proxy-token-7vfmt": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "kube-proxy-token-7vfmt" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
	W0816 00:12:53.120210 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.596830    1361 reflector.go:138] object-"kube-system"/"coredns": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "coredns" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
	W0816 00:12:53.120436 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.597148    1361 reflector.go:138] object-"kube-system"/"coredns-token-xzs4d": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "coredns-token-xzs4d" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
	W0816 00:12:53.120668 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.597346    1361 reflector.go:138] object-"kube-system"/"metrics-server-token-545hd": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "metrics-server-token-545hd" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
	W0816 00:12:53.120885 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.597537    1361 reflector.go:138] object-"kube-system"/"kube-proxy": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "kube-proxy" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
	W0816 00:12:53.121122 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.597780    1361 reflector.go:138] object-"kube-system"/"storage-provisioner-token-7rcl5": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "storage-provisioner-token-7rcl5" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
	W0816 00:12:53.121338 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.597976    1361 reflector.go:138] object-"default"/"default-token-zv2bb": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "default-token-zv2bb" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "default": no relationship found between node 'old-k8s-version-894472' and this object
	W0816 00:12:53.131353 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:25 old-k8s-version-894472 kubelet[1361]: E0816 00:07:25.865188    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	W0816 00:12:53.131864 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:25 old-k8s-version-894472 kubelet[1361]: E0816 00:07:25.961596    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.132568 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:27 old-k8s-version-894472 kubelet[1361]: E0816 00:07:27.069241    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.135723 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:40 old-k8s-version-894472 kubelet[1361]: E0816 00:07:40.936997    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	W0816 00:12:53.136063 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:41 old-k8s-version-894472 kubelet[1361]: E0816 00:07:41.174078    1361 reflector.go:138] object-"kubernetes-dashboard"/"kubernetes-dashboard-token-2w5nt": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "kubernetes-dashboard-token-2w5nt" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "kubernetes-dashboard": no relationship found between node 'old-k8s-version-894472' and this object
	W0816 00:12:53.140916 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:48 old-k8s-version-894472 kubelet[1361]: E0816 00:07:48.480988    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	W0816 00:12:53.141483 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:49 old-k8s-version-894472 kubelet[1361]: E0816 00:07:49.497915    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.141711 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:52 old-k8s-version-894472 kubelet[1361]: E0816 00:07:52.902604    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.142184 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:56 old-k8s-version-894472 kubelet[1361]: E0816 00:07:56.570944    1361 pod_workers.go:191] Error syncing pod 3c69c2c8-274b-42e9-83f4-e56b1a377a84 ("storage-provisioner_kube-system(3c69c2c8-274b-42e9-83f4-e56b1a377a84)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 10s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(3c69c2c8-274b-42e9-83f4-e56b1a377a84)"
	W0816 00:12:53.144872 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:04 old-k8s-version-894472 kubelet[1361]: E0816 00:08:04.540574    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	W0816 00:12:53.147012 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:04 old-k8s-version-894472 kubelet[1361]: E0816 00:08:04.937109    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	W0816 00:12:53.147341 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:18 old-k8s-version-894472 kubelet[1361]: E0816 00:08:18.906978    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.147546 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:18 old-k8s-version-894472 kubelet[1361]: E0816 00:08:18.907490    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.147738 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:33 old-k8s-version-894472 kubelet[1361]: E0816 00:08:33.905951    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.150049 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:34 old-k8s-version-894472 kubelet[1361]: E0816 00:08:34.587830    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	W0816 00:12:53.150259 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:48 old-k8s-version-894472 kubelet[1361]: E0816 00:08:48.913192    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.152418 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:48 old-k8s-version-894472 kubelet[1361]: E0816 00:08:48.930752    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	W0816 00:12:53.152621 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:59 old-k8s-version-894472 kubelet[1361]: E0816 00:08:59.902173    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.152813 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:01 old-k8s-version-894472 kubelet[1361]: E0816 00:09:01.906082    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.153018 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:10 old-k8s-version-894472 kubelet[1361]: E0816 00:09:10.903130    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.153211 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:13 old-k8s-version-894472 kubelet[1361]: E0816 00:09:13.910937    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.155518 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:22 old-k8s-version-894472 kubelet[1361]: E0816 00:09:22.525085    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	W0816 00:12:53.155709 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:28 old-k8s-version-894472 kubelet[1361]: E0816 00:09:28.902818    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.155918 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:36 old-k8s-version-894472 kubelet[1361]: E0816 00:09:36.902337    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.156108 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:42 old-k8s-version-894472 kubelet[1361]: E0816 00:09:42.908410    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.156310 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:50 old-k8s-version-894472 kubelet[1361]: E0816 00:09:50.917752    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.156500 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:53 old-k8s-version-894472 kubelet[1361]: E0816 00:09:53.902385    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.156706 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:02 old-k8s-version-894472 kubelet[1361]: E0816 00:10:02.902250    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.156909 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:07 old-k8s-version-894472 kubelet[1361]: E0816 00:10:07.902036    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.157115 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:13 old-k8s-version-894472 kubelet[1361]: E0816 00:10:13.908849    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.159233 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:22 old-k8s-version-894472 kubelet[1361]: E0816 00:10:22.924390    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	W0816 00:12:53.159438 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:25 old-k8s-version-894472 kubelet[1361]: E0816 00:10:25.902239    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.159639 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:37 old-k8s-version-894472 kubelet[1361]: E0816 00:10:37.903386    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.159827 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:37 old-k8s-version-894472 kubelet[1361]: E0816 00:10:37.904171    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.160016 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:51 old-k8s-version-894472 kubelet[1361]: E0816 00:10:51.902209    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.162349 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:53 old-k8s-version-894472 kubelet[1361]: E0816 00:10:53.458905    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	W0816 00:12:53.162553 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:03 old-k8s-version-894472 kubelet[1361]: E0816 00:11:03.902219    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.162745 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:03 old-k8s-version-894472 kubelet[1361]: E0816 00:11:03.905343    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.162934 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:16 old-k8s-version-894472 kubelet[1361]: E0816 00:11:16.908006    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.163136 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:17 old-k8s-version-894472 kubelet[1361]: E0816 00:11:17.912657    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.163324 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:31 old-k8s-version-894472 kubelet[1361]: E0816 00:11:31.902307    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.163546 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:32 old-k8s-version-894472 kubelet[1361]: E0816 00:11:32.902285    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.163735 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:43 old-k8s-version-894472 kubelet[1361]: E0816 00:11:43.902913    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.163937 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:47 old-k8s-version-894472 kubelet[1361]: E0816 00:11:47.902276    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.164127 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:56 old-k8s-version-894472 kubelet[1361]: E0816 00:11:56.907938    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.164331 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:01 old-k8s-version-894472 kubelet[1361]: E0816 00:12:01.910769    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.164519 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:07 old-k8s-version-894472 kubelet[1361]: E0816 00:12:07.902409    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.164729 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:12 old-k8s-version-894472 kubelet[1361]: E0816 00:12:12.925048    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.164919 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:18 old-k8s-version-894472 kubelet[1361]: E0816 00:12:18.903513    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.165121 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:23 old-k8s-version-894472 kubelet[1361]: E0816 00:12:23.902176    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.165309 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:33 old-k8s-version-894472 kubelet[1361]: E0816 00:12:33.902261    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.165511 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:34 old-k8s-version-894472 kubelet[1361]: E0816 00:12:34.910967    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.165721 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.902827    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.165909 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.903329    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	I0816 00:12:53.165921 2407112 logs.go:123] Gathering logs for coredns [f583e4715841] ...
	I0816 00:12:53.165936 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 f583e4715841"
	I0816 00:12:53.191198 2407112 logs.go:123] Gathering logs for container status ...
	I0816 00:12:53.191227 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
	I0816 00:12:53.281304 2407112 logs.go:123] Gathering logs for coredns [0646646e2348] ...
	I0816 00:12:53.281343 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 0646646e2348"
	I0816 00:12:53.302996 2407112 logs.go:123] Gathering logs for kube-scheduler [003fa784026a] ...
	I0816 00:12:53.303072 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 003fa784026a"
	I0816 00:12:53.328690 2407112 logs.go:123] Gathering logs for kube-proxy [c5eeddd51e95] ...
	I0816 00:12:53.328718 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 c5eeddd51e95"
	I0816 00:12:53.351111 2407112 logs.go:123] Gathering logs for storage-provisioner [a6efcfb5cb17] ...
	I0816 00:12:53.351142 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 a6efcfb5cb17"
	I0816 00:12:53.372302 2407112 logs.go:123] Gathering logs for storage-provisioner [de096650c620] ...
	I0816 00:12:53.372331 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 de096650c620"
	I0816 00:12:53.392435 2407112 logs.go:123] Gathering logs for kube-scheduler [67be7ec054c6] ...
	I0816 00:12:53.392464 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 67be7ec054c6"
	I0816 00:12:53.414931 2407112 out.go:358] Setting ErrFile to fd 2...
	I0816 00:12:53.414956 2407112 out.go:392] TERM=,COLORTERM=, which probably does not support color
	W0816 00:12:53.415035 2407112 out.go:270] X Problems detected in kubelet:
	X Problems detected in kubelet:
	W0816 00:12:53.415081 2407112 out.go:270]   Aug 16 00:12:23 old-k8s-version-894472 kubelet[1361]: E0816 00:12:23.902176    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	  Aug 16 00:12:23 old-k8s-version-894472 kubelet[1361]: E0816 00:12:23.902176    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.415095 2407112 out.go:270]   Aug 16 00:12:33 old-k8s-version-894472 kubelet[1361]: E0816 00:12:33.902261    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	  Aug 16 00:12:33 old-k8s-version-894472 kubelet[1361]: E0816 00:12:33.902261    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.415117 2407112 out.go:270]   Aug 16 00:12:34 old-k8s-version-894472 kubelet[1361]: E0816 00:12:34.910967    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	  Aug 16 00:12:34 old-k8s-version-894472 kubelet[1361]: E0816 00:12:34.910967    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.415131 2407112 out.go:270]   Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.902827    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	  Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.902827    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.415139 2407112 out.go:270]   Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.903329    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	  Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.903329    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	I0816 00:12:53.415178 2407112 out.go:358] Setting ErrFile to fd 2...
	I0816 00:12:53.415187 2407112 out.go:392] TERM=,COLORTERM=, which probably does not support color
	I0816 00:13:03.417016 2407112 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I0816 00:13:03.428859 2407112 api_server.go:72] duration metric: took 5m57.651838639s to wait for apiserver process to appear ...
	I0816 00:13:03.428887 2407112 api_server.go:88] waiting for apiserver healthz status ...
	I0816 00:13:03.428962 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-apiserver --format={{.ID}}
	I0816 00:13:03.446552 2407112 logs.go:276] 2 containers: [682baec10b08 3d14903eaff5]
	I0816 00:13:03.446627 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_etcd --format={{.ID}}
	I0816 00:13:03.463688 2407112 logs.go:276] 2 containers: [5aacba0afc73 15f34ed96b2b]
	I0816 00:13:03.463770 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_coredns --format={{.ID}}
	I0816 00:13:03.485222 2407112 logs.go:276] 2 containers: [0646646e2348 f583e4715841]
	I0816 00:13:03.485299 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-scheduler --format={{.ID}}
	I0816 00:13:03.503320 2407112 logs.go:276] 2 containers: [67be7ec054c6 003fa784026a]
	I0816 00:13:03.503399 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-proxy --format={{.ID}}
	I0816 00:13:03.522597 2407112 logs.go:276] 2 containers: [c5eeddd51e95 1e79f4e5d490]
	I0816 00:13:03.522681 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-controller-manager --format={{.ID}}
	I0816 00:13:03.542588 2407112 logs.go:276] 2 containers: [cc3ceefdfcf9 821653363c67]
	I0816 00:13:03.542672 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kindnet --format={{.ID}}
	I0816 00:13:03.566735 2407112 logs.go:276] 0 containers: []
	W0816 00:13:03.566759 2407112 logs.go:278] No container was found matching "kindnet"
	I0816 00:13:03.566817 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kubernetes-dashboard --format={{.ID}}
	I0816 00:13:03.585575 2407112 logs.go:276] 1 containers: [3ef1e388df06]
	I0816 00:13:03.585709 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_storage-provisioner --format={{.ID}}
	I0816 00:13:03.604527 2407112 logs.go:276] 2 containers: [a6efcfb5cb17 de096650c620]
	I0816 00:13:03.604570 2407112 logs.go:123] Gathering logs for container status ...
	I0816 00:13:03.604588 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
	I0816 00:13:03.669008 2407112 logs.go:123] Gathering logs for kube-apiserver [682baec10b08] ...
	I0816 00:13:03.669037 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 682baec10b08"
	I0816 00:13:03.712566 2407112 logs.go:123] Gathering logs for etcd [5aacba0afc73] ...
	I0816 00:13:03.712600 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 5aacba0afc73"
	I0816 00:13:03.737934 2407112 logs.go:123] Gathering logs for etcd [15f34ed96b2b] ...
	I0816 00:13:03.738002 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 15f34ed96b2b"
	I0816 00:13:03.771409 2407112 logs.go:123] Gathering logs for coredns [f583e4715841] ...
	I0816 00:13:03.771482 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 f583e4715841"
	I0816 00:13:03.797096 2407112 logs.go:123] Gathering logs for kube-scheduler [67be7ec054c6] ...
	I0816 00:13:03.797128 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 67be7ec054c6"
	I0816 00:13:03.826332 2407112 logs.go:123] Gathering logs for kube-controller-manager [cc3ceefdfcf9] ...
	I0816 00:13:03.826367 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 cc3ceefdfcf9"
	I0816 00:13:03.865698 2407112 logs.go:123] Gathering logs for Docker ...
	I0816 00:13:03.865729 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u docker -u cri-docker -n 400"
	I0816 00:13:03.892350 2407112 logs.go:123] Gathering logs for kubelet ...
	I0816 00:13:03.892378 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
	W0816 00:13:03.954017 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.595590    1361 reflector.go:138] object-"kube-system"/"kube-proxy-token-7vfmt": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "kube-proxy-token-7vfmt" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
	W0816 00:13:03.954368 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.596830    1361 reflector.go:138] object-"kube-system"/"coredns": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "coredns" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
	W0816 00:13:03.954651 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.597148    1361 reflector.go:138] object-"kube-system"/"coredns-token-xzs4d": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "coredns-token-xzs4d" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
	W0816 00:13:03.954880 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.597346    1361 reflector.go:138] object-"kube-system"/"metrics-server-token-545hd": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "metrics-server-token-545hd" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
	W0816 00:13:03.955093 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.597537    1361 reflector.go:138] object-"kube-system"/"kube-proxy": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "kube-proxy" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
	W0816 00:13:03.955328 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.597780    1361 reflector.go:138] object-"kube-system"/"storage-provisioner-token-7rcl5": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "storage-provisioner-token-7rcl5" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
	W0816 00:13:03.955543 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.597976    1361 reflector.go:138] object-"default"/"default-token-zv2bb": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "default-token-zv2bb" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "default": no relationship found between node 'old-k8s-version-894472' and this object
	W0816 00:13:03.966447 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:25 old-k8s-version-894472 kubelet[1361]: E0816 00:07:25.865188    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	W0816 00:13:03.967038 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:25 old-k8s-version-894472 kubelet[1361]: E0816 00:07:25.961596    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.967719 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:27 old-k8s-version-894472 kubelet[1361]: E0816 00:07:27.069241    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.970902 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:40 old-k8s-version-894472 kubelet[1361]: E0816 00:07:40.936997    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	W0816 00:13:03.971269 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:41 old-k8s-version-894472 kubelet[1361]: E0816 00:07:41.174078    1361 reflector.go:138] object-"kubernetes-dashboard"/"kubernetes-dashboard-token-2w5nt": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "kubernetes-dashboard-token-2w5nt" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "kubernetes-dashboard": no relationship found between node 'old-k8s-version-894472' and this object
	W0816 00:13:03.975634 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:48 old-k8s-version-894472 kubelet[1361]: E0816 00:07:48.480988    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	W0816 00:13:03.976189 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:49 old-k8s-version-894472 kubelet[1361]: E0816 00:07:49.497915    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.976380 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:52 old-k8s-version-894472 kubelet[1361]: E0816 00:07:52.902604    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.976839 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:56 old-k8s-version-894472 kubelet[1361]: E0816 00:07:56.570944    1361 pod_workers.go:191] Error syncing pod 3c69c2c8-274b-42e9-83f4-e56b1a377a84 ("storage-provisioner_kube-system(3c69c2c8-274b-42e9-83f4-e56b1a377a84)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 10s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(3c69c2c8-274b-42e9-83f4-e56b1a377a84)"
	W0816 00:13:03.979485 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:04 old-k8s-version-894472 kubelet[1361]: E0816 00:08:04.540574    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	W0816 00:13:03.981633 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:04 old-k8s-version-894472 kubelet[1361]: E0816 00:08:04.937109    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	W0816 00:13:03.981960 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:18 old-k8s-version-894472 kubelet[1361]: E0816 00:08:18.906978    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.982163 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:18 old-k8s-version-894472 kubelet[1361]: E0816 00:08:18.907490    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.982356 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:33 old-k8s-version-894472 kubelet[1361]: E0816 00:08:33.905951    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.984654 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:34 old-k8s-version-894472 kubelet[1361]: E0816 00:08:34.587830    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	W0816 00:13:03.984859 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:48 old-k8s-version-894472 kubelet[1361]: E0816 00:08:48.913192    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.986993 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:48 old-k8s-version-894472 kubelet[1361]: E0816 00:08:48.930752    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	W0816 00:13:03.987196 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:59 old-k8s-version-894472 kubelet[1361]: E0816 00:08:59.902173    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.987384 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:01 old-k8s-version-894472 kubelet[1361]: E0816 00:09:01.906082    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.987586 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:10 old-k8s-version-894472 kubelet[1361]: E0816 00:09:10.903130    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.987776 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:13 old-k8s-version-894472 kubelet[1361]: E0816 00:09:13.910937    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.990067 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:22 old-k8s-version-894472 kubelet[1361]: E0816 00:09:22.525085    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	W0816 00:13:03.990260 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:28 old-k8s-version-894472 kubelet[1361]: E0816 00:09:28.902818    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.990464 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:36 old-k8s-version-894472 kubelet[1361]: E0816 00:09:36.902337    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.990653 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:42 old-k8s-version-894472 kubelet[1361]: E0816 00:09:42.908410    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.990862 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:50 old-k8s-version-894472 kubelet[1361]: E0816 00:09:50.917752    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.991053 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:53 old-k8s-version-894472 kubelet[1361]: E0816 00:09:53.902385    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.991255 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:02 old-k8s-version-894472 kubelet[1361]: E0816 00:10:02.902250    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.991445 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:07 old-k8s-version-894472 kubelet[1361]: E0816 00:10:07.902036    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.991646 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:13 old-k8s-version-894472 kubelet[1361]: E0816 00:10:13.908849    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.993804 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:22 old-k8s-version-894472 kubelet[1361]: E0816 00:10:22.924390    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	W0816 00:13:03.994007 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:25 old-k8s-version-894472 kubelet[1361]: E0816 00:10:25.902239    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.994210 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:37 old-k8s-version-894472 kubelet[1361]: E0816 00:10:37.903386    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.994398 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:37 old-k8s-version-894472 kubelet[1361]: E0816 00:10:37.904171    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.994591 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:51 old-k8s-version-894472 kubelet[1361]: E0816 00:10:51.902209    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.996873 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:53 old-k8s-version-894472 kubelet[1361]: E0816 00:10:53.458905    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	W0816 00:13:03.997075 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:03 old-k8s-version-894472 kubelet[1361]: E0816 00:11:03.902219    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.997263 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:03 old-k8s-version-894472 kubelet[1361]: E0816 00:11:03.905343    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.997451 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:16 old-k8s-version-894472 kubelet[1361]: E0816 00:11:16.908006    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.997659 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:17 old-k8s-version-894472 kubelet[1361]: E0816 00:11:17.912657    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.997848 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:31 old-k8s-version-894472 kubelet[1361]: E0816 00:11:31.902307    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.998049 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:32 old-k8s-version-894472 kubelet[1361]: E0816 00:11:32.902285    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.998237 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:43 old-k8s-version-894472 kubelet[1361]: E0816 00:11:43.902913    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.998439 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:47 old-k8s-version-894472 kubelet[1361]: E0816 00:11:47.902276    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.998629 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:56 old-k8s-version-894472 kubelet[1361]: E0816 00:11:56.907938    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.998831 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:01 old-k8s-version-894472 kubelet[1361]: E0816 00:12:01.910769    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.999022 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:07 old-k8s-version-894472 kubelet[1361]: E0816 00:12:07.902409    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.999225 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:12 old-k8s-version-894472 kubelet[1361]: E0816 00:12:12.925048    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.999414 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:18 old-k8s-version-894472 kubelet[1361]: E0816 00:12:18.903513    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.999616 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:23 old-k8s-version-894472 kubelet[1361]: E0816 00:12:23.902176    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.999804 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:33 old-k8s-version-894472 kubelet[1361]: E0816 00:12:33.902261    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:04.000006 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:34 old-k8s-version-894472 kubelet[1361]: E0816 00:12:34.910967    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:04.000207 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.902827    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:04.000397 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.903329    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:04.000599 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:58 old-k8s-version-894472 kubelet[1361]: E0816 00:12:58.902545    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:04.000789 2407112 logs.go:138] Found kubelet problem: Aug 16 00:13:02 old-k8s-version-894472 kubelet[1361]: E0816 00:13:02.903209    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	I0816 00:13:04.000801 2407112 logs.go:123] Gathering logs for dmesg ...
	I0816 00:13:04.000816 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
	I0816 00:13:04.020061 2407112 logs.go:123] Gathering logs for kubernetes-dashboard [3ef1e388df06] ...
	I0816 00:13:04.020091 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 3ef1e388df06"
	I0816 00:13:04.044867 2407112 logs.go:123] Gathering logs for storage-provisioner [a6efcfb5cb17] ...
	I0816 00:13:04.044946 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 a6efcfb5cb17"
	I0816 00:13:04.076966 2407112 logs.go:123] Gathering logs for describe nodes ...
	I0816 00:13:04.077044 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.20.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
	I0816 00:13:04.233528 2407112 logs.go:123] Gathering logs for coredns [0646646e2348] ...
	I0816 00:13:04.233554 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 0646646e2348"
	I0816 00:13:04.259485 2407112 logs.go:123] Gathering logs for kube-proxy [1e79f4e5d490] ...
	I0816 00:13:04.259515 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 1e79f4e5d490"
	I0816 00:13:04.286001 2407112 logs.go:123] Gathering logs for kube-apiserver [3d14903eaff5] ...
	I0816 00:13:04.286028 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 3d14903eaff5"
	I0816 00:13:04.367586 2407112 logs.go:123] Gathering logs for kube-scheduler [003fa784026a] ...
	I0816 00:13:04.367621 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 003fa784026a"
	I0816 00:13:04.398199 2407112 logs.go:123] Gathering logs for kube-proxy [c5eeddd51e95] ...
	I0816 00:13:04.398227 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 c5eeddd51e95"
	I0816 00:13:04.420433 2407112 logs.go:123] Gathering logs for kube-controller-manager [821653363c67] ...
	I0816 00:13:04.420463 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 821653363c67"
	I0816 00:13:04.480676 2407112 logs.go:123] Gathering logs for storage-provisioner [de096650c620] ...
	I0816 00:13:04.480722 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 de096650c620"
	I0816 00:13:04.501321 2407112 out.go:358] Setting ErrFile to fd 2...
	I0816 00:13:04.501350 2407112 out.go:392] TERM=,COLORTERM=, which probably does not support color
	W0816 00:13:04.501418 2407112 out.go:270] X Problems detected in kubelet:
	X Problems detected in kubelet:
	W0816 00:13:04.501441 2407112 out.go:270]   Aug 16 00:12:34 old-k8s-version-894472 kubelet[1361]: E0816 00:12:34.910967    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	  Aug 16 00:12:34 old-k8s-version-894472 kubelet[1361]: E0816 00:12:34.910967    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:04.501458 2407112 out.go:270]   Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.902827    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	  Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.902827    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:04.501468 2407112 out.go:270]   Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.903329    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	  Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.903329    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:04.501473 2407112 out.go:270]   Aug 16 00:12:58 old-k8s-version-894472 kubelet[1361]: E0816 00:12:58.902545    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	  Aug 16 00:12:58 old-k8s-version-894472 kubelet[1361]: E0816 00:12:58.902545    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:04.501479 2407112 out.go:270]   Aug 16 00:13:02 old-k8s-version-894472 kubelet[1361]: E0816 00:13:02.903209    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	  Aug 16 00:13:02 old-k8s-version-894472 kubelet[1361]: E0816 00:13:02.903209    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	I0816 00:13:04.501496 2407112 out.go:358] Setting ErrFile to fd 2...
	I0816 00:13:04.501501 2407112 out.go:392] TERM=,COLORTERM=, which probably does not support color
	I0816 00:13:14.507900 2407112 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
	I0816 00:13:14.517839 2407112 api_server.go:279] https://192.168.85.2:8443/healthz returned 200:
	ok
	I0816 00:13:14.520292 2407112 out.go:201] 
	W0816 00:13:14.522384 2407112 out.go:270] X Exiting due to K8S_UNHEALTHY_CONTROL_PLANE: wait 6m0s for node: wait for healthy API server: controlPlane never updated to v1.20.0
	X Exiting due to K8S_UNHEALTHY_CONTROL_PLANE: wait 6m0s for node: wait for healthy API server: controlPlane never updated to v1.20.0
	W0816 00:13:14.522420 2407112 out.go:270] * Suggestion: Control Plane could not update, try minikube delete --all --purge
	* Suggestion: Control Plane could not update, try minikube delete --all --purge
	W0816 00:13:14.522438 2407112 out.go:270] * Related issue: https://github.com/kubernetes/minikube/issues/11417
	* Related issue: https://github.com/kubernetes/minikube/issues/11417
	W0816 00:13:14.522444 2407112 out.go:270] * 
	* 
	W0816 00:13:14.523417 2407112 out.go:293] ╭─────────────────────────────────────────────────────────────────────────────────────────────╮
	│                                                                                             │
	│    * If the above advice does not help, please let us know:                                 │
	│      https://github.com/kubernetes/minikube/issues/new/choose                               │
	│                                                                                             │
	│    * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue.    │
	│                                                                                             │
	╰─────────────────────────────────────────────────────────────────────────────────────────────╯
	╭─────────────────────────────────────────────────────────────────────────────────────────────╮
	│                                                                                             │
	│    * If the above advice does not help, please let us know:                                 │
	│      https://github.com/kubernetes/minikube/issues/new/choose                               │
	│                                                                                             │
	│    * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue.    │
	│                                                                                             │
	╰─────────────────────────────────────────────────────────────────────────────────────────────╯
	I0816 00:13:14.525402 2407112 out.go:201] 

                                                
                                                
** /stderr **
start_stop_delete_test.go:259: failed to start minikube post-stop. args "out/minikube-linux-arm64 start -p old-k8s-version-894472 --memory=2200 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker  --container-runtime=docker --kubernetes-version=v1.20.0": exit status 102
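Note: the exit above is K8S_UNHEALTHY_CONTROL_PLANE ("controlPlane never updated to v1.20.0"), and the captured output's own suggestion is `minikube delete --all --purge`. A minimal recovery sketch assembled only from that suggestion and the failing command line (it is not part of the recorded run; the binary path and profile name are reused from the args above):

	# Remove the stuck profile, then the log's broader cleanup suggestion
	out/minikube-linux-arm64 delete -p old-k8s-version-894472
	out/minikube-linux-arm64 delete --all --purge
	# Re-run the same start the test issued (flags copied from the failing args)
	out/minikube-linux-arm64 start -p old-k8s-version-894472 --memory=2200 --alsologtostderr --wait=true \
	  --disable-driver-mounts --keep-context=false --driver=docker --container-runtime=docker \
	  --kubernetes-version=v1.20.0
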
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:230: ======>  post-mortem[TestStartStop/group/old-k8s-version/serial/SecondStart]: docker inspect <======
helpers_test.go:231: (dbg) Run:  docker inspect old-k8s-version-894472
helpers_test.go:235: (dbg) docker inspect old-k8s-version-894472:

                                                
                                                
-- stdout --
	[
	    {
	        "Id": "64c0bba19cec9659a7141ba86ce4ac16804f2bad1bc8890b1de204ed24b4df5f",
	        "Created": "2024-08-16T00:04:09.217242083Z",
	        "Path": "/usr/local/bin/entrypoint",
	        "Args": [
	            "/sbin/init"
	        ],
	        "State": {
	            "Status": "running",
	            "Running": true,
	            "Paused": false,
	            "Restarting": false,
	            "OOMKilled": false,
	            "Dead": false,
	            "Pid": 2407535,
	            "ExitCode": 0,
	            "Error": "",
	            "StartedAt": "2024-08-16T00:06:57.018774687Z",
	            "FinishedAt": "2024-08-16T00:06:55.82218271Z"
	        },
	        "Image": "sha256:decdd59746a9dba10062a73f6cd4b910c7b4e60613660b1022f8357747681c4d",
	        "ResolvConfPath": "/var/lib/docker/containers/64c0bba19cec9659a7141ba86ce4ac16804f2bad1bc8890b1de204ed24b4df5f/resolv.conf",
	        "HostnamePath": "/var/lib/docker/containers/64c0bba19cec9659a7141ba86ce4ac16804f2bad1bc8890b1de204ed24b4df5f/hostname",
	        "HostsPath": "/var/lib/docker/containers/64c0bba19cec9659a7141ba86ce4ac16804f2bad1bc8890b1de204ed24b4df5f/hosts",
	        "LogPath": "/var/lib/docker/containers/64c0bba19cec9659a7141ba86ce4ac16804f2bad1bc8890b1de204ed24b4df5f/64c0bba19cec9659a7141ba86ce4ac16804f2bad1bc8890b1de204ed24b4df5f-json.log",
	        "Name": "/old-k8s-version-894472",
	        "RestartCount": 0,
	        "Driver": "overlay2",
	        "Platform": "linux",
	        "MountLabel": "",
	        "ProcessLabel": "",
	        "AppArmorProfile": "unconfined",
	        "ExecIDs": null,
	        "HostConfig": {
	            "Binds": [
	                "/lib/modules:/lib/modules:ro",
	                "old-k8s-version-894472:/var"
	            ],
	            "ContainerIDFile": "",
	            "LogConfig": {
	                "Type": "json-file",
	                "Config": {}
	            },
	            "NetworkMode": "old-k8s-version-894472",
	            "PortBindings": {
	                "22/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "2376/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "32443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "5000/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ],
	                "8443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": ""
	                    }
	                ]
	            },
	            "RestartPolicy": {
	                "Name": "no",
	                "MaximumRetryCount": 0
	            },
	            "AutoRemove": false,
	            "VolumeDriver": "",
	            "VolumesFrom": null,
	            "ConsoleSize": [
	                0,
	                0
	            ],
	            "CapAdd": null,
	            "CapDrop": null,
	            "CgroupnsMode": "host",
	            "Dns": [],
	            "DnsOptions": [],
	            "DnsSearch": [],
	            "ExtraHosts": null,
	            "GroupAdd": null,
	            "IpcMode": "private",
	            "Cgroup": "",
	            "Links": null,
	            "OomScoreAdj": 0,
	            "PidMode": "",
	            "Privileged": true,
	            "PublishAllPorts": false,
	            "ReadonlyRootfs": false,
	            "SecurityOpt": [
	                "seccomp=unconfined",
	                "apparmor=unconfined",
	                "label=disable"
	            ],
	            "Tmpfs": {
	                "/run": "",
	                "/tmp": ""
	            },
	            "UTSMode": "",
	            "UsernsMode": "",
	            "ShmSize": 67108864,
	            "Runtime": "runc",
	            "Isolation": "",
	            "CpuShares": 0,
	            "Memory": 2306867200,
	            "NanoCpus": 2000000000,
	            "CgroupParent": "",
	            "BlkioWeight": 0,
	            "BlkioWeightDevice": [],
	            "BlkioDeviceReadBps": [],
	            "BlkioDeviceWriteBps": [],
	            "BlkioDeviceReadIOps": [],
	            "BlkioDeviceWriteIOps": [],
	            "CpuPeriod": 0,
	            "CpuQuota": 0,
	            "CpuRealtimePeriod": 0,
	            "CpuRealtimeRuntime": 0,
	            "CpusetCpus": "",
	            "CpusetMems": "",
	            "Devices": [],
	            "DeviceCgroupRules": null,
	            "DeviceRequests": null,
	            "MemoryReservation": 0,
	            "MemorySwap": 4613734400,
	            "MemorySwappiness": null,
	            "OomKillDisable": false,
	            "PidsLimit": null,
	            "Ulimits": [],
	            "CpuCount": 0,
	            "CpuPercent": 0,
	            "IOMaximumIOps": 0,
	            "IOMaximumBandwidth": 0,
	            "MaskedPaths": null,
	            "ReadonlyPaths": null
	        },
	        "GraphDriver": {
	            "Data": {
	                "LowerDir": "/var/lib/docker/overlay2/a218c1210234467c18f43392ece19787d17d411370e1157a3e6c33511b377fc6-init/diff:/var/lib/docker/overlay2/6ed902a04c22dd3041d65f8183926fcc1f46fb9c240ed2c4472a750ce633e7fc/diff",
	                "MergedDir": "/var/lib/docker/overlay2/a218c1210234467c18f43392ece19787d17d411370e1157a3e6c33511b377fc6/merged",
	                "UpperDir": "/var/lib/docker/overlay2/a218c1210234467c18f43392ece19787d17d411370e1157a3e6c33511b377fc6/diff",
	                "WorkDir": "/var/lib/docker/overlay2/a218c1210234467c18f43392ece19787d17d411370e1157a3e6c33511b377fc6/work"
	            },
	            "Name": "overlay2"
	        },
	        "Mounts": [
	            {
	                "Type": "bind",
	                "Source": "/lib/modules",
	                "Destination": "/lib/modules",
	                "Mode": "ro",
	                "RW": false,
	                "Propagation": "rprivate"
	            },
	            {
	                "Type": "volume",
	                "Name": "old-k8s-version-894472",
	                "Source": "/var/lib/docker/volumes/old-k8s-version-894472/_data",
	                "Destination": "/var",
	                "Driver": "local",
	                "Mode": "z",
	                "RW": true,
	                "Propagation": ""
	            }
	        ],
	        "Config": {
	            "Hostname": "old-k8s-version-894472",
	            "Domainname": "",
	            "User": "",
	            "AttachStdin": false,
	            "AttachStdout": false,
	            "AttachStderr": false,
	            "ExposedPorts": {
	                "22/tcp": {},
	                "2376/tcp": {},
	                "32443/tcp": {},
	                "5000/tcp": {},
	                "8443/tcp": {}
	            },
	            "Tty": true,
	            "OpenStdin": false,
	            "StdinOnce": false,
	            "Env": [
	                "container=docker",
	                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
	            ],
	            "Cmd": null,
	            "Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d",
	            "Volumes": null,
	            "WorkingDir": "/",
	            "Entrypoint": [
	                "/usr/local/bin/entrypoint",
	                "/sbin/init"
	            ],
	            "OnBuild": null,
	            "Labels": {
	                "created_by.minikube.sigs.k8s.io": "true",
	                "mode.minikube.sigs.k8s.io": "old-k8s-version-894472",
	                "name.minikube.sigs.k8s.io": "old-k8s-version-894472",
	                "role.minikube.sigs.k8s.io": ""
	            },
	            "StopSignal": "SIGRTMIN+3"
	        },
	        "NetworkSettings": {
	            "Bridge": "",
	            "SandboxID": "f305d36a01d1a564e6e37e3d35aaa4195e813b0f7ae24251a638945f693fd8e0",
	            "SandboxKey": "/var/run/docker/netns/f305d36a01d1",
	            "Ports": {
	                "22/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "35084"
	                    }
	                ],
	                "2376/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "35085"
	                    }
	                ],
	                "32443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "35088"
	                    }
	                ],
	                "5000/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "35086"
	                    }
	                ],
	                "8443/tcp": [
	                    {
	                        "HostIp": "127.0.0.1",
	                        "HostPort": "35087"
	                    }
	                ]
	            },
	            "HairpinMode": false,
	            "LinkLocalIPv6Address": "",
	            "LinkLocalIPv6PrefixLen": 0,
	            "SecondaryIPAddresses": null,
	            "SecondaryIPv6Addresses": null,
	            "EndpointID": "",
	            "Gateway": "",
	            "GlobalIPv6Address": "",
	            "GlobalIPv6PrefixLen": 0,
	            "IPAddress": "",
	            "IPPrefixLen": 0,
	            "IPv6Gateway": "",
	            "MacAddress": "",
	            "Networks": {
	                "old-k8s-version-894472": {
	                    "IPAMConfig": {
	                        "IPv4Address": "192.168.85.2"
	                    },
	                    "Links": null,
	                    "Aliases": null,
	                    "MacAddress": "02:42:c0:a8:55:02",
	                    "DriverOpts": null,
	                    "NetworkID": "e8b3ff14cbdfc2a1286db62206966a0ebe57a37d2a08f526b564e5256ad6450c",
	                    "EndpointID": "3150bec38809672b11b5a7c1b22bc2073898b23a50eb852356ccc39b9bae4fba",
	                    "Gateway": "192.168.85.1",
	                    "IPAddress": "192.168.85.2",
	                    "IPPrefixLen": 24,
	                    "IPv6Gateway": "",
	                    "GlobalIPv6Address": "",
	                    "GlobalIPv6PrefixLen": 0,
	                    "DNSNames": [
	                        "old-k8s-version-894472",
	                        "64c0bba19cec"
	                    ]
	                }
	            }
	        }
	    }
	]

                                                
                                                
-- /stdout --
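Note: the inspect output above shows the static cluster IP (192.168.85.2) and the host port forwarded to the apiserver (8443/tcp -> 127.0.0.1:35087), which is the address family the healthz probe in the log used. A hedged sketch for reading that mapping directly from Docker (standard `docker inspect` Go templating; port values are specific to this run):

	# Print the host port mapped to the apiserver's 8443/tcp
	docker inspect -f '{{(index (index .NetworkSettings.Ports "8443/tcp") 0).HostPort}}' old-k8s-version-894472
	# For this run that prints 35087; the in-container endpoint 192.168.85.2:8443/healthz answered 200 "ok" above,
	# so the forwarded endpoint can likewise be probed (auth requirements may vary by cluster settings):
	curl -k https://127.0.0.1:35087/healthz
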
helpers_test.go:239: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p old-k8s-version-894472 -n old-k8s-version-894472
helpers_test.go:244: <<< TestStartStop/group/old-k8s-version/serial/SecondStart FAILED: start of post-mortem logs <<<
helpers_test.go:245: ======>  post-mortem[TestStartStop/group/old-k8s-version/serial/SecondStart]: minikube logs <======
helpers_test.go:247: (dbg) Run:  out/minikube-linux-arm64 -p old-k8s-version-894472 logs -n 25
helpers_test.go:247: (dbg) Done: out/minikube-linux-arm64 -p old-k8s-version-894472 logs -n 25: (1.303128489s)
helpers_test.go:252: TestStartStop/group/old-k8s-version/serial/SecondStart logs: 
-- stdout --
	
	==> Audit <==
	|---------|--------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
	| Command |                          Args                          |        Profile         |  User   | Version |     Start Time      |      End Time       |
	|---------|--------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
	| ssh     | -p false-055531 sudo systemctl                         | false-055531           | jenkins | v1.33.1 | 16 Aug 24 00:05 UTC | 16 Aug 24 00:05 UTC |
	|         | status containerd --all --full                         |                        |         |         |                     |                     |
	|         | --no-pager                                             |                        |         |         |                     |                     |
	| ssh     | -p false-055531 sudo systemctl                         | false-055531           | jenkins | v1.33.1 | 16 Aug 24 00:05 UTC | 16 Aug 24 00:05 UTC |
	|         | cat containerd --no-pager                              |                        |         |         |                     |                     |
	| ssh     | -p false-055531 sudo cat                               | false-055531           | jenkins | v1.33.1 | 16 Aug 24 00:05 UTC | 16 Aug 24 00:05 UTC |
	|         | /lib/systemd/system/containerd.service                 |                        |         |         |                     |                     |
	| ssh     | -p false-055531 sudo cat                               | false-055531           | jenkins | v1.33.1 | 16 Aug 24 00:05 UTC | 16 Aug 24 00:05 UTC |
	|         | /etc/containerd/config.toml                            |                        |         |         |                     |                     |
	| ssh     | -p false-055531 sudo                                   | false-055531           | jenkins | v1.33.1 | 16 Aug 24 00:05 UTC | 16 Aug 24 00:05 UTC |
	|         | containerd config dump                                 |                        |         |         |                     |                     |
	| ssh     | -p false-055531 sudo systemctl                         | false-055531           | jenkins | v1.33.1 | 16 Aug 24 00:05 UTC |                     |
	|         | status crio --all --full                               |                        |         |         |                     |                     |
	|         | --no-pager                                             |                        |         |         |                     |                     |
	| ssh     | -p false-055531 sudo systemctl                         | false-055531           | jenkins | v1.33.1 | 16 Aug 24 00:05 UTC | 16 Aug 24 00:05 UTC |
	|         | cat crio --no-pager                                    |                        |         |         |                     |                     |
	| ssh     | -p false-055531 sudo find                              | false-055531           | jenkins | v1.33.1 | 16 Aug 24 00:05 UTC | 16 Aug 24 00:05 UTC |
	|         | /etc/crio -type f -exec sh -c                          |                        |         |         |                     |                     |
	|         | 'echo {}; cat {}' \;                                   |                        |         |         |                     |                     |
	| ssh     | -p false-055531 sudo crio                              | false-055531           | jenkins | v1.33.1 | 16 Aug 24 00:05 UTC | 16 Aug 24 00:05 UTC |
	|         | config                                                 |                        |         |         |                     |                     |
	| delete  | -p false-055531                                        | false-055531           | jenkins | v1.33.1 | 16 Aug 24 00:05 UTC | 16 Aug 24 00:05 UTC |
	| start   | -p no-preload-158739                                   | no-preload-158739      | jenkins | v1.33.1 | 16 Aug 24 00:05 UTC | 16 Aug 24 00:06 UTC |
	|         | --memory=2200                                          |                        |         |         |                     |                     |
	|         | --alsologtostderr                                      |                        |         |         |                     |                     |
	|         | --wait=true --preload=false                            |                        |         |         |                     |                     |
	|         | --driver=docker                                        |                        |         |         |                     |                     |
	|         | --container-runtime=docker                             |                        |         |         |                     |                     |
	|         | --kubernetes-version=v1.31.0                           |                        |         |         |                     |                     |
	| addons  | enable metrics-server -p no-preload-158739             | no-preload-158739      | jenkins | v1.33.1 | 16 Aug 24 00:06 UTC | 16 Aug 24 00:06 UTC |
	|         | --images=MetricsServer=registry.k8s.io/echoserver:1.4  |                        |         |         |                     |                     |
	|         | --registries=MetricsServer=fake.domain                 |                        |         |         |                     |                     |
	| addons  | enable metrics-server -p old-k8s-version-894472        | old-k8s-version-894472 | jenkins | v1.33.1 | 16 Aug 24 00:06 UTC | 16 Aug 24 00:06 UTC |
	|         | --images=MetricsServer=registry.k8s.io/echoserver:1.4  |                        |         |         |                     |                     |
	|         | --registries=MetricsServer=fake.domain                 |                        |         |         |                     |                     |
	| stop    | -p no-preload-158739                                   | no-preload-158739      | jenkins | v1.33.1 | 16 Aug 24 00:06 UTC | 16 Aug 24 00:06 UTC |
	|         | --alsologtostderr -v=3                                 |                        |         |         |                     |                     |
	| stop    | -p old-k8s-version-894472                              | old-k8s-version-894472 | jenkins | v1.33.1 | 16 Aug 24 00:06 UTC | 16 Aug 24 00:06 UTC |
	|         | --alsologtostderr -v=3                                 |                        |         |         |                     |                     |
	| addons  | enable dashboard -p no-preload-158739                  | no-preload-158739      | jenkins | v1.33.1 | 16 Aug 24 00:06 UTC | 16 Aug 24 00:06 UTC |
	|         | --images=MetricsScraper=registry.k8s.io/echoserver:1.4 |                        |         |         |                     |                     |
	| start   | -p no-preload-158739                                   | no-preload-158739      | jenkins | v1.33.1 | 16 Aug 24 00:06 UTC | 16 Aug 24 00:11 UTC |
	|         | --memory=2200                                          |                        |         |         |                     |                     |
	|         | --alsologtostderr                                      |                        |         |         |                     |                     |
	|         | --wait=true --preload=false                            |                        |         |         |                     |                     |
	|         | --driver=docker                                        |                        |         |         |                     |                     |
	|         | --container-runtime=docker                             |                        |         |         |                     |                     |
	|         | --kubernetes-version=v1.31.0                           |                        |         |         |                     |                     |
	| addons  | enable dashboard -p old-k8s-version-894472             | old-k8s-version-894472 | jenkins | v1.33.1 | 16 Aug 24 00:06 UTC | 16 Aug 24 00:06 UTC |
	|         | --images=MetricsScraper=registry.k8s.io/echoserver:1.4 |                        |         |         |                     |                     |
	| start   | -p old-k8s-version-894472                              | old-k8s-version-894472 | jenkins | v1.33.1 | 16 Aug 24 00:06 UTC |                     |
	|         | --memory=2200                                          |                        |         |         |                     |                     |
	|         | --alsologtostderr --wait=true                          |                        |         |         |                     |                     |
	|         | --kvm-network=default                                  |                        |         |         |                     |                     |
	|         | --kvm-qemu-uri=qemu:///system                          |                        |         |         |                     |                     |
	|         | --disable-driver-mounts                                |                        |         |         |                     |                     |
	|         | --keep-context=false                                   |                        |         |         |                     |                     |
	|         | --driver=docker                                        |                        |         |         |                     |                     |
	|         | --container-runtime=docker                             |                        |         |         |                     |                     |
	|         | --kubernetes-version=v1.20.0                           |                        |         |         |                     |                     |
	| image   | no-preload-158739 image list                           | no-preload-158739      | jenkins | v1.33.1 | 16 Aug 24 00:11 UTC | 16 Aug 24 00:11 UTC |
	|         | --format=json                                          |                        |         |         |                     |                     |
	| pause   | -p no-preload-158739                                   | no-preload-158739      | jenkins | v1.33.1 | 16 Aug 24 00:11 UTC | 16 Aug 24 00:11 UTC |
	|         | --alsologtostderr -v=1                                 |                        |         |         |                     |                     |
	| unpause | -p no-preload-158739                                   | no-preload-158739      | jenkins | v1.33.1 | 16 Aug 24 00:11 UTC | 16 Aug 24 00:11 UTC |
	|         | --alsologtostderr -v=1                                 |                        |         |         |                     |                     |
	| delete  | -p no-preload-158739                                   | no-preload-158739      | jenkins | v1.33.1 | 16 Aug 24 00:11 UTC | 16 Aug 24 00:11 UTC |
	| delete  | -p no-preload-158739                                   | no-preload-158739      | jenkins | v1.33.1 | 16 Aug 24 00:11 UTC | 16 Aug 24 00:11 UTC |
	| start   | -p embed-certs-951478                                  | embed-certs-951478     | jenkins | v1.33.1 | 16 Aug 24 00:11 UTC | 16 Aug 24 00:13 UTC |
	|         | --memory=2200                                          |                        |         |         |                     |                     |
	|         | --alsologtostderr --wait=true                          |                        |         |         |                     |                     |
	|         | --embed-certs --driver=docker                          |                        |         |         |                     |                     |
	|         |  --container-runtime=docker                            |                        |         |         |                     |                     |
	|         | --kubernetes-version=v1.31.0                           |                        |         |         |                     |                     |
	|---------|--------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
	
	
	==> Last Start <==
	Log file created at: 2024/08/16 00:11:52
	Running on machine: ip-172-31-29-130
	Binary: Built with gc go1.22.5 for linux/arm64
	Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
	I0816 00:11:52.858524 2418220 out.go:345] Setting OutFile to fd 1 ...
	I0816 00:11:52.858706 2418220 out.go:392] TERM=,COLORTERM=, which probably does not support color
	I0816 00:11:52.858720 2418220 out.go:358] Setting ErrFile to fd 2...
	I0816 00:11:52.858725 2418220 out.go:392] TERM=,COLORTERM=, which probably does not support color
	I0816 00:11:52.859019 2418220 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19452-2026001/.minikube/bin
	I0816 00:11:52.859473 2418220 out.go:352] Setting JSON to false
	I0816 00:11:52.860683 2418220 start.go:129] hostinfo: {"hostname":"ip-172-31-29-130","uptime":32057,"bootTime":1723735056,"procs":245,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1067-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"36adf542-ef4f-4e2d-a0c8-6868d1383ff9"}
	I0816 00:11:52.860750 2418220 start.go:139] virtualization:  
	I0816 00:11:52.864102 2418220 out.go:177] * [embed-certs-951478] minikube v1.33.1 on Ubuntu 20.04 (arm64)
	I0816 00:11:52.865856 2418220 out.go:177]   - MINIKUBE_LOCATION=19452
	I0816 00:11:52.865967 2418220 notify.go:220] Checking for updates...
	I0816 00:11:52.869669 2418220 out.go:177]   - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I0816 00:11:52.871415 2418220 out.go:177]   - KUBECONFIG=/home/jenkins/minikube-integration/19452-2026001/kubeconfig
	I0816 00:11:52.873079 2418220 out.go:177]   - MINIKUBE_HOME=/home/jenkins/minikube-integration/19452-2026001/.minikube
	I0816 00:11:52.875043 2418220 out.go:177]   - MINIKUBE_BIN=out/minikube-linux-arm64
	I0816 00:11:52.876891 2418220 out.go:177]   - MINIKUBE_FORCE_SYSTEMD=
	I0816 00:11:52.879499 2418220 config.go:182] Loaded profile config "old-k8s-version-894472": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.20.0
	I0816 00:11:52.879638 2418220 driver.go:392] Setting default libvirt URI to qemu:///system
	I0816 00:11:52.913834 2418220 docker.go:123] docker version: linux-27.1.2:Docker Engine - Community
	I0816 00:11:52.913949 2418220 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0816 00:11:52.975933 2418220 info.go:266] docker info: {ID:U5VK:ZNT5:35M3:FHLW:Q7TL:ELFX:BNAG:AV4T:UD2H:SK5L:SEJV:SJJL Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:35 OomKillDisable:true NGoroutines:53 SystemTime:2024-08-16 00:11:52.966452542 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1067-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aar
ch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214900736 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-29-130 Labels:[] ExperimentalBuild:false ServerVersion:27.1.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:8fc6bcff51318944179630522a095cc9dbf9f353 Expected:8fc6bcff51318944179630522a095cc9dbf9f353} RuncCommit:{ID:v1.1.13-0-g58aa920 Expected:v1.1.13-0-g58aa920} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErro
rs:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.16.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.29.1]] Warnings:<nil>}}
	I0816 00:11:52.976041 2418220 docker.go:307] overlay module found
	I0816 00:11:52.979038 2418220 out.go:177] * Using the docker driver based on user configuration
	I0816 00:11:52.981027 2418220 start.go:297] selected driver: docker
	I0816 00:11:52.981047 2418220 start.go:901] validating driver "docker" against <nil>
	I0816 00:11:52.981062 2418220 start.go:912] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
	I0816 00:11:52.981806 2418220 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0816 00:11:53.036894 2418220 info.go:266] docker info: {ID:U5VK:ZNT5:35M3:FHLW:Q7TL:ELFX:BNAG:AV4T:UD2H:SK5L:SEJV:SJJL Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:35 OomKillDisable:true NGoroutines:53 SystemTime:2024-08-16 00:11:53.027304915 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1067-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aar
ch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214900736 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-29-130 Labels:[] ExperimentalBuild:false ServerVersion:27.1.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:8fc6bcff51318944179630522a095cc9dbf9f353 Expected:8fc6bcff51318944179630522a095cc9dbf9f353} RuncCommit:{ID:v1.1.13-0-g58aa920 Expected:v1.1.13-0-g58aa920} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErro
rs:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.16.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.29.1]] Warnings:<nil>}}
	I0816 00:11:53.037059 2418220 start_flags.go:310] no existing cluster config was found, will generate one from the flags 
	I0816 00:11:53.037279 2418220 start_flags.go:947] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
	I0816 00:11:53.039368 2418220 out.go:177] * Using Docker driver with root privileges
	I0816 00:11:53.041492 2418220 cni.go:84] Creating CNI manager for ""
	I0816 00:11:53.041534 2418220 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
	I0816 00:11:53.041550 2418220 start_flags.go:319] Found "bridge CNI" CNI - setting NetworkPlugin=cni
	I0816 00:11:53.041819 2418220 start.go:340] cluster config:
	{Name:embed-certs-951478 KeepContext:false EmbedCerts:true MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.0 ClusterName:embed-certs-951478 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local Contain
erRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.31.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSH
AuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I0816 00:11:53.044920 2418220 out.go:177] * Starting "embed-certs-951478" primary control-plane node in "embed-certs-951478" cluster
	I0816 00:11:53.046799 2418220 cache.go:121] Beginning downloading kic base image for docker with docker
	I0816 00:11:53.049143 2418220 out.go:177] * Pulling base image v0.0.44-1723740748-19452 ...
	I0816 00:11:53.052054 2418220 preload.go:131] Checking if preload exists for k8s version v1.31.0 and runtime docker
	I0816 00:11:53.052115 2418220 preload.go:146] Found local preload: /home/jenkins/minikube-integration/19452-2026001/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.0-docker-overlay2-arm64.tar.lz4
	I0816 00:11:53.052127 2418220 cache.go:56] Caching tarball of preloaded images
	I0816 00:11:53.052139 2418220 image.go:79] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d in local docker daemon
	I0816 00:11:53.052221 2418220 preload.go:172] Found /home/jenkins/minikube-integration/19452-2026001/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.0-docker-overlay2-arm64.tar.lz4 in cache, skipping download
	I0816 00:11:53.052234 2418220 cache.go:59] Finished verifying existence of preloaded tar for v1.31.0 on docker
	I0816 00:11:53.052340 2418220 profile.go:143] Saving config to /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/config.json ...
	I0816 00:11:53.052360 2418220 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/config.json: {Name:mk82451302fc99f46e813a7aceca107dbdcfa5ea Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	W0816 00:11:53.071643 2418220 image.go:95] image gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d is of wrong architecture
	I0816 00:11:53.071665 2418220 cache.go:149] Downloading gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d to local cache
	I0816 00:11:53.071743 2418220 image.go:63] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d in local cache directory
	I0816 00:11:53.071766 2418220 image.go:66] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d in local cache directory, skipping pull
	I0816 00:11:53.071775 2418220 image.go:135] gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d exists in cache, skipping pull
	I0816 00:11:53.071783 2418220 cache.go:152] successfully saved gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d as a tarball
	I0816 00:11:53.071789 2418220 cache.go:162] Loading gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d from local cache
	I0816 00:11:53.246602 2418220 cache.go:164] successfully loaded and using gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d from cached tarball
	I0816 00:11:53.246657 2418220 cache.go:194] Successfully downloaded all kic artifacts
	I0816 00:11:53.246699 2418220 start.go:360] acquireMachinesLock for embed-certs-951478: {Name:mkaf3d425cc498fe588fa88c10fbce082b5cf19b Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I0816 00:11:53.247157 2418220 start.go:364] duration metric: took 429.955µs to acquireMachinesLock for "embed-certs-951478"
	I0816 00:11:53.247202 2418220 start.go:93] Provisioning new machine with config: &{Name:embed-certs-951478 KeepContext:false EmbedCerts:true MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.0 ClusterName:embed-certs-951478 Namespace:default APIServerHAVIP: APIServe
rName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.31.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:f
alse CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.31.0 ContainerRuntime:docker ControlPlane:true Worker:true}
	I0816 00:11:53.247292 2418220 start.go:125] createHost starting for "" (driver="docker")
	I0816 00:11:53.343372 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:11:55.838560 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:11:53.251535 2418220 out.go:235] * Creating docker container (CPUs=2, Memory=2200MB) ...
	I0816 00:11:53.251794 2418220 start.go:159] libmachine.API.Create for "embed-certs-951478" (driver="docker")
	I0816 00:11:53.251830 2418220 client.go:168] LocalClient.Create starting
	I0816 00:11:53.251896 2418220 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/ca.pem
	I0816 00:11:53.251935 2418220 main.go:141] libmachine: Decoding PEM data...
	I0816 00:11:53.251951 2418220 main.go:141] libmachine: Parsing certificate...
	I0816 00:11:53.252005 2418220 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/cert.pem
	I0816 00:11:53.252025 2418220 main.go:141] libmachine: Decoding PEM data...
	I0816 00:11:53.252039 2418220 main.go:141] libmachine: Parsing certificate...
	I0816 00:11:53.252431 2418220 cli_runner.go:164] Run: docker network inspect embed-certs-951478 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
	W0816 00:11:53.268756 2418220 cli_runner.go:211] docker network inspect embed-certs-951478 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
	I0816 00:11:53.268848 2418220 network_create.go:284] running [docker network inspect embed-certs-951478] to gather additional debugging logs...
	I0816 00:11:53.268870 2418220 cli_runner.go:164] Run: docker network inspect embed-certs-951478
	W0816 00:11:53.287655 2418220 cli_runner.go:211] docker network inspect embed-certs-951478 returned with exit code 1
	I0816 00:11:53.287691 2418220 network_create.go:287] error running [docker network inspect embed-certs-951478]: docker network inspect embed-certs-951478: exit status 1
	stdout:
	[]
	
	stderr:
	Error response from daemon: network embed-certs-951478 not found
	I0816 00:11:53.287704 2418220 network_create.go:289] output of [docker network inspect embed-certs-951478]: -- stdout --
	[]
	
	-- /stdout --
	** stderr ** 
	Error response from daemon: network embed-certs-951478 not found
	
	** /stderr **
	I0816 00:11:53.287858 2418220 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
	I0816 00:11:53.304647 2418220 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-63fa5ee8e5ef IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:02:42:bd:d4:ba:4c} reservation:<nil>}
	I0816 00:11:53.305080 2418220 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-159047428ed5 IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:02:42:a7:bc:29:e1} reservation:<nil>}
	I0816 00:11:53.305518 2418220 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-701a52de9afc IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:02:42:8a:56:38:33} reservation:<nil>}
	I0816 00:11:53.306111 2418220 network.go:206] using free private subnet 192.168.76.0/24: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0x40018aa0b0}
	I0816 00:11:53.306149 2418220 network_create.go:124] attempt to create docker network embed-certs-951478 192.168.76.0/24 with gateway 192.168.76.1 and MTU of 1500 ...
	I0816 00:11:53.306217 2418220 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.76.0/24 --gateway=192.168.76.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=embed-certs-951478 embed-certs-951478
	I0816 00:11:53.394923 2418220 network_create.go:108] docker network embed-certs-951478 192.168.76.0/24 created
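	For reference, the subnet scan above (skipping the taken 192.168.49.0/24, 192.168.58.0/24 and 192.168.67.0/24 before settling on 192.168.76.0/24) can be reproduced by hand with the docker CLI. This is only an illustrative sketch of the equivalent commands, not output from the test run:
	    # list the subnets already claimed by existing Docker networks
	    docker network inspect $(docker network ls -q) \
	      --format '{{.Name}}: {{range .IPAM.Config}}{{.Subnet}}{{end}}'
	    # create a bridge network on the chosen free /24, mirroring the command logged above
	    docker network create --driver=bridge --subnet=192.168.76.0/24 --gateway=192.168.76.1 \
	      -o com.docker.network.driver.mtu=1500 embed-certs-951478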
	I0816 00:11:53.394958 2418220 kic.go:121] calculated static IP "192.168.76.2" for the "embed-certs-951478" container
	I0816 00:11:53.395050 2418220 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
	I0816 00:11:53.416557 2418220 cli_runner.go:164] Run: docker volume create embed-certs-951478 --label name.minikube.sigs.k8s.io=embed-certs-951478 --label created_by.minikube.sigs.k8s.io=true
	I0816 00:11:53.434689 2418220 oci.go:103] Successfully created a docker volume embed-certs-951478
	I0816 00:11:53.434782 2418220 cli_runner.go:164] Run: docker run --rm --name embed-certs-951478-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=embed-certs-951478 --entrypoint /usr/bin/test -v embed-certs-951478:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d -d /var/lib
	I0816 00:11:54.095026 2418220 oci.go:107] Successfully prepared a docker volume embed-certs-951478
	I0816 00:11:54.095082 2418220 preload.go:131] Checking if preload exists for k8s version v1.31.0 and runtime docker
	I0816 00:11:54.095102 2418220 kic.go:194] Starting extracting preloaded images to volume ...
	I0816 00:11:54.095185 2418220 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/19452-2026001/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.0-docker-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v embed-certs-951478:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d -I lz4 -xf /preloaded.tar -C /extractDir
	I0816 00:11:57.838907 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:11:59.839494 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:11:58.044105 2418220 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/19452-2026001/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.0-docker-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v embed-certs-951478:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d -I lz4 -xf /preloaded.tar -C /extractDir: (3.948870819s)
	I0816 00:11:58.044135 2418220 kic.go:203] duration metric: took 3.94902846s to extract preloaded images to volume ...
	W0816 00:11:58.044296 2418220 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
	I0816 00:11:58.044407 2418220 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
	I0816 00:11:58.097979 2418220 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname embed-certs-951478 --name embed-certs-951478 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=embed-certs-951478 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=embed-certs-951478 --network embed-certs-951478 --ip 192.168.76.2 --volume embed-certs-951478:/var --security-opt apparmor=unconfined --memory=2200mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d
	I0816 00:11:58.453583 2418220 cli_runner.go:164] Run: docker container inspect embed-certs-951478 --format={{.State.Running}}
	I0816 00:11:58.476619 2418220 cli_runner.go:164] Run: docker container inspect embed-certs-951478 --format={{.State.Status}}
	I0816 00:11:58.500432 2418220 cli_runner.go:164] Run: docker exec embed-certs-951478 stat /var/lib/dpkg/alternatives/iptables
	I0816 00:11:58.557486 2418220 oci.go:144] the created container "embed-certs-951478" has a running status.
	I0816 00:11:58.557513 2418220 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/19452-2026001/.minikube/machines/embed-certs-951478/id_rsa...
	I0816 00:11:59.396695 2418220 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/19452-2026001/.minikube/machines/embed-certs-951478/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
	I0816 00:11:59.421830 2418220 cli_runner.go:164] Run: docker container inspect embed-certs-951478 --format={{.State.Status}}
	I0816 00:11:59.449883 2418220 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
	I0816 00:11:59.449905 2418220 kic_runner.go:114] Args: [docker exec --privileged embed-certs-951478 chown docker:docker /home/docker/.ssh/authorized_keys]
	I0816 00:11:59.510542 2418220 cli_runner.go:164] Run: docker container inspect embed-certs-951478 --format={{.State.Status}}
	I0816 00:11:59.551991 2418220 machine.go:93] provisionDockerMachine start ...
	I0816 00:11:59.552082 2418220 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-951478
	I0816 00:11:59.574972 2418220 main.go:141] libmachine: Using SSH client type: native
	I0816 00:11:59.575247 2418220 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e49d0] 0x3e7230 <nil>  [] 0s} 127.0.0.1 35089 <nil> <nil>}
	I0816 00:11:59.575257 2418220 main.go:141] libmachine: About to run SSH command:
	hostname
	I0816 00:11:59.717314 2418220 main.go:141] libmachine: SSH cmd err, output: <nil>: embed-certs-951478
	
	I0816 00:11:59.717336 2418220 ubuntu.go:169] provisioning hostname "embed-certs-951478"
	I0816 00:11:59.717410 2418220 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-951478
	I0816 00:11:59.738696 2418220 main.go:141] libmachine: Using SSH client type: native
	I0816 00:11:59.739041 2418220 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e49d0] 0x3e7230 <nil>  [] 0s} 127.0.0.1 35089 <nil> <nil>}
	I0816 00:11:59.739058 2418220 main.go:141] libmachine: About to run SSH command:
	sudo hostname embed-certs-951478 && echo "embed-certs-951478" | sudo tee /etc/hostname
	I0816 00:11:59.897922 2418220 main.go:141] libmachine: SSH cmd err, output: <nil>: embed-certs-951478
	
	I0816 00:11:59.898047 2418220 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-951478
	I0816 00:11:59.917185 2418220 main.go:141] libmachine: Using SSH client type: native
	I0816 00:11:59.917440 2418220 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e49d0] 0x3e7230 <nil>  [] 0s} 127.0.0.1 35089 <nil> <nil>}
	I0816 00:11:59.917459 2418220 main.go:141] libmachine: About to run SSH command:
	
			if ! grep -xq '.*\sembed-certs-951478' /etc/hosts; then
				if grep -xq '127.0.1.1\s.*' /etc/hosts; then
					sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 embed-certs-951478/g' /etc/hosts;
				else 
					echo '127.0.1.1 embed-certs-951478' | sudo tee -a /etc/hosts; 
				fi
			fi
	I0816 00:12:00.079149 2418220 main.go:141] libmachine: SSH cmd err, output: <nil>: 
	I0816 00:12:00.079240 2418220 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/19452-2026001/.minikube CaCertPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/19452-2026001/.minikube}
	I0816 00:12:00.079317 2418220 ubuntu.go:177] setting up certificates
	I0816 00:12:00.079358 2418220 provision.go:84] configureAuth start
	I0816 00:12:00.079470 2418220 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" embed-certs-951478
	I0816 00:12:00.131755 2418220 provision.go:143] copyHostCerts
	I0816 00:12:00.131839 2418220 exec_runner.go:144] found /home/jenkins/minikube-integration/19452-2026001/.minikube/ca.pem, removing ...
	I0816 00:12:00.131850 2418220 exec_runner.go:203] rm: /home/jenkins/minikube-integration/19452-2026001/.minikube/ca.pem
	I0816 00:12:00.131943 2418220 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/19452-2026001/.minikube/ca.pem (1082 bytes)
	I0816 00:12:00.132342 2418220 exec_runner.go:144] found /home/jenkins/minikube-integration/19452-2026001/.minikube/cert.pem, removing ...
	I0816 00:12:00.132368 2418220 exec_runner.go:203] rm: /home/jenkins/minikube-integration/19452-2026001/.minikube/cert.pem
	I0816 00:12:00.132425 2418220 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/19452-2026001/.minikube/cert.pem (1123 bytes)
	I0816 00:12:00.132560 2418220 exec_runner.go:144] found /home/jenkins/minikube-integration/19452-2026001/.minikube/key.pem, removing ...
	I0816 00:12:00.132568 2418220 exec_runner.go:203] rm: /home/jenkins/minikube-integration/19452-2026001/.minikube/key.pem
	I0816 00:12:00.132603 2418220 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/19452-2026001/.minikube/key.pem (1675 bytes)
	I0816 00:12:00.132684 2418220 provision.go:117] generating server cert: /home/jenkins/minikube-integration/19452-2026001/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/19452-2026001/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/19452-2026001/.minikube/certs/ca-key.pem org=jenkins.embed-certs-951478 san=[127.0.0.1 192.168.76.2 embed-certs-951478 localhost minikube]
	I0816 00:12:00.660182 2418220 provision.go:177] copyRemoteCerts
	I0816 00:12:00.660293 2418220 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
	I0816 00:12:00.660342 2418220 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-951478
	I0816 00:12:00.677903 2418220 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35089 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/embed-certs-951478/id_rsa Username:docker}
	I0816 00:12:00.770741 2418220 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
	I0816 00:12:00.795035 2418220 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/machines/server.pem --> /etc/docker/server.pem (1224 bytes)
	I0816 00:12:00.820111 2418220 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
	I0816 00:12:00.846492 2418220 provision.go:87] duration metric: took 767.106659ms to configureAuth
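	The server certificate generated during configureAuth carries the SANs listed in the provision step above (127.0.0.1, 192.168.76.2, embed-certs-951478, localhost, minikube). As an illustrative check, not part of the test run, they can be inspected on the node after the copy:
	    sudo openssl x509 -in /etc/docker/server.pem -noout -text | grep -A1 'Subject Alternative Name'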
	I0816 00:12:00.846517 2418220 ubuntu.go:193] setting minikube options for container-runtime
	I0816 00:12:00.846698 2418220 config.go:182] Loaded profile config "embed-certs-951478": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.0
	I0816 00:12:00.846753 2418220 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-951478
	I0816 00:12:00.863080 2418220 main.go:141] libmachine: Using SSH client type: native
	I0816 00:12:00.863335 2418220 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e49d0] 0x3e7230 <nil>  [] 0s} 127.0.0.1 35089 <nil> <nil>}
	I0816 00:12:00.863352 2418220 main.go:141] libmachine: About to run SSH command:
	df --output=fstype / | tail -n 1
	I0816 00:12:01.006556 2418220 main.go:141] libmachine: SSH cmd err, output: <nil>: overlay
	
	I0816 00:12:01.006582 2418220 ubuntu.go:71] root file system type: overlay
	I0816 00:12:01.006719 2418220 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
	I0816 00:12:01.006801 2418220 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-951478
	I0816 00:12:01.026029 2418220 main.go:141] libmachine: Using SSH client type: native
	I0816 00:12:01.026278 2418220 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e49d0] 0x3e7230 <nil>  [] 0s} 127.0.0.1 35089 <nil> <nil>}
	I0816 00:12:01.026365 2418220 main.go:141] libmachine: About to run SSH command:
	sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
	Description=Docker Application Container Engine
	Documentation=https://docs.docker.com
	BindsTo=containerd.service
	After=network-online.target firewalld.service containerd.service
	Wants=network-online.target
	Requires=docker.socket
	StartLimitBurst=3
	StartLimitIntervalSec=60
	
	[Service]
	Type=notify
	Restart=on-failure
	
	
	
	# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
	# The base configuration already specifies an 'ExecStart=...' command. The first directive
	# here is to clear out that command inherited from the base configuration. Without this,
	# the command from the base configuration and the command specified here are treated as
	# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
	# will catch this invalid input and refuse to start the service with an error like:
	#  Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
	
	# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
	# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
	ExecStart=
	ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 
	ExecReload=/bin/kill -s HUP \$MAINPID
	
	# Having non-zero Limit*s causes performance problems due to accounting overhead
	# in the kernel. We recommend using cgroups to do container-local accounting.
	LimitNOFILE=infinity
	LimitNPROC=infinity
	LimitCORE=infinity
	
	# Uncomment TasksMax if your systemd version supports it.
	# Only systemd 226 and above support this version.
	TasksMax=infinity
	TimeoutStartSec=0
	
	# set delegate yes so that systemd does not reset the cgroups of docker containers
	Delegate=yes
	
	# kill only the docker process, not all processes in the cgroup
	KillMode=process
	
	[Install]
	WantedBy=multi-user.target
	" | sudo tee /lib/systemd/system/docker.service.new
	I0816 00:12:01.172381 2418220 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
	Description=Docker Application Container Engine
	Documentation=https://docs.docker.com
	BindsTo=containerd.service
	After=network-online.target firewalld.service containerd.service
	Wants=network-online.target
	Requires=docker.socket
	StartLimitBurst=3
	StartLimitIntervalSec=60
	
	[Service]
	Type=notify
	Restart=on-failure
	
	
	
	# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
	# The base configuration already specifies an 'ExecStart=...' command. The first directive
	# here is to clear out that command inherited from the base configuration. Without this,
	# the command from the base configuration and the command specified here are treated as
	# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
	# will catch this invalid input and refuse to start the service with an error like:
	#  Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
	
	# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
	# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
	ExecStart=
	ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 
	ExecReload=/bin/kill -s HUP $MAINPID
	
	# Having non-zero Limit*s causes performance problems due to accounting overhead
	# in the kernel. We recommend using cgroups to do container-local accounting.
	LimitNOFILE=infinity
	LimitNPROC=infinity
	LimitCORE=infinity
	
	# Uncomment TasksMax if your systemd version supports it.
	# Only systemd 226 and above support this version.
	TasksMax=infinity
	TimeoutStartSec=0
	
	# set delegate yes so that systemd does not reset the cgroups of docker containers
	Delegate=yes
	
	# kill only the docker process, not all processes in the cgroup
	KillMode=process
	
	[Install]
	WantedBy=multi-user.target
	
	I0816 00:12:01.172570 2418220 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-951478
	I0816 00:12:01.193164 2418220 main.go:141] libmachine: Using SSH client type: native
	I0816 00:12:01.193510 2418220 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e49d0] 0x3e7230 <nil>  [] 0s} 127.0.0.1 35089 <nil> <nil>}
	I0816 00:12:01.193534 2418220 main.go:141] libmachine: About to run SSH command:
	sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
	I0816 00:12:02.064806 2418220 main.go:141] libmachine: SSH cmd err, output: <nil>: --- /lib/systemd/system/docker.service	2024-08-12 11:49:05.000000000 +0000
	+++ /lib/systemd/system/docker.service.new	2024-08-16 00:12:01.169480883 +0000
	@@ -1,46 +1,49 @@
	 [Unit]
	 Description=Docker Application Container Engine
	 Documentation=https://docs.docker.com
	-After=network-online.target docker.socket firewalld.service containerd.service time-set.target
	-Wants=network-online.target containerd.service
	+BindsTo=containerd.service
	+After=network-online.target firewalld.service containerd.service
	+Wants=network-online.target
	 Requires=docker.socket
	+StartLimitBurst=3
	+StartLimitIntervalSec=60
	 
	 [Service]
	 Type=notify
	-# the default is not to use systemd for cgroups because the delegate issues still
	-# exists and systemd currently does not support the cgroup feature set required
	-# for containers run by docker
	-ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
	-ExecReload=/bin/kill -s HUP $MAINPID
	-TimeoutStartSec=0
	-RestartSec=2
	-Restart=always
	+Restart=on-failure
	 
	-# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
	-# Both the old, and new location are accepted by systemd 229 and up, so using the old location
	-# to make them work for either version of systemd.
	-StartLimitBurst=3
	 
	-# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
	-# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
	-# this option work for either version of systemd.
	-StartLimitInterval=60s
	+
	+# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
	+# The base configuration already specifies an 'ExecStart=...' command. The first directive
	+# here is to clear out that command inherited from the base configuration. Without this,
	+# the command from the base configuration and the command specified here are treated as
	+# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
	+# will catch this invalid input and refuse to start the service with an error like:
	+#  Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
	+
	+# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
	+# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
	+ExecStart=
	+ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 
	+ExecReload=/bin/kill -s HUP $MAINPID
	 
	 # Having non-zero Limit*s causes performance problems due to accounting overhead
	 # in the kernel. We recommend using cgroups to do container-local accounting.
	+LimitNOFILE=infinity
	 LimitNPROC=infinity
	 LimitCORE=infinity
	 
	-# Comment TasksMax if your systemd version does not support it.
	-# Only systemd 226 and above support this option.
	+# Uncomment TasksMax if your systemd version supports it.
	+# Only systemd 226 and above support this version.
	 TasksMax=infinity
	+TimeoutStartSec=0
	 
	 # set delegate yes so that systemd does not reset the cgroups of docker containers
	 Delegate=yes
	 
	 # kill only the docker process, not all processes in the cgroup
	 KillMode=process
	-OOMScoreAdjust=-500
	 
	 [Install]
	 WantedBy=multi-user.target
	Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install.
	Executing: /lib/systemd/systemd-sysv-install enable docker
	
	I0816 00:12:02.064847 2418220 machine.go:96] duration metric: took 2.512837254s to provisionDockerMachine
	I0816 00:12:02.064858 2418220 client.go:171] duration metric: took 8.813017466s to LocalClient.Create
	I0816 00:12:02.064873 2418220 start.go:167] duration metric: took 8.813079372s to libmachine.API.Create "embed-certs-951478"
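	The docker.service unit written above relies on the standard systemd override pattern called out in its own comments: the bare ExecStart= clears the command inherited from the packaged unit so the following ExecStart with the TLS and ulimit flags replaces it rather than being appended. A quick way to confirm what the node ended up with, assuming SSH access such as minikube ssh -p embed-certs-951478 (illustrative only):
	    sudo systemctl cat docker.service              # show the unit actually in effect after the replacement
	    systemctl show docker --property=ExecStart     # confirm a single ExecStart is active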
	I0816 00:12:02.064881 2418220 start.go:293] postStartSetup for "embed-certs-951478" (driver="docker")
	I0816 00:12:02.064894 2418220 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
	I0816 00:12:02.064975 2418220 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
	I0816 00:12:02.065019 2418220 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-951478
	I0816 00:12:02.083025 2418220 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35089 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/embed-certs-951478/id_rsa Username:docker}
	I0816 00:12:02.183115 2418220 ssh_runner.go:195] Run: cat /etc/os-release
	I0816 00:12:02.186672 2418220 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
	I0816 00:12:02.186709 2418220 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
	I0816 00:12:02.186734 2418220 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
	I0816 00:12:02.186742 2418220 info.go:137] Remote host: Ubuntu 22.04.4 LTS
	I0816 00:12:02.186752 2418220 filesync.go:126] Scanning /home/jenkins/minikube-integration/19452-2026001/.minikube/addons for local assets ...
	I0816 00:12:02.186812 2418220 filesync.go:126] Scanning /home/jenkins/minikube-integration/19452-2026001/.minikube/files for local assets ...
	I0816 00:12:02.186893 2418220 filesync.go:149] local asset: /home/jenkins/minikube-integration/19452-2026001/.minikube/files/etc/ssl/certs/20313962.pem -> 20313962.pem in /etc/ssl/certs
	I0816 00:12:02.186997 2418220 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
	I0816 00:12:02.196047 2418220 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/files/etc/ssl/certs/20313962.pem --> /etc/ssl/certs/20313962.pem (1708 bytes)
	I0816 00:12:02.221475 2418220 start.go:296] duration metric: took 156.577962ms for postStartSetup
	I0816 00:12:02.221860 2418220 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" embed-certs-951478
	I0816 00:12:02.243604 2418220 profile.go:143] Saving config to /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/config.json ...
	I0816 00:12:02.243915 2418220 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I0816 00:12:02.243967 2418220 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-951478
	I0816 00:12:02.260757 2418220 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35089 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/embed-certs-951478/id_rsa Username:docker}
	I0816 00:12:02.354609 2418220 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
	I0816 00:12:02.359646 2418220 start.go:128] duration metric: took 9.112328697s to createHost
	I0816 00:12:02.359682 2418220 start.go:83] releasing machines lock for "embed-certs-951478", held for 9.112502288s
	I0816 00:12:02.359788 2418220 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" embed-certs-951478
	I0816 00:12:02.376769 2418220 ssh_runner.go:195] Run: cat /version.json
	I0816 00:12:02.376851 2418220 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-951478
	I0816 00:12:02.377162 2418220 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
	I0816 00:12:02.377230 2418220 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-951478
	I0816 00:12:02.394583 2418220 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35089 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/embed-certs-951478/id_rsa Username:docker}
	I0816 00:12:02.415353 2418220 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35089 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/embed-certs-951478/id_rsa Username:docker}
	I0816 00:12:02.485217 2418220 ssh_runner.go:195] Run: systemctl --version
	I0816 00:12:02.628573 2418220 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
	I0816 00:12:02.632897 2418220 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
	I0816 00:12:02.658493 2418220 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
	I0816 00:12:02.658614 2418220 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
	I0816 00:12:02.688844 2418220 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
	I0816 00:12:02.688873 2418220 start.go:495] detecting cgroup driver to use...
	I0816 00:12:02.688909 2418220 detect.go:187] detected "cgroupfs" cgroup driver on host os
	I0816 00:12:02.689026 2418220 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
	" | sudo tee /etc/crictl.yaml"
	I0816 00:12:02.706512 2418220 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10"|' /etc/containerd/config.toml"
	I0816 00:12:02.718130 2418220 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
	I0816 00:12:02.729240 2418220 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
	I0816 00:12:02.729305 2418220 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
	I0816 00:12:02.739565 2418220 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
	I0816 00:12:02.751443 2418220 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
	I0816 00:12:02.761866 2418220 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
	I0816 00:12:02.772032 2418220 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
	I0816 00:12:02.782187 2418220 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
	I0816 00:12:02.792161 2418220 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
	I0816 00:12:02.802920 2418220 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1  enable_unprivileged_ports = true|' /etc/containerd/config.toml"
	I0816 00:12:02.814194 2418220 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
	I0816 00:12:02.823612 2418220 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
	I0816 00:12:02.832302 2418220 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0816 00:12:02.934319 2418220 ssh_runner.go:195] Run: sudo systemctl restart containerd
	I0816 00:12:03.044008 2418220 start.go:495] detecting cgroup driver to use...
	I0816 00:12:03.044110 2418220 detect.go:187] detected "cgroupfs" cgroup driver on host os
	I0816 00:12:03.044203 2418220 ssh_runner.go:195] Run: sudo systemctl cat docker.service
	I0816 00:12:03.065219 2418220 cruntime.go:279] skipping containerd shutdown because we are bound to it
	I0816 00:12:03.065352 2418220 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
	I0816 00:12:03.079776 2418220 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
	" | sudo tee /etc/crictl.yaml"
	I0816 00:12:03.099909 2418220 ssh_runner.go:195] Run: which cri-dockerd
	I0816 00:12:03.104978 2418220 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
	I0816 00:12:03.116382 2418220 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (190 bytes)
	I0816 00:12:03.158620 2418220 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
	I0816 00:12:03.288059 2418220 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
	I0816 00:12:03.399961 2418220 docker.go:574] configuring docker to use "cgroupfs" as cgroup driver...
	I0816 00:12:03.400176 2418220 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
	I0816 00:12:03.420911 2418220 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0816 00:12:03.527133 2418220 ssh_runner.go:195] Run: sudo systemctl restart docker
	I0816 00:12:03.828027 2418220 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
	I0816 00:12:03.847150 2418220 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
	I0816 00:12:03.861253 2418220 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
	I0816 00:12:03.960159 2418220 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
	I0816 00:12:04.051165 2418220 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0816 00:12:04.149394 2418220 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
	I0816 00:12:04.164191 2418220 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
	I0816 00:12:04.176914 2418220 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0816 00:12:04.279586 2418220 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
	I0816 00:12:04.371250 2418220 start.go:542] Will wait 60s for socket path /var/run/cri-dockerd.sock
	I0816 00:12:04.371347 2418220 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
	I0816 00:12:04.375488 2418220 start.go:563] Will wait 60s for crictl version
	I0816 00:12:04.375573 2418220 ssh_runner.go:195] Run: which crictl
	I0816 00:12:04.379030 2418220 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
	I0816 00:12:04.426385 2418220 start.go:579] Version:  0.1.0
	RuntimeName:  docker
	RuntimeVersion:  27.1.2
	RuntimeApiVersion:  v1
	I0816 00:12:04.426483 2418220 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
	I0816 00:12:04.449167 2418220 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
	I0816 00:12:01.841503 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:04.338013 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:06.340432 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:04.498171 2418220 out.go:235] * Preparing Kubernetes v1.31.0 on Docker 27.1.2 ...
	I0816 00:12:04.498273 2418220 cli_runner.go:164] Run: docker network inspect embed-certs-951478 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
	I0816 00:12:04.518125 2418220 ssh_runner.go:195] Run: grep 192.168.76.1	host.minikube.internal$ /etc/hosts
	I0816 00:12:04.522596 2418220 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.76.1	host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
	I0816 00:12:04.536643 2418220 kubeadm.go:883] updating cluster {Name:embed-certs-951478 KeepContext:false EmbedCerts:true MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.0 ClusterName:embed-certs-951478 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.31.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
	I0816 00:12:04.536784 2418220 preload.go:131] Checking if preload exists for k8s version v1.31.0 and runtime docker
	I0816 00:12:04.536848 2418220 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
	I0816 00:12:04.559266 2418220 docker.go:685] Got preloaded images: -- stdout --
	registry.k8s.io/kube-apiserver:v1.31.0
	registry.k8s.io/kube-scheduler:v1.31.0
	registry.k8s.io/kube-controller-manager:v1.31.0
	registry.k8s.io/kube-proxy:v1.31.0
	registry.k8s.io/etcd:3.5.15-0
	registry.k8s.io/pause:3.10
	registry.k8s.io/coredns/coredns:v1.11.1
	gcr.io/k8s-minikube/storage-provisioner:v5
	
	-- /stdout --
	I0816 00:12:04.559291 2418220 docker.go:615] Images already preloaded, skipping extraction
	I0816 00:12:04.559361 2418220 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
	I0816 00:12:04.584452 2418220 docker.go:685] Got preloaded images: -- stdout --
	registry.k8s.io/kube-apiserver:v1.31.0
	registry.k8s.io/kube-scheduler:v1.31.0
	registry.k8s.io/kube-controller-manager:v1.31.0
	registry.k8s.io/kube-proxy:v1.31.0
	registry.k8s.io/etcd:3.5.15-0
	registry.k8s.io/pause:3.10
	registry.k8s.io/coredns/coredns:v1.11.1
	gcr.io/k8s-minikube/storage-provisioner:v5
	
	-- /stdout --
	I0816 00:12:04.584478 2418220 cache_images.go:84] Images are preloaded, skipping loading
	I0816 00:12:04.584497 2418220 kubeadm.go:934] updating node { 192.168.76.2 8443 v1.31.0 docker true true} ...
	I0816 00:12:04.584595 2418220 kubeadm.go:946] kubelet [Unit]
	Wants=docker.socket
	
	[Service]
	ExecStart=
	ExecStart=/var/lib/minikube/binaries/v1.31.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=embed-certs-951478 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.76.2
	
	[Install]
	 config:
	{KubernetesVersion:v1.31.0 ClusterName:embed-certs-951478 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
	I0816 00:12:04.584668 2418220 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
	I0816 00:12:04.651013 2418220 cni.go:84] Creating CNI manager for ""
	I0816 00:12:04.651044 2418220 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
	I0816 00:12:04.651066 2418220 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
	I0816 00:12:04.651086 2418220 kubeadm.go:181] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.76.2 APIServerPort:8443 KubernetesVersion:v1.31.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:embed-certs-951478 NodeName:embed-certs-951478 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.76.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.76.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
	I0816 00:12:04.651251 2418220 kubeadm.go:187] kubeadm config:
	apiVersion: kubeadm.k8s.io/v1beta3
	kind: InitConfiguration
	localAPIEndpoint:
	  advertiseAddress: 192.168.76.2
	  bindPort: 8443
	bootstrapTokens:
	  - groups:
	      - system:bootstrappers:kubeadm:default-node-token
	    ttl: 24h0m0s
	    usages:
	      - signing
	      - authentication
	nodeRegistration:
	  criSocket: unix:///var/run/cri-dockerd.sock
	  name: "embed-certs-951478"
	  kubeletExtraArgs:
	    node-ip: 192.168.76.2
	  taints: []
	---
	apiVersion: kubeadm.k8s.io/v1beta3
	kind: ClusterConfiguration
	apiServer:
	  certSANs: ["127.0.0.1", "localhost", "192.168.76.2"]
	  extraArgs:
	    enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
	controllerManager:
	  extraArgs:
	    allocate-node-cidrs: "true"
	    leader-elect: "false"
	scheduler:
	  extraArgs:
	    leader-elect: "false"
	certificatesDir: /var/lib/minikube/certs
	clusterName: mk
	controlPlaneEndpoint: control-plane.minikube.internal:8443
	etcd:
	  local:
	    dataDir: /var/lib/minikube/etcd
	    extraArgs:
	      proxy-refresh-interval: "70000"
	kubernetesVersion: v1.31.0
	networking:
	  dnsDomain: cluster.local
	  podSubnet: "10.244.0.0/16"
	  serviceSubnet: 10.96.0.0/12
	---
	apiVersion: kubelet.config.k8s.io/v1beta1
	kind: KubeletConfiguration
	authentication:
	  x509:
	    clientCAFile: /var/lib/minikube/certs/ca.crt
	cgroupDriver: cgroupfs
	containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
	hairpinMode: hairpin-veth
	runtimeRequestTimeout: 15m
	clusterDomain: "cluster.local"
	# disable disk resource management by default
	imageGCHighThresholdPercent: 100
	evictionHard:
	  nodefs.available: "0%"
	  nodefs.inodesFree: "0%"
	  imagefs.available: "0%"
	failSwapOn: false
	staticPodPath: /etc/kubernetes/manifests
	---
	apiVersion: kubeproxy.config.k8s.io/v1alpha1
	kind: KubeProxyConfiguration
	clusterCIDR: "10.244.0.0/16"
	metricsBindAddress: 0.0.0.0:10249
	conntrack:
	  maxPerCore: 0
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
	  tcpEstablishedTimeout: 0s
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
	  tcpCloseWaitTimeout: 0s
	
	I0816 00:12:04.651326 2418220 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.31.0
	I0816 00:12:04.661002 2418220 binaries.go:44] Found k8s binaries, skipping transfer
	I0816 00:12:04.661128 2418220 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
	I0816 00:12:04.670347 2418220 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (317 bytes)
	I0816 00:12:04.689568 2418220 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
	I0816 00:12:04.709361 2418220 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2160 bytes)
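	The rendered kubeadm config shown above is staged as /var/tmp/minikube/kubeadm.yaml.new before being copied into place later in the run. As an illustrative sketch (not something the test itself executes), the same file could be exercised against the node's kubeadm binary without touching the cluster, assuming the v1.31.0 binaries found in the previous step:
	    sudo /var/lib/minikube/binaries/v1.31.0/kubeadm init \
	      --config /var/tmp/minikube/kubeadm.yaml.new --dry-run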
	I0816 00:12:04.728114 2418220 ssh_runner.go:195] Run: grep 192.168.76.2	control-plane.minikube.internal$ /etc/hosts
	I0816 00:12:04.731663 2418220 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.76.2	control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
	I0816 00:12:04.744044 2418220 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0816 00:12:04.842165 2418220 ssh_runner.go:195] Run: sudo systemctl start kubelet
	I0816 00:12:04.859515 2418220 certs.go:68] Setting up /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478 for IP: 192.168.76.2
	I0816 00:12:04.859538 2418220 certs.go:194] generating shared ca certs ...
	I0816 00:12:04.859555 2418220 certs.go:226] acquiring lock for ca certs: {Name:mkddf294a5c2bc6874920ab9b3e5ac4767302c25 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0816 00:12:04.859688 2418220 certs.go:235] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/19452-2026001/.minikube/ca.key
	I0816 00:12:04.859736 2418220 certs.go:235] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/19452-2026001/.minikube/proxy-client-ca.key
	I0816 00:12:04.859749 2418220 certs.go:256] generating profile certs ...
	I0816 00:12:04.859804 2418220 certs.go:363] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/client.key
	I0816 00:12:04.859821 2418220 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/client.crt with IP's: []
	I0816 00:12:05.337591 2418220 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/client.crt ...
	I0816 00:12:05.337657 2418220 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/client.crt: {Name:mk66acae4dba4c0e6f8b3691682ccc08e20ed8fb Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0816 00:12:05.338293 2418220 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/client.key ...
	I0816 00:12:05.338317 2418220 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/client.key: {Name:mk56dcbe25ac76068b8b8c268c775b98a6f606ab Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0816 00:12:05.338479 2418220 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/apiserver.key.742a43b5
	I0816 00:12:05.338503 2418220 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/apiserver.crt.742a43b5 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.76.2]
	I0816 00:12:05.959980 2418220 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/apiserver.crt.742a43b5 ...
	I0816 00:12:05.960017 2418220 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/apiserver.crt.742a43b5: {Name:mkd19119812e3979aba84a5f28384010afcbe451 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0816 00:12:05.960605 2418220 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/apiserver.key.742a43b5 ...
	I0816 00:12:05.960626 2418220 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/apiserver.key.742a43b5: {Name:mkb2bc172500f8845b5c37850b85cb33307854d4 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0816 00:12:05.961261 2418220 certs.go:381] copying /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/apiserver.crt.742a43b5 -> /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/apiserver.crt
	I0816 00:12:05.961361 2418220 certs.go:385] copying /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/apiserver.key.742a43b5 -> /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/apiserver.key
	I0816 00:12:05.961438 2418220 certs.go:363] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/proxy-client.key
	I0816 00:12:05.961461 2418220 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/proxy-client.crt with IP's: []
	I0816 00:12:06.500714 2418220 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/proxy-client.crt ...
	I0816 00:12:06.500747 2418220 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/proxy-client.crt: {Name:mkbd1b9d32ab9b584765ac042a8beb09a4272123 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0816 00:12:06.500929 2418220 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/proxy-client.key ...
	I0816 00:12:06.500947 2418220 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/proxy-client.key: {Name:mk4e167089caf226b28f1353facbccc7b07f9235 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0816 00:12:06.501141 2418220 certs.go:484] found cert: /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/2031396.pem (1338 bytes)
	W0816 00:12:06.501188 2418220 certs.go:480] ignoring /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/2031396_empty.pem, impossibly tiny 0 bytes
	I0816 00:12:06.501202 2418220 certs.go:484] found cert: /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/ca-key.pem (1679 bytes)
	I0816 00:12:06.501230 2418220 certs.go:484] found cert: /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/ca.pem (1082 bytes)
	I0816 00:12:06.501259 2418220 certs.go:484] found cert: /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/cert.pem (1123 bytes)
	I0816 00:12:06.501289 2418220 certs.go:484] found cert: /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/key.pem (1675 bytes)
	I0816 00:12:06.501336 2418220 certs.go:484] found cert: /home/jenkins/minikube-integration/19452-2026001/.minikube/files/etc/ssl/certs/20313962.pem (1708 bytes)
	I0816 00:12:06.502082 2418220 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
	I0816 00:12:06.527666 2418220 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
	I0816 00:12:06.553368 2418220 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
	I0816 00:12:06.579782 2418220 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
	I0816 00:12:06.605806 2418220 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1428 bytes)
	I0816 00:12:06.631485 2418220 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
	I0816 00:12:06.657393 2418220 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
	I0816 00:12:06.682949 2418220 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
	I0816 00:12:06.707769 2418220 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/2031396.pem --> /usr/share/ca-certificates/2031396.pem (1338 bytes)
	I0816 00:12:06.733548 2418220 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/files/etc/ssl/certs/20313962.pem --> /usr/share/ca-certificates/20313962.pem (1708 bytes)
	I0816 00:12:06.759041 2418220 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
	I0816 00:12:06.784180 2418220 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
	I0816 00:12:06.802964 2418220 ssh_runner.go:195] Run: openssl version
	I0816 00:12:06.808736 2418220 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/2031396.pem && ln -fs /usr/share/ca-certificates/2031396.pem /etc/ssl/certs/2031396.pem"
	I0816 00:12:06.818641 2418220 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/2031396.pem
	I0816 00:12:06.822668 2418220 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Aug 15 23:13 /usr/share/ca-certificates/2031396.pem
	I0816 00:12:06.822733 2418220 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/2031396.pem
	I0816 00:12:06.829847 2418220 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/2031396.pem /etc/ssl/certs/51391683.0"
	I0816 00:12:06.841351 2418220 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/20313962.pem && ln -fs /usr/share/ca-certificates/20313962.pem /etc/ssl/certs/20313962.pem"
	I0816 00:12:06.851576 2418220 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/20313962.pem
	I0816 00:12:06.855441 2418220 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Aug 15 23:13 /usr/share/ca-certificates/20313962.pem
	I0816 00:12:06.855504 2418220 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/20313962.pem
	I0816 00:12:06.862508 2418220 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/20313962.pem /etc/ssl/certs/3ec20f2e.0"
	I0816 00:12:06.871943 2418220 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
	I0816 00:12:06.885183 2418220 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
	I0816 00:12:06.888730 2418220 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Aug 15 23:06 /usr/share/ca-certificates/minikubeCA.pem
	I0816 00:12:06.888795 2418220 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
	I0816 00:12:06.895795 2418220 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
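	The hash-named symlinks created above (51391683.0, 3ec20f2e.0, b5213941.0) follow the OpenSSL convention of linking /etc/ssl/certs/<subject-hash>.0 to the certificate, which is why each link is preceded by an openssl x509 -hash call. The pairing for the minikube CA can be reproduced by hand (illustrative only):
	    openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem   # prints the subject hash, b5213941 per the link above
	    sudo ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/b5213941.0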
	I0816 00:12:06.907234 2418220 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
	I0816 00:12:06.911424 2418220 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
	stdout:
	
	stderr:
	stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
	I0816 00:12:06.911519 2418220 kubeadm.go:392] StartCluster: {Name:embed-certs-951478 KeepContext:false EmbedCerts:true MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.0 ClusterName:embed-certs-951478 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APISe
rverNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.31.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false Custo
mQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I0816 00:12:06.911654 2418220 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
	I0816 00:12:06.931600 2418220 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
	I0816 00:12:06.940701 2418220 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
	I0816 00:12:06.950675 2418220 kubeadm.go:214] ignoring SystemVerification for kubeadm because of docker driver
	I0816 00:12:06.950751 2418220 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
	I0816 00:12:06.960103 2418220 kubeadm.go:155] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
	stdout:
	
	stderr:
	ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
	ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
	ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
	ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
	I0816 00:12:06.960125 2418220 kubeadm.go:157] found existing configuration files:
	
	I0816 00:12:06.960198 2418220 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
	I0816 00:12:06.969241 2418220 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
	stdout:
	
	stderr:
	grep: /etc/kubernetes/admin.conf: No such file or directory
	I0816 00:12:06.969308 2418220 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
	I0816 00:12:06.978006 2418220 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
	I0816 00:12:06.987765 2418220 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
	stdout:
	
	stderr:
	grep: /etc/kubernetes/kubelet.conf: No such file or directory
	I0816 00:12:06.987836 2418220 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
	I0816 00:12:06.996521 2418220 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
	I0816 00:12:07.006173 2418220 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
	stdout:
	
	stderr:
	grep: /etc/kubernetes/controller-manager.conf: No such file or directory
	I0816 00:12:07.006308 2418220 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
	I0816 00:12:07.015686 2418220 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
	I0816 00:12:07.025031 2418220 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
	stdout:
	
	stderr:
	grep: /etc/kubernetes/scheduler.conf: No such file or directory
	I0816 00:12:07.025096 2418220 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
	I0816 00:12:07.033894 2418220 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.31.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml  --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
	I0816 00:12:07.074916 2418220 kubeadm.go:310] [init] Using Kubernetes version: v1.31.0
	I0816 00:12:07.075009 2418220 kubeadm.go:310] [preflight] Running pre-flight checks
	I0816 00:12:07.097390 2418220 kubeadm.go:310] [preflight] The system verification failed. Printing the output from the verification:
	I0816 00:12:07.097477 2418220 kubeadm.go:310] KERNEL_VERSION: 5.15.0-1067-aws
	I0816 00:12:07.097542 2418220 kubeadm.go:310] OS: Linux
	I0816 00:12:07.097599 2418220 kubeadm.go:310] CGROUPS_CPU: enabled
	I0816 00:12:07.097660 2418220 kubeadm.go:310] CGROUPS_CPUACCT: enabled
	I0816 00:12:07.097719 2418220 kubeadm.go:310] CGROUPS_CPUSET: enabled
	I0816 00:12:07.097784 2418220 kubeadm.go:310] CGROUPS_DEVICES: enabled
	I0816 00:12:07.097840 2418220 kubeadm.go:310] CGROUPS_FREEZER: enabled
	I0816 00:12:07.097910 2418220 kubeadm.go:310] CGROUPS_MEMORY: enabled
	I0816 00:12:07.097972 2418220 kubeadm.go:310] CGROUPS_PIDS: enabled
	I0816 00:12:07.098037 2418220 kubeadm.go:310] CGROUPS_HUGETLB: enabled
	I0816 00:12:07.098103 2418220 kubeadm.go:310] CGROUPS_BLKIO: enabled
	I0816 00:12:07.161709 2418220 kubeadm.go:310] [preflight] Pulling images required for setting up a Kubernetes cluster
	I0816 00:12:07.161839 2418220 kubeadm.go:310] [preflight] This might take a minute or two, depending on the speed of your internet connection
	I0816 00:12:07.161937 2418220 kubeadm.go:310] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
	I0816 00:12:07.184891 2418220 kubeadm.go:310] [certs] Using certificateDir folder "/var/lib/minikube/certs"
	I0816 00:12:07.189145 2418220 out.go:235]   - Generating certificates and keys ...
	I0816 00:12:07.189249 2418220 kubeadm.go:310] [certs] Using existing ca certificate authority
	I0816 00:12:07.189320 2418220 kubeadm.go:310] [certs] Using existing apiserver certificate and key on disk
	I0816 00:12:07.387551 2418220 kubeadm.go:310] [certs] Generating "apiserver-kubelet-client" certificate and key
	I0816 00:12:08.847475 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:11.339958 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:08.117701 2418220 kubeadm.go:310] [certs] Generating "front-proxy-ca" certificate and key
	I0816 00:12:08.527623 2418220 kubeadm.go:310] [certs] Generating "front-proxy-client" certificate and key
	I0816 00:12:09.032530 2418220 kubeadm.go:310] [certs] Generating "etcd/ca" certificate and key
	I0816 00:12:09.377734 2418220 kubeadm.go:310] [certs] Generating "etcd/server" certificate and key
	I0816 00:12:09.378068 2418220 kubeadm.go:310] [certs] etcd/server serving cert is signed for DNS names [embed-certs-951478 localhost] and IPs [192.168.76.2 127.0.0.1 ::1]
	I0816 00:12:10.081288 2418220 kubeadm.go:310] [certs] Generating "etcd/peer" certificate and key
	I0816 00:12:10.081663 2418220 kubeadm.go:310] [certs] etcd/peer serving cert is signed for DNS names [embed-certs-951478 localhost] and IPs [192.168.76.2 127.0.0.1 ::1]
	I0816 00:12:10.535653 2418220 kubeadm.go:310] [certs] Generating "etcd/healthcheck-client" certificate and key
	I0816 00:12:10.808876 2418220 kubeadm.go:310] [certs] Generating "apiserver-etcd-client" certificate and key
	I0816 00:12:11.320771 2418220 kubeadm.go:310] [certs] Generating "sa" key and public key
	I0816 00:12:11.321096 2418220 kubeadm.go:310] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
	I0816 00:12:11.805389 2418220 kubeadm.go:310] [kubeconfig] Writing "admin.conf" kubeconfig file
	I0816 00:12:12.158137 2418220 kubeadm.go:310] [kubeconfig] Writing "super-admin.conf" kubeconfig file
	I0816 00:12:12.427642 2418220 kubeadm.go:310] [kubeconfig] Writing "kubelet.conf" kubeconfig file
	I0816 00:12:12.707148 2418220 kubeadm.go:310] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
	I0816 00:12:13.032553 2418220 kubeadm.go:310] [kubeconfig] Writing "scheduler.conf" kubeconfig file
	I0816 00:12:13.033346 2418220 kubeadm.go:310] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
	I0816 00:12:13.036465 2418220 kubeadm.go:310] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
	I0816 00:12:13.341238 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:15.358241 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:13.038843 2418220 out.go:235]   - Booting up control plane ...
	I0816 00:12:13.038945 2418220 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-apiserver"
	I0816 00:12:13.039020 2418220 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-controller-manager"
	I0816 00:12:13.039732 2418220 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-scheduler"
	I0816 00:12:13.051965 2418220 kubeadm.go:310] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
	I0816 00:12:13.060044 2418220 kubeadm.go:310] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
	I0816 00:12:13.060100 2418220 kubeadm.go:310] [kubelet-start] Starting the kubelet
	I0816 00:12:13.169045 2418220 kubeadm.go:310] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
	I0816 00:12:13.169162 2418220 kubeadm.go:310] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
	I0816 00:12:14.174832 2418220 kubeadm.go:310] [kubelet-check] The kubelet is healthy after 1.005676671s
	I0816 00:12:14.174939 2418220 kubeadm.go:310] [api-check] Waiting for a healthy API server. This can take up to 4m0s
	I0816 00:12:20.676135 2418220 kubeadm.go:310] [api-check] The API server is healthy after 6.501484628s
	I0816 00:12:20.696164 2418220 kubeadm.go:310] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
	I0816 00:12:20.709659 2418220 kubeadm.go:310] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
	I0816 00:12:20.745600 2418220 kubeadm.go:310] [upload-certs] Skipping phase. Please see --upload-certs
	I0816 00:12:20.745828 2418220 kubeadm.go:310] [mark-control-plane] Marking the node embed-certs-951478 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
	I0816 00:12:20.758460 2418220 kubeadm.go:310] [bootstrap-token] Using token: bdnnnv.yz6t0ov6x5s6xvn8
	I0816 00:12:17.842997 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:20.343831 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:20.760813 2418220 out.go:235]   - Configuring RBAC rules ...
	I0816 00:12:20.761032 2418220 kubeadm.go:310] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
	I0816 00:12:20.767109 2418220 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
	I0816 00:12:20.776820 2418220 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
	I0816 00:12:20.781967 2418220 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
	I0816 00:12:20.801745 2418220 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
	I0816 00:12:20.807596 2418220 kubeadm.go:310] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
	I0816 00:12:21.083960 2418220 kubeadm.go:310] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
	I0816 00:12:21.523187 2418220 kubeadm.go:310] [addons] Applied essential addon: CoreDNS
	I0816 00:12:22.083227 2418220 kubeadm.go:310] [addons] Applied essential addon: kube-proxy
	I0816 00:12:22.084458 2418220 kubeadm.go:310] 
	I0816 00:12:22.084543 2418220 kubeadm.go:310] Your Kubernetes control-plane has initialized successfully!
	I0816 00:12:22.084559 2418220 kubeadm.go:310] 
	I0816 00:12:22.084655 2418220 kubeadm.go:310] To start using your cluster, you need to run the following as a regular user:
	I0816 00:12:22.084672 2418220 kubeadm.go:310] 
	I0816 00:12:22.084699 2418220 kubeadm.go:310]   mkdir -p $HOME/.kube
	I0816 00:12:22.084770 2418220 kubeadm.go:310]   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
	I0816 00:12:22.084825 2418220 kubeadm.go:310]   sudo chown $(id -u):$(id -g) $HOME/.kube/config
	I0816 00:12:22.084834 2418220 kubeadm.go:310] 
	I0816 00:12:22.084887 2418220 kubeadm.go:310] Alternatively, if you are the root user, you can run:
	I0816 00:12:22.084902 2418220 kubeadm.go:310] 
	I0816 00:12:22.084955 2418220 kubeadm.go:310]   export KUBECONFIG=/etc/kubernetes/admin.conf
	I0816 00:12:22.084963 2418220 kubeadm.go:310] 
	I0816 00:12:22.085014 2418220 kubeadm.go:310] You should now deploy a pod network to the cluster.
	I0816 00:12:22.085095 2418220 kubeadm.go:310] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
	I0816 00:12:22.085167 2418220 kubeadm.go:310]   https://kubernetes.io/docs/concepts/cluster-administration/addons/
	I0816 00:12:22.085176 2418220 kubeadm.go:310] 
	I0816 00:12:22.085257 2418220 kubeadm.go:310] You can now join any number of control-plane nodes by copying certificate authorities
	I0816 00:12:22.085335 2418220 kubeadm.go:310] and service account keys on each node and then running the following as root:
	I0816 00:12:22.085344 2418220 kubeadm.go:310] 
	I0816 00:12:22.085456 2418220 kubeadm.go:310]   kubeadm join control-plane.minikube.internal:8443 --token bdnnnv.yz6t0ov6x5s6xvn8 \
	I0816 00:12:22.085575 2418220 kubeadm.go:310] 	--discovery-token-ca-cert-hash sha256:4ca757b3a2e756ea9f20bac9790b7eeeaad243a2d641f2fcc3157bb9ecd2082f \
	I0816 00:12:22.085638 2418220 kubeadm.go:310] 	--control-plane 
	I0816 00:12:22.085653 2418220 kubeadm.go:310] 
	I0816 00:12:22.085738 2418220 kubeadm.go:310] Then you can join any number of worker nodes by running the following on each as root:
	I0816 00:12:22.085754 2418220 kubeadm.go:310] 
	I0816 00:12:22.085840 2418220 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token bdnnnv.yz6t0ov6x5s6xvn8 \
	I0816 00:12:22.085951 2418220 kubeadm.go:310] 	--discovery-token-ca-cert-hash sha256:4ca757b3a2e756ea9f20bac9790b7eeeaad243a2d641f2fcc3157bb9ecd2082f 
	I0816 00:12:22.091855 2418220 kubeadm.go:310] W0816 00:12:07.071548    1841 common.go:101] your configuration file uses a deprecated API spec: "kubeadm.k8s.io/v1beta3" (kind: "ClusterConfiguration"). Please use 'kubeadm config migrate --old-config old.yaml --new-config new.yaml', which will write the new, similar spec using a newer API version.
	I0816 00:12:22.092233 2418220 kubeadm.go:310] W0816 00:12:07.072512    1841 common.go:101] your configuration file uses a deprecated API spec: "kubeadm.k8s.io/v1beta3" (kind: "InitConfiguration"). Please use 'kubeadm config migrate --old-config old.yaml --new-config new.yaml', which will write the new, similar spec using a newer API version.
	I0816 00:12:22.092496 2418220 kubeadm.go:310] 	[WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1067-aws\n", err: exit status 1
	I0816 00:12:22.092797 2418220 kubeadm.go:310] 	[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
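The init run above gates on two health probes: the [kubelet-check] stage polls the kubelet's local healthz endpoint on port 10248, and the [api-check] stage waits for the API server at 192.168.76.2:8443 to answer its health endpoint. A hedged way to reproduce both probes from inside the node (only the kubelet URL is spelled out in the log; the API server path shown here is the conventional one):

    # kubelet liveness endpoint polled by kubeadm's [kubelet-check]
    curl -sf http://127.0.0.1:10248/healthz; echo
    # API server health; -k because the minikube CA is not in the host trust store
    curl -sk https://192.168.76.2:8443/healthz; echo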
	I0816 00:12:22.092860 2418220 cni.go:84] Creating CNI manager for ""
	I0816 00:12:22.092883 2418220 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
	I0816 00:12:22.094886 2418220 out.go:177] * Configuring bridge CNI (Container Networking Interface) ...
	I0816 00:12:22.096640 2418220 ssh_runner.go:195] Run: sudo mkdir -p /etc/cni/net.d
	I0816 00:12:22.105691 2418220 ssh_runner.go:362] scp memory --> /etc/cni/net.d/1-k8s.conflist (496 bytes)
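The 496-byte /etc/cni/net.d/1-k8s.conflist written above is the bridge CNI configuration minikube recommends for the docker driver on Kubernetes v1.24+. Its contents are not printed in the log; the sketch below is a representative bridge + portmap chain consistent with the 10.244.0.x pod addresses seen later, not the exact generated file:

    sudo tee /etc/cni/net.d/1-k8s.conflist >/dev/null <<'EOF'
    {
      "cniVersion": "0.4.0",
      "name": "bridge",
      "plugins": [
        {
          "type": "bridge",
          "bridge": "bridge",
          "isDefaultGateway": true,
          "ipMasq": true,
          "hairpinMode": true,
          "ipam": { "type": "host-local", "subnet": "10.244.0.0/16" }
        },
        { "type": "portmap", "capabilities": { "portMappings": true } }
      ]
    }
    EOF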
	I0816 00:12:22.127353 2418220 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
	I0816 00:12:22.127478 2418220 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
	I0816 00:12:22.127563 2418220 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes embed-certs-951478 minikube.k8s.io/updated_at=2024_08_16T00_12_22_0700 minikube.k8s.io/version=v1.33.1 minikube.k8s.io/commit=fe9c1d9e27059a205b0df8e5e482803b65ef8774 minikube.k8s.io/name=embed-certs-951478 minikube.k8s.io/primary=true
	I0816 00:12:22.296677 2418220 ops.go:34] apiserver oom_adj: -16
	I0816 00:12:22.296777 2418220 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0816 00:12:22.796981 2418220 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0816 00:12:23.296930 2418220 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0816 00:12:23.796873 2418220 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0816 00:12:24.296831 2418220 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0816 00:12:24.797739 2418220 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0816 00:12:25.296897 2418220 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0816 00:12:25.797876 2418220 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0816 00:12:26.297402 2418220 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I0816 00:12:26.402965 2418220 kubeadm.go:1113] duration metric: took 4.275532331s to wait for elevateKubeSystemPrivileges
	I0816 00:12:26.402997 2418220 kubeadm.go:394] duration metric: took 19.491487864s to StartCluster
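The burst of "kubectl get sa default" calls between 00:12:22 and 00:12:26 is the elevateKubeSystemPrivileges step: minikube binds cluster-admin to kube-system:default via the minikube-rbac ClusterRoleBinding and then polls until the default service account exists. A hedged equivalent, run by hand against the same kubeconfig on the node:

    KCTL="sudo /var/lib/minikube/binaries/v1.31.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig"
    # grant cluster-admin to the kube-system default service account (the minikube-rbac binding)
    $KCTL create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default
    # poll until the default service account has been created
    until $KCTL get sa default >/dev/null 2>&1; do sleep 0.5; done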
	I0816 00:12:26.403016 2418220 settings.go:142] acquiring lock: {Name:mkd932093f6b6db884e5d5f97d2ea9be134ab309 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0816 00:12:26.403084 2418220 settings.go:150] Updating kubeconfig:  /home/jenkins/minikube-integration/19452-2026001/kubeconfig
	I0816 00:12:26.404476 2418220 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19452-2026001/kubeconfig: {Name:mkb1a4d12f06c0f193e7cb7c118eeb997c3969bc Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0816 00:12:26.404725 2418220 start.go:235] Will wait 6m0s for node &{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.31.0 ContainerRuntime:docker ControlPlane:true Worker:true}
	I0816 00:12:26.404852 2418220 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
	I0816 00:12:26.405125 2418220 config.go:182] Loaded profile config "embed-certs-951478": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.0
	I0816 00:12:26.405167 2418220 addons.go:507] enable addons start: toEnable=map[ambassador:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false helm-tiller:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
	I0816 00:12:26.405231 2418220 addons.go:69] Setting storage-provisioner=true in profile "embed-certs-951478"
	I0816 00:12:26.405252 2418220 addons.go:234] Setting addon storage-provisioner=true in "embed-certs-951478"
	I0816 00:12:26.405277 2418220 host.go:66] Checking if "embed-certs-951478" exists ...
	I0816 00:12:26.406088 2418220 addons.go:69] Setting default-storageclass=true in profile "embed-certs-951478"
	I0816 00:12:26.406117 2418220 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "embed-certs-951478"
	I0816 00:12:26.406218 2418220 cli_runner.go:164] Run: docker container inspect embed-certs-951478 --format={{.State.Status}}
	I0816 00:12:26.406379 2418220 cli_runner.go:164] Run: docker container inspect embed-certs-951478 --format={{.State.Status}}
	I0816 00:12:26.407352 2418220 out.go:177] * Verifying Kubernetes components...
	I0816 00:12:26.409961 2418220 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I0816 00:12:26.440984 2418220 out.go:177]   - Using image gcr.io/k8s-minikube/storage-provisioner:v5
	I0816 00:12:22.839620 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:25.338275 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:26.442928 2418220 addons.go:431] installing /etc/kubernetes/addons/storage-provisioner.yaml
	I0816 00:12:26.442946 2418220 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
	I0816 00:12:26.443011 2418220 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-951478
	I0816 00:12:26.459378 2418220 addons.go:234] Setting addon default-storageclass=true in "embed-certs-951478"
	I0816 00:12:26.459423 2418220 host.go:66] Checking if "embed-certs-951478" exists ...
	I0816 00:12:26.459849 2418220 cli_runner.go:164] Run: docker container inspect embed-certs-951478 --format={{.State.Status}}
	I0816 00:12:26.488953 2418220 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35089 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/embed-certs-951478/id_rsa Username:docker}
	I0816 00:12:26.502024 2418220 addons.go:431] installing /etc/kubernetes/addons/storageclass.yaml
	I0816 00:12:26.502044 2418220 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
	I0816 00:12:26.502112 2418220 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-951478
	I0816 00:12:26.532453 2418220 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35089 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/embed-certs-951478/id_rsa Username:docker}
	I0816 00:12:26.772261 2418220 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^        forward . \/etc\/resolv.conf.*/i \        hosts {\n           192.168.76.1 host.minikube.internal\n           fallthrough\n        }' -e '/^        errors *$/i \        log' | sudo /var/lib/minikube/binaries/v1.31.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
	I0816 00:12:26.772420 2418220 ssh_runner.go:195] Run: sudo systemctl start kubelet
	I0816 00:12:26.803521 2418220 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
	I0816 00:12:26.825115 2418220 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
	I0816 00:12:27.696330 2418220 node_ready.go:35] waiting up to 6m0s for node "embed-certs-951478" to be "Ready" ...
	I0816 00:12:27.696426 2418220 start.go:971] {"host.minikube.internal": 192.168.76.1} host record injected into CoreDNS's ConfigMap
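The sed pipeline at 00:12:26.772261 rewrites the coredns ConfigMap in place: it inserts a hosts block ahead of the forward directive so that host.minikube.internal resolves to the gateway address 192.168.76.1, and adds a log directive before errors. After the replace, the relevant Corefile fragment can be inspected as below (the commented output is a sketch of what that sed produces, not a capture):

    kubectl -n kube-system get configmap coredns -o jsonpath='{.data.Corefile}'
    # expected fragment after the rewrite:
    #        hosts {
    #           192.168.76.1 host.minikube.internal
    #           fallthrough
    #        }
    #        forward . /etc/resolv.conf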
	I0816 00:12:27.719736 2418220 node_ready.go:49] node "embed-certs-951478" has status "Ready":"True"
	I0816 00:12:27.719759 2418220 node_ready.go:38] duration metric: took 23.306081ms for node "embed-certs-951478" to be "Ready" ...
	I0816 00:12:27.719768 2418220 pod_ready.go:36] extra waiting up to 6m0s for all system-critical pods including labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
	I0816 00:12:27.740423 2418220 pod_ready.go:79] waiting up to 6m0s for pod "coredns-6f6b679f8f-gx79d" in "kube-system" namespace to be "Ready" ...
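pod_ready treats a pod as ready only when its Ready condition reports True; the repeated "Ready":"False" lines that follow are those polls. Two hedged ways to run the same check by hand (kubectl stands for the versioned binary invoked elsewhere in the log; the pod name is copied from the line above):

    # print the Ready condition status of the coredns pod
    kubectl -n kube-system get pod coredns-6f6b679f8f-gx79d \
      -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}'
    # or block until the condition is met or the timeout expires
    kubectl -n kube-system wait --for=condition=Ready pod/coredns-6f6b679f8f-gx79d --timeout=6m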
	I0816 00:12:27.973570 2418220 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (1.169970424s)
	I0816 00:12:27.973756 2418220 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: (1.148610282s)
	I0816 00:12:27.984090 2418220 out.go:177] * Enabled addons: storage-provisioner, default-storageclass
	I0816 00:12:27.338503 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:29.338983 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:27.985790 2418220 addons.go:510] duration metric: took 1.580612175s for enable addons: enabled=[storage-provisioner default-storageclass]
	I0816 00:12:28.201157 2418220 kapi.go:214] "coredns" deployment in "kube-system" namespace and "embed-certs-951478" context rescaled to 1 replicas
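The rescale noted above trims the default two coredns replicas down to one for this single-node cluster; terminating one of the replicas is consistent with coredns-6f6b679f8f-gx79d later reporting phase Succeeded while the wait moves on to coredns-6f6b679f8f-s552c. A hedged equivalent of the rescale:

    # scale the coredns deployment to a single replica, as the rescale step does
    kubectl -n kube-system scale deployment coredns --replicas=1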
	I0816 00:12:29.747298 2418220 pod_ready.go:103] pod "coredns-6f6b679f8f-gx79d" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:32.246288 2418220 pod_ready.go:103] pod "coredns-6f6b679f8f-gx79d" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:31.839009 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:34.338847 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:36.339743 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:34.246388 2418220 pod_ready.go:103] pod "coredns-6f6b679f8f-gx79d" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:36.246902 2418220 pod_ready.go:103] pod "coredns-6f6b679f8f-gx79d" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:38.839318 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:41.337738 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:38.247610 2418220 pod_ready.go:103] pod "coredns-6f6b679f8f-gx79d" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:38.747518 2418220 pod_ready.go:98] pod "coredns-6f6b679f8f-gx79d" in "kube-system" namespace has status phase "Succeeded" (skipping!): {Phase:Succeeded Conditions:[{Type:PodReadyToStartContainers Status:False LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-08-16 00:12:38 +0000 UTC Reason: Message:} {Type:Initialized Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-08-16 00:12:26 +0000 UTC Reason:PodCompleted Message:} {Type:Ready Status:False LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-08-16 00:12:26 +0000 UTC Reason:PodCompleted Message:} {Type:ContainersReady Status:False LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-08-16 00:12:26 +0000 UTC Reason:PodCompleted Message:} {Type:PodScheduled Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-08-16 00:12:26 +0000 UTC Reason: Message:}] Message: Reason: NominatedNodeName: HostIP:192.168.76.2 HostIPs:[{IP:192.168.76.2
}] PodIP:10.244.0.2 PodIPs:[{IP:10.244.0.2}] StartTime:2024-08-16 00:12:26 +0000 UTC InitContainerStatuses:[] ContainerStatuses:[{Name:coredns State:{Waiting:nil Running:nil Terminated:&ContainerStateTerminated{ExitCode:0,Signal:0,Reason:Completed,Message:,StartedAt:2024-08-16 00:12:27 +0000 UTC,FinishedAt:2024-08-16 00:12:38 +0000 UTC,ContainerID:docker://b746a62a75218259e51590ac7adc43053cf61855a3c65759fb80d271c15c076a,}} LastTerminationState:{Waiting:nil Running:nil Terminated:nil} Ready:false RestartCount:0 Image:registry.k8s.io/coredns/coredns:v1.11.1 ImageID:docker-pullable://registry.k8s.io/coredns/coredns@sha256:1eeb4c7316bacb1d4c8ead65571cd92dd21e27359f0d4917f1a5822a73b75db1 ContainerID:docker://b746a62a75218259e51590ac7adc43053cf61855a3c65759fb80d271c15c076a Started:0x4001e6ee00 AllocatedResources:map[] Resources:nil VolumeMounts:[{Name:config-volume MountPath:/etc/coredns ReadOnly:true RecursiveReadOnly:0x4001e45900} {Name:kube-api-access-jsvj4 MountPath:/var/run/secrets/kubernetes.io/serviceaccount
ReadOnly:true RecursiveReadOnly:0x4001e45910}] User:nil AllocatedResourcesStatus:[]}] QOSClass:Burstable EphemeralContainerStatuses:[] Resize: ResourceClaimStatuses:[]}
	I0816 00:12:38.747550 2418220 pod_ready.go:82] duration metric: took 11.007037537s for pod "coredns-6f6b679f8f-gx79d" in "kube-system" namespace to be "Ready" ...
	E0816 00:12:38.747563 2418220 pod_ready.go:67] WaitExtra: waitPodCondition: pod "coredns-6f6b679f8f-gx79d" in "kube-system" namespace has status phase "Succeeded" (skipping!): {Phase:Succeeded Conditions:[{Type:PodReadyToStartContainers Status:False LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-08-16 00:12:38 +0000 UTC Reason: Message:} {Type:Initialized Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-08-16 00:12:26 +0000 UTC Reason:PodCompleted Message:} {Type:Ready Status:False LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-08-16 00:12:26 +0000 UTC Reason:PodCompleted Message:} {Type:ContainersReady Status:False LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-08-16 00:12:26 +0000 UTC Reason:PodCompleted Message:} {Type:PodScheduled Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-08-16 00:12:26 +0000 UTC Reason: Message:}] Message: Reason: NominatedNodeName: HostIP:192.168.7
6.2 HostIPs:[{IP:192.168.76.2}] PodIP:10.244.0.2 PodIPs:[{IP:10.244.0.2}] StartTime:2024-08-16 00:12:26 +0000 UTC InitContainerStatuses:[] ContainerStatuses:[{Name:coredns State:{Waiting:nil Running:nil Terminated:&ContainerStateTerminated{ExitCode:0,Signal:0,Reason:Completed,Message:,StartedAt:2024-08-16 00:12:27 +0000 UTC,FinishedAt:2024-08-16 00:12:38 +0000 UTC,ContainerID:docker://b746a62a75218259e51590ac7adc43053cf61855a3c65759fb80d271c15c076a,}} LastTerminationState:{Waiting:nil Running:nil Terminated:nil} Ready:false RestartCount:0 Image:registry.k8s.io/coredns/coredns:v1.11.1 ImageID:docker-pullable://registry.k8s.io/coredns/coredns@sha256:1eeb4c7316bacb1d4c8ead65571cd92dd21e27359f0d4917f1a5822a73b75db1 ContainerID:docker://b746a62a75218259e51590ac7adc43053cf61855a3c65759fb80d271c15c076a Started:0x4001e6ee00 AllocatedResources:map[] Resources:nil VolumeMounts:[{Name:config-volume MountPath:/etc/coredns ReadOnly:true RecursiveReadOnly:0x4001e45900} {Name:kube-api-access-jsvj4 MountPath:/var/run/secrets
/kubernetes.io/serviceaccount ReadOnly:true RecursiveReadOnly:0x4001e45910}] User:nil AllocatedResourcesStatus:[]}] QOSClass:Burstable EphemeralContainerStatuses:[] Resize: ResourceClaimStatuses:[]}
	I0816 00:12:38.747571 2418220 pod_ready.go:79] waiting up to 6m0s for pod "coredns-6f6b679f8f-s552c" in "kube-system" namespace to be "Ready" ...
	I0816 00:12:40.753844 2418220 pod_ready.go:103] pod "coredns-6f6b679f8f-s552c" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:42.754578 2418220 pod_ready.go:103] pod "coredns-6f6b679f8f-s552c" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:43.338933 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:45.838487 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:45.255150 2418220 pod_ready.go:103] pod "coredns-6f6b679f8f-s552c" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:47.754713 2418220 pod_ready.go:103] pod "coredns-6f6b679f8f-s552c" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:47.840406 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:50.337729 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:50.254382 2418220 pod_ready.go:103] pod "coredns-6f6b679f8f-s552c" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:52.754042 2418220 pod_ready.go:103] pod "coredns-6f6b679f8f-s552c" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:52.337948 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:52.337979 2407112 pod_ready.go:82] duration metric: took 4m0.00579824s for pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace to be "Ready" ...
	E0816 00:12:52.337989 2407112 pod_ready.go:67] WaitExtra: waitPodCondition: context deadline exceeded
	I0816 00:12:52.337996 2407112 pod_ready.go:39] duration metric: took 5m29.714095578s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
	I0816 00:12:52.338015 2407112 api_server.go:52] waiting for apiserver process to appear ...
	I0816 00:12:52.338092 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-apiserver --format={{.ID}}
	I0816 00:12:52.356755 2407112 logs.go:276] 2 containers: [682baec10b08 3d14903eaff5]
	I0816 00:12:52.356831 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_etcd --format={{.ID}}
	I0816 00:12:52.374920 2407112 logs.go:276] 2 containers: [5aacba0afc73 15f34ed96b2b]
	I0816 00:12:52.375012 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_coredns --format={{.ID}}
	I0816 00:12:52.394135 2407112 logs.go:276] 2 containers: [0646646e2348 f583e4715841]
	I0816 00:12:52.394219 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-scheduler --format={{.ID}}
	I0816 00:12:52.411860 2407112 logs.go:276] 2 containers: [67be7ec054c6 003fa784026a]
	I0816 00:12:52.411939 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-proxy --format={{.ID}}
	I0816 00:12:52.430627 2407112 logs.go:276] 2 containers: [c5eeddd51e95 1e79f4e5d490]
	I0816 00:12:52.430718 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-controller-manager --format={{.ID}}
	I0816 00:12:52.448774 2407112 logs.go:276] 2 containers: [cc3ceefdfcf9 821653363c67]
	I0816 00:12:52.448883 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kindnet --format={{.ID}}
	I0816 00:12:52.467313 2407112 logs.go:276] 0 containers: []
	W0816 00:12:52.467337 2407112 logs.go:278] No container was found matching "kindnet"
	I0816 00:12:52.467406 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_storage-provisioner --format={{.ID}}
	I0816 00:12:52.488664 2407112 logs.go:276] 2 containers: [a6efcfb5cb17 de096650c620]
	I0816 00:12:52.488759 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kubernetes-dashboard --format={{.ID}}
	I0816 00:12:52.508042 2407112 logs.go:276] 1 containers: [3ef1e388df06]
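The docker ps -a filters above rely on the dockershim/cri-dockerd naming convention, in which kubelet-managed containers are named k8s_<container>_<pod>_<namespace>_<uid>_<attempt>; each filter yields the container IDs whose last 400 log lines are then collected. A hedged reproduction for a single component, using an ID taken from the log:

    # find kube-apiserver containers (running or exited) by their kubelet-assigned name prefix
    docker ps -a --filter=name=k8s_kube-apiserver --format={{.ID}}
    # tail the most recent 400 lines from one of the returned containers
    docker logs --tail 400 682baec10b08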
	I0816 00:12:52.508089 2407112 logs.go:123] Gathering logs for kube-apiserver [3d14903eaff5] ...
	I0816 00:12:52.508101 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 3d14903eaff5"
	I0816 00:12:52.575393 2407112 logs.go:123] Gathering logs for etcd [15f34ed96b2b] ...
	I0816 00:12:52.575429 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 15f34ed96b2b"
	I0816 00:12:52.609501 2407112 logs.go:123] Gathering logs for kube-proxy [1e79f4e5d490] ...
	I0816 00:12:52.609527 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 1e79f4e5d490"
	I0816 00:12:52.632173 2407112 logs.go:123] Gathering logs for kube-controller-manager [cc3ceefdfcf9] ...
	I0816 00:12:52.632251 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 cc3ceefdfcf9"
	I0816 00:12:52.680493 2407112 logs.go:123] Gathering logs for Docker ...
	I0816 00:12:52.680527 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u docker -u cri-docker -n 400"
	I0816 00:12:52.708209 2407112 logs.go:123] Gathering logs for dmesg ...
	I0816 00:12:52.708242 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
	I0816 00:12:52.725031 2407112 logs.go:123] Gathering logs for describe nodes ...
	I0816 00:12:52.725068 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.20.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
	I0816 00:12:52.888699 2407112 logs.go:123] Gathering logs for kube-apiserver [682baec10b08] ...
	I0816 00:12:52.888730 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 682baec10b08"
	I0816 00:12:52.947378 2407112 logs.go:123] Gathering logs for etcd [5aacba0afc73] ...
	I0816 00:12:52.947415 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 5aacba0afc73"
	I0816 00:12:52.984633 2407112 logs.go:123] Gathering logs for kube-controller-manager [821653363c67] ...
	I0816 00:12:52.984685 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 821653363c67"
	I0816 00:12:53.041565 2407112 logs.go:123] Gathering logs for kubernetes-dashboard [3ef1e388df06] ...
	I0816 00:12:53.041610 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 3ef1e388df06"
	I0816 00:12:53.063527 2407112 logs.go:123] Gathering logs for kubelet ...
	I0816 00:12:53.063556 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
	W0816 00:12:53.119952 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.595590    1361 reflector.go:138] object-"kube-system"/"kube-proxy-token-7vfmt": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "kube-proxy-token-7vfmt" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
	W0816 00:12:53.120210 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.596830    1361 reflector.go:138] object-"kube-system"/"coredns": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "coredns" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
	W0816 00:12:53.120436 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.597148    1361 reflector.go:138] object-"kube-system"/"coredns-token-xzs4d": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "coredns-token-xzs4d" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
	W0816 00:12:53.120668 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.597346    1361 reflector.go:138] object-"kube-system"/"metrics-server-token-545hd": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "metrics-server-token-545hd" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
	W0816 00:12:53.120885 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.597537    1361 reflector.go:138] object-"kube-system"/"kube-proxy": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "kube-proxy" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
	W0816 00:12:53.121122 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.597780    1361 reflector.go:138] object-"kube-system"/"storage-provisioner-token-7rcl5": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "storage-provisioner-token-7rcl5" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
	W0816 00:12:53.121338 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.597976    1361 reflector.go:138] object-"default"/"default-token-zv2bb": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "default-token-zv2bb" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "default": no relationship found between node 'old-k8s-version-894472' and this object
	W0816 00:12:53.131353 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:25 old-k8s-version-894472 kubelet[1361]: E0816 00:07:25.865188    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	W0816 00:12:53.131864 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:25 old-k8s-version-894472 kubelet[1361]: E0816 00:07:25.961596    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.132568 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:27 old-k8s-version-894472 kubelet[1361]: E0816 00:07:27.069241    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.135723 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:40 old-k8s-version-894472 kubelet[1361]: E0816 00:07:40.936997    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	W0816 00:12:53.136063 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:41 old-k8s-version-894472 kubelet[1361]: E0816 00:07:41.174078    1361 reflector.go:138] object-"kubernetes-dashboard"/"kubernetes-dashboard-token-2w5nt": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "kubernetes-dashboard-token-2w5nt" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "kubernetes-dashboard": no relationship found between node 'old-k8s-version-894472' and this object
	W0816 00:12:53.140916 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:48 old-k8s-version-894472 kubelet[1361]: E0816 00:07:48.480988    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	W0816 00:12:53.141483 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:49 old-k8s-version-894472 kubelet[1361]: E0816 00:07:49.497915    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.141711 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:52 old-k8s-version-894472 kubelet[1361]: E0816 00:07:52.902604    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.142184 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:56 old-k8s-version-894472 kubelet[1361]: E0816 00:07:56.570944    1361 pod_workers.go:191] Error syncing pod 3c69c2c8-274b-42e9-83f4-e56b1a377a84 ("storage-provisioner_kube-system(3c69c2c8-274b-42e9-83f4-e56b1a377a84)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 10s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(3c69c2c8-274b-42e9-83f4-e56b1a377a84)"
	W0816 00:12:53.144872 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:04 old-k8s-version-894472 kubelet[1361]: E0816 00:08:04.540574    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	W0816 00:12:53.147012 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:04 old-k8s-version-894472 kubelet[1361]: E0816 00:08:04.937109    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	W0816 00:12:53.147341 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:18 old-k8s-version-894472 kubelet[1361]: E0816 00:08:18.906978    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.147546 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:18 old-k8s-version-894472 kubelet[1361]: E0816 00:08:18.907490    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.147738 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:33 old-k8s-version-894472 kubelet[1361]: E0816 00:08:33.905951    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.150049 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:34 old-k8s-version-894472 kubelet[1361]: E0816 00:08:34.587830    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	W0816 00:12:53.150259 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:48 old-k8s-version-894472 kubelet[1361]: E0816 00:08:48.913192    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.152418 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:48 old-k8s-version-894472 kubelet[1361]: E0816 00:08:48.930752    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	W0816 00:12:53.152621 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:59 old-k8s-version-894472 kubelet[1361]: E0816 00:08:59.902173    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.152813 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:01 old-k8s-version-894472 kubelet[1361]: E0816 00:09:01.906082    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.153018 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:10 old-k8s-version-894472 kubelet[1361]: E0816 00:09:10.903130    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.153211 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:13 old-k8s-version-894472 kubelet[1361]: E0816 00:09:13.910937    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.155518 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:22 old-k8s-version-894472 kubelet[1361]: E0816 00:09:22.525085    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	W0816 00:12:53.155709 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:28 old-k8s-version-894472 kubelet[1361]: E0816 00:09:28.902818    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.155918 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:36 old-k8s-version-894472 kubelet[1361]: E0816 00:09:36.902337    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.156108 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:42 old-k8s-version-894472 kubelet[1361]: E0816 00:09:42.908410    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.156310 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:50 old-k8s-version-894472 kubelet[1361]: E0816 00:09:50.917752    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.156500 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:53 old-k8s-version-894472 kubelet[1361]: E0816 00:09:53.902385    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.156706 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:02 old-k8s-version-894472 kubelet[1361]: E0816 00:10:02.902250    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.156909 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:07 old-k8s-version-894472 kubelet[1361]: E0816 00:10:07.902036    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.157115 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:13 old-k8s-version-894472 kubelet[1361]: E0816 00:10:13.908849    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.159233 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:22 old-k8s-version-894472 kubelet[1361]: E0816 00:10:22.924390    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	W0816 00:12:53.159438 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:25 old-k8s-version-894472 kubelet[1361]: E0816 00:10:25.902239    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.159639 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:37 old-k8s-version-894472 kubelet[1361]: E0816 00:10:37.903386    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.159827 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:37 old-k8s-version-894472 kubelet[1361]: E0816 00:10:37.904171    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.160016 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:51 old-k8s-version-894472 kubelet[1361]: E0816 00:10:51.902209    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.162349 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:53 old-k8s-version-894472 kubelet[1361]: E0816 00:10:53.458905    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	W0816 00:12:53.162553 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:03 old-k8s-version-894472 kubelet[1361]: E0816 00:11:03.902219    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.162745 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:03 old-k8s-version-894472 kubelet[1361]: E0816 00:11:03.905343    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.162934 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:16 old-k8s-version-894472 kubelet[1361]: E0816 00:11:16.908006    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.163136 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:17 old-k8s-version-894472 kubelet[1361]: E0816 00:11:17.912657    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.163324 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:31 old-k8s-version-894472 kubelet[1361]: E0816 00:11:31.902307    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.163546 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:32 old-k8s-version-894472 kubelet[1361]: E0816 00:11:32.902285    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.163735 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:43 old-k8s-version-894472 kubelet[1361]: E0816 00:11:43.902913    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.163937 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:47 old-k8s-version-894472 kubelet[1361]: E0816 00:11:47.902276    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.164127 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:56 old-k8s-version-894472 kubelet[1361]: E0816 00:11:56.907938    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.164331 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:01 old-k8s-version-894472 kubelet[1361]: E0816 00:12:01.910769    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.164519 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:07 old-k8s-version-894472 kubelet[1361]: E0816 00:12:07.902409    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.164729 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:12 old-k8s-version-894472 kubelet[1361]: E0816 00:12:12.925048    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.164919 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:18 old-k8s-version-894472 kubelet[1361]: E0816 00:12:18.903513    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.165121 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:23 old-k8s-version-894472 kubelet[1361]: E0816 00:12:23.902176    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.165309 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:33 old-k8s-version-894472 kubelet[1361]: E0816 00:12:33.902261    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.165511 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:34 old-k8s-version-894472 kubelet[1361]: E0816 00:12:34.910967    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.165721 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.902827    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.165909 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.903329    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	I0816 00:12:53.165921 2407112 logs.go:123] Gathering logs for coredns [f583e4715841] ...
	I0816 00:12:53.165936 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 f583e4715841"
	I0816 00:12:53.191198 2407112 logs.go:123] Gathering logs for container status ...
	I0816 00:12:53.191227 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
	I0816 00:12:53.281304 2407112 logs.go:123] Gathering logs for coredns [0646646e2348] ...
	I0816 00:12:53.281343 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 0646646e2348"
	I0816 00:12:53.302996 2407112 logs.go:123] Gathering logs for kube-scheduler [003fa784026a] ...
	I0816 00:12:53.303072 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 003fa784026a"
	I0816 00:12:53.328690 2407112 logs.go:123] Gathering logs for kube-proxy [c5eeddd51e95] ...
	I0816 00:12:53.328718 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 c5eeddd51e95"
	I0816 00:12:53.351111 2407112 logs.go:123] Gathering logs for storage-provisioner [a6efcfb5cb17] ...
	I0816 00:12:53.351142 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 a6efcfb5cb17"
	I0816 00:12:53.372302 2407112 logs.go:123] Gathering logs for storage-provisioner [de096650c620] ...
	I0816 00:12:53.372331 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 de096650c620"
	I0816 00:12:53.392435 2407112 logs.go:123] Gathering logs for kube-scheduler [67be7ec054c6] ...
	I0816 00:12:53.392464 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 67be7ec054c6"
	I0816 00:12:53.414931 2407112 out.go:358] Setting ErrFile to fd 2...
	I0816 00:12:53.414956 2407112 out.go:392] TERM=,COLORTERM=, which probably does not support color
	W0816 00:12:53.415035 2407112 out.go:270] X Problems detected in kubelet:
	W0816 00:12:53.415081 2407112 out.go:270]   Aug 16 00:12:23 old-k8s-version-894472 kubelet[1361]: E0816 00:12:23.902176    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.415095 2407112 out.go:270]   Aug 16 00:12:33 old-k8s-version-894472 kubelet[1361]: E0816 00:12:33.902261    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.415117 2407112 out.go:270]   Aug 16 00:12:34 old-k8s-version-894472 kubelet[1361]: E0816 00:12:34.910967    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.415131 2407112 out.go:270]   Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.902827    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:12:53.415139 2407112 out.go:270]   Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.903329    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	I0816 00:12:53.415178 2407112 out.go:358] Setting ErrFile to fd 2...
	I0816 00:12:53.415187 2407112 out.go:392] TERM=,COLORTERM=, which probably does not support color
	I0816 00:12:54.754245 2418220 pod_ready.go:103] pod "coredns-6f6b679f8f-s552c" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:57.253340 2418220 pod_ready.go:103] pod "coredns-6f6b679f8f-s552c" in "kube-system" namespace has status "Ready":"False"
	I0816 00:12:59.253395 2418220 pod_ready.go:103] pod "coredns-6f6b679f8f-s552c" in "kube-system" namespace has status "Ready":"False"
	I0816 00:13:01.254735 2418220 pod_ready.go:103] pod "coredns-6f6b679f8f-s552c" in "kube-system" namespace has status "Ready":"False"
	I0816 00:13:03.417016 2407112 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I0816 00:13:03.428859 2407112 api_server.go:72] duration metric: took 5m57.651838639s to wait for apiserver process to appear ...
	I0816 00:13:03.428887 2407112 api_server.go:88] waiting for apiserver healthz status ...
	I0816 00:13:03.428962 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-apiserver --format={{.ID}}
	I0816 00:13:03.446552 2407112 logs.go:276] 2 containers: [682baec10b08 3d14903eaff5]
	I0816 00:13:03.446627 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_etcd --format={{.ID}}
	I0816 00:13:03.463688 2407112 logs.go:276] 2 containers: [5aacba0afc73 15f34ed96b2b]
	I0816 00:13:03.463770 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_coredns --format={{.ID}}
	I0816 00:13:03.485222 2407112 logs.go:276] 2 containers: [0646646e2348 f583e4715841]
	I0816 00:13:03.485299 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-scheduler --format={{.ID}}
	I0816 00:13:03.503320 2407112 logs.go:276] 2 containers: [67be7ec054c6 003fa784026a]
	I0816 00:13:03.503399 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-proxy --format={{.ID}}
	I0816 00:13:03.522597 2407112 logs.go:276] 2 containers: [c5eeddd51e95 1e79f4e5d490]
	I0816 00:13:03.522681 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-controller-manager --format={{.ID}}
	I0816 00:13:03.542588 2407112 logs.go:276] 2 containers: [cc3ceefdfcf9 821653363c67]
	I0816 00:13:03.542672 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kindnet --format={{.ID}}
	I0816 00:13:03.566735 2407112 logs.go:276] 0 containers: []
	W0816 00:13:03.566759 2407112 logs.go:278] No container was found matching "kindnet"
	I0816 00:13:03.566817 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kubernetes-dashboard --format={{.ID}}
	I0816 00:13:03.585575 2407112 logs.go:276] 1 containers: [3ef1e388df06]
	I0816 00:13:03.585709 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_storage-provisioner --format={{.ID}}
	I0816 00:13:03.604527 2407112 logs.go:276] 2 containers: [a6efcfb5cb17 de096650c620]
	I0816 00:13:03.604570 2407112 logs.go:123] Gathering logs for container status ...
	I0816 00:13:03.604588 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
	I0816 00:13:03.669008 2407112 logs.go:123] Gathering logs for kube-apiserver [682baec10b08] ...
	I0816 00:13:03.669037 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 682baec10b08"
	I0816 00:13:03.712566 2407112 logs.go:123] Gathering logs for etcd [5aacba0afc73] ...
	I0816 00:13:03.712600 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 5aacba0afc73"
	I0816 00:13:03.737934 2407112 logs.go:123] Gathering logs for etcd [15f34ed96b2b] ...
	I0816 00:13:03.738002 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 15f34ed96b2b"
	I0816 00:13:03.771409 2407112 logs.go:123] Gathering logs for coredns [f583e4715841] ...
	I0816 00:13:03.771482 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 f583e4715841"
	I0816 00:13:03.797096 2407112 logs.go:123] Gathering logs for kube-scheduler [67be7ec054c6] ...
	I0816 00:13:03.797128 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 67be7ec054c6"
	I0816 00:13:03.826332 2407112 logs.go:123] Gathering logs for kube-controller-manager [cc3ceefdfcf9] ...
	I0816 00:13:03.826367 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 cc3ceefdfcf9"
	I0816 00:13:03.865698 2407112 logs.go:123] Gathering logs for Docker ...
	I0816 00:13:03.865729 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u docker -u cri-docker -n 400"
	I0816 00:13:03.892350 2407112 logs.go:123] Gathering logs for kubelet ...
	I0816 00:13:03.892378 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
	W0816 00:13:03.954017 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.595590    1361 reflector.go:138] object-"kube-system"/"kube-proxy-token-7vfmt": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "kube-proxy-token-7vfmt" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
	W0816 00:13:03.954368 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.596830    1361 reflector.go:138] object-"kube-system"/"coredns": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "coredns" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
	W0816 00:13:03.954651 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.597148    1361 reflector.go:138] object-"kube-system"/"coredns-token-xzs4d": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "coredns-token-xzs4d" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
	W0816 00:13:03.954880 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.597346    1361 reflector.go:138] object-"kube-system"/"metrics-server-token-545hd": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "metrics-server-token-545hd" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
	W0816 00:13:03.955093 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.597537    1361 reflector.go:138] object-"kube-system"/"kube-proxy": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "kube-proxy" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
	W0816 00:13:03.955328 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.597780    1361 reflector.go:138] object-"kube-system"/"storage-provisioner-token-7rcl5": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "storage-provisioner-token-7rcl5" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
	W0816 00:13:03.955543 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.597976    1361 reflector.go:138] object-"default"/"default-token-zv2bb": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "default-token-zv2bb" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "default": no relationship found between node 'old-k8s-version-894472' and this object
	W0816 00:13:03.966447 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:25 old-k8s-version-894472 kubelet[1361]: E0816 00:07:25.865188    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	W0816 00:13:03.967038 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:25 old-k8s-version-894472 kubelet[1361]: E0816 00:07:25.961596    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.967719 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:27 old-k8s-version-894472 kubelet[1361]: E0816 00:07:27.069241    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.970902 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:40 old-k8s-version-894472 kubelet[1361]: E0816 00:07:40.936997    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	W0816 00:13:03.971269 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:41 old-k8s-version-894472 kubelet[1361]: E0816 00:07:41.174078    1361 reflector.go:138] object-"kubernetes-dashboard"/"kubernetes-dashboard-token-2w5nt": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "kubernetes-dashboard-token-2w5nt" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "kubernetes-dashboard": no relationship found between node 'old-k8s-version-894472' and this object
	W0816 00:13:03.975634 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:48 old-k8s-version-894472 kubelet[1361]: E0816 00:07:48.480988    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	W0816 00:13:03.976189 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:49 old-k8s-version-894472 kubelet[1361]: E0816 00:07:49.497915    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.976380 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:52 old-k8s-version-894472 kubelet[1361]: E0816 00:07:52.902604    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.976839 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:56 old-k8s-version-894472 kubelet[1361]: E0816 00:07:56.570944    1361 pod_workers.go:191] Error syncing pod 3c69c2c8-274b-42e9-83f4-e56b1a377a84 ("storage-provisioner_kube-system(3c69c2c8-274b-42e9-83f4-e56b1a377a84)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 10s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(3c69c2c8-274b-42e9-83f4-e56b1a377a84)"
	W0816 00:13:03.979485 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:04 old-k8s-version-894472 kubelet[1361]: E0816 00:08:04.540574    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	W0816 00:13:03.981633 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:04 old-k8s-version-894472 kubelet[1361]: E0816 00:08:04.937109    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	W0816 00:13:03.981960 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:18 old-k8s-version-894472 kubelet[1361]: E0816 00:08:18.906978    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.982163 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:18 old-k8s-version-894472 kubelet[1361]: E0816 00:08:18.907490    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.982356 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:33 old-k8s-version-894472 kubelet[1361]: E0816 00:08:33.905951    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.984654 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:34 old-k8s-version-894472 kubelet[1361]: E0816 00:08:34.587830    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	W0816 00:13:03.984859 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:48 old-k8s-version-894472 kubelet[1361]: E0816 00:08:48.913192    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.986993 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:48 old-k8s-version-894472 kubelet[1361]: E0816 00:08:48.930752    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	W0816 00:13:03.987196 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:59 old-k8s-version-894472 kubelet[1361]: E0816 00:08:59.902173    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.987384 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:01 old-k8s-version-894472 kubelet[1361]: E0816 00:09:01.906082    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.987586 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:10 old-k8s-version-894472 kubelet[1361]: E0816 00:09:10.903130    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.987776 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:13 old-k8s-version-894472 kubelet[1361]: E0816 00:09:13.910937    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.990067 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:22 old-k8s-version-894472 kubelet[1361]: E0816 00:09:22.525085    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	W0816 00:13:03.990260 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:28 old-k8s-version-894472 kubelet[1361]: E0816 00:09:28.902818    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.990464 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:36 old-k8s-version-894472 kubelet[1361]: E0816 00:09:36.902337    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.990653 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:42 old-k8s-version-894472 kubelet[1361]: E0816 00:09:42.908410    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.990862 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:50 old-k8s-version-894472 kubelet[1361]: E0816 00:09:50.917752    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.991053 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:53 old-k8s-version-894472 kubelet[1361]: E0816 00:09:53.902385    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.991255 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:02 old-k8s-version-894472 kubelet[1361]: E0816 00:10:02.902250    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.991445 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:07 old-k8s-version-894472 kubelet[1361]: E0816 00:10:07.902036    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.991646 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:13 old-k8s-version-894472 kubelet[1361]: E0816 00:10:13.908849    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.993804 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:22 old-k8s-version-894472 kubelet[1361]: E0816 00:10:22.924390    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	W0816 00:13:03.994007 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:25 old-k8s-version-894472 kubelet[1361]: E0816 00:10:25.902239    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.994210 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:37 old-k8s-version-894472 kubelet[1361]: E0816 00:10:37.903386    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.994398 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:37 old-k8s-version-894472 kubelet[1361]: E0816 00:10:37.904171    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.994591 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:51 old-k8s-version-894472 kubelet[1361]: E0816 00:10:51.902209    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.996873 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:53 old-k8s-version-894472 kubelet[1361]: E0816 00:10:53.458905    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	W0816 00:13:03.997075 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:03 old-k8s-version-894472 kubelet[1361]: E0816 00:11:03.902219    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.997263 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:03 old-k8s-version-894472 kubelet[1361]: E0816 00:11:03.905343    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.997451 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:16 old-k8s-version-894472 kubelet[1361]: E0816 00:11:16.908006    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.997659 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:17 old-k8s-version-894472 kubelet[1361]: E0816 00:11:17.912657    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.997848 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:31 old-k8s-version-894472 kubelet[1361]: E0816 00:11:31.902307    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.998049 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:32 old-k8s-version-894472 kubelet[1361]: E0816 00:11:32.902285    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.998237 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:43 old-k8s-version-894472 kubelet[1361]: E0816 00:11:43.902913    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.998439 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:47 old-k8s-version-894472 kubelet[1361]: E0816 00:11:47.902276    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.998629 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:56 old-k8s-version-894472 kubelet[1361]: E0816 00:11:56.907938    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.998831 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:01 old-k8s-version-894472 kubelet[1361]: E0816 00:12:01.910769    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.999022 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:07 old-k8s-version-894472 kubelet[1361]: E0816 00:12:07.902409    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.999225 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:12 old-k8s-version-894472 kubelet[1361]: E0816 00:12:12.925048    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.999414 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:18 old-k8s-version-894472 kubelet[1361]: E0816 00:12:18.903513    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.999616 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:23 old-k8s-version-894472 kubelet[1361]: E0816 00:12:23.902176    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:03.999804 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:33 old-k8s-version-894472 kubelet[1361]: E0816 00:12:33.902261    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:04.000006 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:34 old-k8s-version-894472 kubelet[1361]: E0816 00:12:34.910967    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:04.000207 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.902827    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:04.000397 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.903329    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:04.000599 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:58 old-k8s-version-894472 kubelet[1361]: E0816 00:12:58.902545    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:04.000789 2407112 logs.go:138] Found kubelet problem: Aug 16 00:13:02 old-k8s-version-894472 kubelet[1361]: E0816 00:13:02.903209    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	I0816 00:13:04.000801 2407112 logs.go:123] Gathering logs for dmesg ...
	I0816 00:13:04.000816 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
	I0816 00:13:04.020061 2407112 logs.go:123] Gathering logs for kubernetes-dashboard [3ef1e388df06] ...
	I0816 00:13:04.020091 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 3ef1e388df06"
	I0816 00:13:04.044867 2407112 logs.go:123] Gathering logs for storage-provisioner [a6efcfb5cb17] ...
	I0816 00:13:04.044946 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 a6efcfb5cb17"
	I0816 00:13:04.076966 2407112 logs.go:123] Gathering logs for describe nodes ...
	I0816 00:13:04.077044 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.20.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
	I0816 00:13:04.233528 2407112 logs.go:123] Gathering logs for coredns [0646646e2348] ...
	I0816 00:13:04.233554 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 0646646e2348"
	I0816 00:13:04.259485 2407112 logs.go:123] Gathering logs for kube-proxy [1e79f4e5d490] ...
	I0816 00:13:04.259515 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 1e79f4e5d490"
	I0816 00:13:04.286001 2407112 logs.go:123] Gathering logs for kube-apiserver [3d14903eaff5] ...
	I0816 00:13:04.286028 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 3d14903eaff5"
	I0816 00:13:04.367586 2407112 logs.go:123] Gathering logs for kube-scheduler [003fa784026a] ...
	I0816 00:13:04.367621 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 003fa784026a"
	I0816 00:13:04.398199 2407112 logs.go:123] Gathering logs for kube-proxy [c5eeddd51e95] ...
	I0816 00:13:04.398227 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 c5eeddd51e95"
	I0816 00:13:04.420433 2407112 logs.go:123] Gathering logs for kube-controller-manager [821653363c67] ...
	I0816 00:13:04.420463 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 821653363c67"
	I0816 00:13:04.480676 2407112 logs.go:123] Gathering logs for storage-provisioner [de096650c620] ...
	I0816 00:13:04.480722 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 de096650c620"
	I0816 00:13:04.501321 2407112 out.go:358] Setting ErrFile to fd 2...
	I0816 00:13:04.501350 2407112 out.go:392] TERM=,COLORTERM=, which probably does not support color
	W0816 00:13:04.501418 2407112 out.go:270] X Problems detected in kubelet:
	W0816 00:13:04.501441 2407112 out.go:270]   Aug 16 00:12:34 old-k8s-version-894472 kubelet[1361]: E0816 00:12:34.910967    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:04.501458 2407112 out.go:270]   Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.902827    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:04.501468 2407112 out.go:270]   Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.903329    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	W0816 00:13:04.501473 2407112 out.go:270]   Aug 16 00:12:58 old-k8s-version-894472 kubelet[1361]: E0816 00:12:58.902545    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	W0816 00:13:04.501479 2407112 out.go:270]   Aug 16 00:13:02 old-k8s-version-894472 kubelet[1361]: E0816 00:13:02.903209    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	I0816 00:13:04.501496 2407112 out.go:358] Setting ErrFile to fd 2...
	I0816 00:13:04.501501 2407112 out.go:392] TERM=,COLORTERM=, which probably does not support color
	I0816 00:13:03.753991 2418220 pod_ready.go:103] pod "coredns-6f6b679f8f-s552c" in "kube-system" namespace has status "Ready":"False"
	I0816 00:13:06.254027 2418220 pod_ready.go:103] pod "coredns-6f6b679f8f-s552c" in "kube-system" namespace has status "Ready":"False"
	I0816 00:13:08.254181 2418220 pod_ready.go:103] pod "coredns-6f6b679f8f-s552c" in "kube-system" namespace has status "Ready":"False"
	I0816 00:13:09.253067 2418220 pod_ready.go:93] pod "coredns-6f6b679f8f-s552c" in "kube-system" namespace has status "Ready":"True"
	I0816 00:13:09.253094 2418220 pod_ready.go:82] duration metric: took 30.505514285s for pod "coredns-6f6b679f8f-s552c" in "kube-system" namespace to be "Ready" ...
	I0816 00:13:09.253108 2418220 pod_ready.go:79] waiting up to 6m0s for pod "etcd-embed-certs-951478" in "kube-system" namespace to be "Ready" ...
	I0816 00:13:09.258028 2418220 pod_ready.go:93] pod "etcd-embed-certs-951478" in "kube-system" namespace has status "Ready":"True"
	I0816 00:13:09.258053 2418220 pod_ready.go:82] duration metric: took 4.911629ms for pod "etcd-embed-certs-951478" in "kube-system" namespace to be "Ready" ...
	I0816 00:13:09.258064 2418220 pod_ready.go:79] waiting up to 6m0s for pod "kube-apiserver-embed-certs-951478" in "kube-system" namespace to be "Ready" ...
	I0816 00:13:09.263121 2418220 pod_ready.go:93] pod "kube-apiserver-embed-certs-951478" in "kube-system" namespace has status "Ready":"True"
	I0816 00:13:09.263149 2418220 pod_ready.go:82] duration metric: took 5.075211ms for pod "kube-apiserver-embed-certs-951478" in "kube-system" namespace to be "Ready" ...
	I0816 00:13:09.263160 2418220 pod_ready.go:79] waiting up to 6m0s for pod "kube-controller-manager-embed-certs-951478" in "kube-system" namespace to be "Ready" ...
	I0816 00:13:09.268055 2418220 pod_ready.go:93] pod "kube-controller-manager-embed-certs-951478" in "kube-system" namespace has status "Ready":"True"
	I0816 00:13:09.268076 2418220 pod_ready.go:82] duration metric: took 4.908872ms for pod "kube-controller-manager-embed-certs-951478" in "kube-system" namespace to be "Ready" ...
	I0816 00:13:09.268087 2418220 pod_ready.go:79] waiting up to 6m0s for pod "kube-proxy-7wwq6" in "kube-system" namespace to be "Ready" ...
	I0816 00:13:09.273803 2418220 pod_ready.go:93] pod "kube-proxy-7wwq6" in "kube-system" namespace has status "Ready":"True"
	I0816 00:13:09.273831 2418220 pod_ready.go:82] duration metric: took 5.7373ms for pod "kube-proxy-7wwq6" in "kube-system" namespace to be "Ready" ...
	I0816 00:13:09.273843 2418220 pod_ready.go:79] waiting up to 6m0s for pod "kube-scheduler-embed-certs-951478" in "kube-system" namespace to be "Ready" ...
	I0816 00:13:09.651423 2418220 pod_ready.go:93] pod "kube-scheduler-embed-certs-951478" in "kube-system" namespace has status "Ready":"True"
	I0816 00:13:09.651449 2418220 pod_ready.go:82] duration metric: took 377.579145ms for pod "kube-scheduler-embed-certs-951478" in "kube-system" namespace to be "Ready" ...
	I0816 00:13:09.651461 2418220 pod_ready.go:39] duration metric: took 41.931674932s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
	I0816 00:13:09.651480 2418220 api_server.go:52] waiting for apiserver process to appear ...
	I0816 00:13:09.651558 2418220 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I0816 00:13:09.663678 2418220 api_server.go:72] duration metric: took 43.258913829s to wait for apiserver process to appear ...
	I0816 00:13:09.663702 2418220 api_server.go:88] waiting for apiserver healthz status ...
	I0816 00:13:09.663726 2418220 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
	I0816 00:13:09.672527 2418220 api_server.go:279] https://192.168.76.2:8443/healthz returned 200:
	ok
	I0816 00:13:09.673658 2418220 api_server.go:141] control plane version: v1.31.0
	I0816 00:13:09.673719 2418220 api_server.go:131] duration metric: took 10.010265ms to wait for apiserver health ...
	I0816 00:13:09.673743 2418220 system_pods.go:43] waiting for kube-system pods to appear ...
	I0816 00:13:09.854087 2418220 system_pods.go:59] 7 kube-system pods found
	I0816 00:13:09.854122 2418220 system_pods.go:61] "coredns-6f6b679f8f-s552c" [32775d66-0ddd-4cc8-bf05-f0cb75e9a8ac] Running
	I0816 00:13:09.854129 2418220 system_pods.go:61] "etcd-embed-certs-951478" [3f6bf602-b1c9-40ad-a396-83b58b0a2343] Running
	I0816 00:13:09.854133 2418220 system_pods.go:61] "kube-apiserver-embed-certs-951478" [ecb86650-3921-442b-af91-729bad2fcf40] Running
	I0816 00:13:09.854138 2418220 system_pods.go:61] "kube-controller-manager-embed-certs-951478" [53d4d44c-f816-46f6-8cbb-1444e8e7f574] Running
	I0816 00:13:09.854142 2418220 system_pods.go:61] "kube-proxy-7wwq6" [bf244e41-6f4a-453f-a049-3978b84f7a6e] Running
	I0816 00:13:09.854146 2418220 system_pods.go:61] "kube-scheduler-embed-certs-951478" [fe82d9af-0754-4162-b906-b9b03fc21a2b] Running
	I0816 00:13:09.854152 2418220 system_pods.go:61] "storage-provisioner" [0514920d-c907-40ed-b2d7-a27c32f0a5dd] Running
	I0816 00:13:09.854158 2418220 system_pods.go:74] duration metric: took 180.396959ms to wait for pod list to return data ...
	I0816 00:13:09.854167 2418220 default_sa.go:34] waiting for default service account to be created ...
	I0816 00:13:10.051804 2418220 default_sa.go:45] found service account: "default"
	I0816 00:13:10.051834 2418220 default_sa.go:55] duration metric: took 197.657145ms for default service account to be created ...
	I0816 00:13:10.051845 2418220 system_pods.go:116] waiting for k8s-apps to be running ...
	I0816 00:13:10.254618 2418220 system_pods.go:86] 7 kube-system pods found
	I0816 00:13:10.254678 2418220 system_pods.go:89] "coredns-6f6b679f8f-s552c" [32775d66-0ddd-4cc8-bf05-f0cb75e9a8ac] Running
	I0816 00:13:10.254718 2418220 system_pods.go:89] "etcd-embed-certs-951478" [3f6bf602-b1c9-40ad-a396-83b58b0a2343] Running
	I0816 00:13:10.254737 2418220 system_pods.go:89] "kube-apiserver-embed-certs-951478" [ecb86650-3921-442b-af91-729bad2fcf40] Running
	I0816 00:13:10.254752 2418220 system_pods.go:89] "kube-controller-manager-embed-certs-951478" [53d4d44c-f816-46f6-8cbb-1444e8e7f574] Running
	I0816 00:13:10.254763 2418220 system_pods.go:89] "kube-proxy-7wwq6" [bf244e41-6f4a-453f-a049-3978b84f7a6e] Running
	I0816 00:13:10.254772 2418220 system_pods.go:89] "kube-scheduler-embed-certs-951478" [fe82d9af-0754-4162-b906-b9b03fc21a2b] Running
	I0816 00:13:10.254777 2418220 system_pods.go:89] "storage-provisioner" [0514920d-c907-40ed-b2d7-a27c32f0a5dd] Running
	I0816 00:13:10.254802 2418220 system_pods.go:126] duration metric: took 202.934402ms to wait for k8s-apps to be running ...
	I0816 00:13:10.254821 2418220 system_svc.go:44] waiting for kubelet service to be running ....
	I0816 00:13:10.254897 2418220 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I0816 00:13:10.271059 2418220 system_svc.go:56] duration metric: took 16.228932ms WaitForService to wait for kubelet
	I0816 00:13:10.271088 2418220 kubeadm.go:582] duration metric: took 43.866329484s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
	I0816 00:13:10.271138 2418220 node_conditions.go:102] verifying NodePressure condition ...
	I0816 00:13:10.451670 2418220 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
	I0816 00:13:10.451702 2418220 node_conditions.go:123] node cpu capacity is 2
	I0816 00:13:10.451714 2418220 node_conditions.go:105] duration metric: took 180.570519ms to run NodePressure ...
	I0816 00:13:10.451745 2418220 start.go:241] waiting for startup goroutines ...
	I0816 00:13:10.451763 2418220 start.go:246] waiting for cluster config update ...
	I0816 00:13:10.451774 2418220 start.go:255] writing updated cluster config ...
	I0816 00:13:10.452065 2418220 ssh_runner.go:195] Run: rm -f paused
	I0816 00:13:10.508487 2418220 start.go:600] kubectl: 1.31.0, cluster: 1.31.0 (minor skew: 0)
	I0816 00:13:10.511115 2418220 out.go:177] * Done! kubectl is now configured to use "embed-certs-951478" cluster and "default" namespace by default
	I0816 00:13:14.507900 2407112 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
	I0816 00:13:14.517839 2407112 api_server.go:279] https://192.168.85.2:8443/healthz returned 200:
	ok
	I0816 00:13:14.520292 2407112 out.go:201] 
	W0816 00:13:14.522384 2407112 out.go:270] X Exiting due to K8S_UNHEALTHY_CONTROL_PLANE: wait 6m0s for node: wait for healthy API server: controlPlane never updated to v1.20.0
	W0816 00:13:14.522420 2407112 out.go:270] * Suggestion: Control Plane could not update, try minikube delete --all --purge
	W0816 00:13:14.522438 2407112 out.go:270] * Related issue: https://github.com/kubernetes/minikube/issues/11417
	W0816 00:13:14.522444 2407112 out.go:270] * 
	W0816 00:13:14.523417 2407112 out.go:293] ╭─────────────────────────────────────────────────────────────────────────────────────────────╮
	│                                                                                             │
	│    * If the above advice does not help, please let us know:                                 │
	│      https://github.com/kubernetes/minikube/issues/new/choose                               │
	│                                                                                             │
	│    * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue.    │
	│                                                                                             │
	╰─────────────────────────────────────────────────────────────────────────────────────────────╯
	I0816 00:13:14.525402 2407112 out.go:201] 
	
	
	==> Docker <==
	Aug 16 00:08:04 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:08:04.241144445Z" level=warning msg="reference for unknown type: application/vnd.docker.distribution.manifest.v1+prettyjws" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4"
	Aug 16 00:08:04 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:08:04.537789089Z" level=warning msg="Error persisting manifest" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" error="error committing manifest to content store: commit failed: unexpected commit digest sha256:eaee4c452b076cdb05b391ed7e75e1ad0aca136665875ab5d7e2f3d9f4675769, expected sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb: failed precondition" remote="registry.k8s.io/echoserver:1.4"
	Aug 16 00:08:04 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:08:04.537903195Z" level=warning msg="[DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4"
	Aug 16 00:08:04 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:08:04.537937237Z" level=info msg="Attempting next endpoint for pull after error: [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	Aug 16 00:08:04 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:08:04.919362673Z" level=warning msg="Error getting v2 registry: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	Aug 16 00:08:04 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:08:04.919682493Z" level=info msg="Attempting next endpoint for pull after error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	Aug 16 00:08:04 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:08:04.927444655Z" level=error msg="Handler for POST /v1.40/images/create returned error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	Aug 16 00:08:34 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:08:34.286531977Z" level=warning msg="reference for unknown type: application/vnd.docker.distribution.manifest.v1+prettyjws" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4"
	Aug 16 00:08:34 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:08:34.584806623Z" level=warning msg="Error persisting manifest" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" error="error committing manifest to content store: commit failed: unexpected commit digest sha256:eaee4c452b076cdb05b391ed7e75e1ad0aca136665875ab5d7e2f3d9f4675769, expected sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb: failed precondition" remote="registry.k8s.io/echoserver:1.4"
	Aug 16 00:08:34 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:08:34.584908578Z" level=warning msg="[DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4"
	Aug 16 00:08:34 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:08:34.584940224Z" level=info msg="Attempting next endpoint for pull after error: [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	Aug 16 00:08:48 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:08:48.927115931Z" level=warning msg="Error getting v2 registry: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	Aug 16 00:08:48 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:08:48.927565438Z" level=info msg="Attempting next endpoint for pull after error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	Aug 16 00:08:48 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:08:48.930109788Z" level=error msg="Handler for POST /v1.40/images/create returned error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	Aug 16 00:09:22 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:09:22.242821029Z" level=warning msg="reference for unknown type: application/vnd.docker.distribution.manifest.v1+prettyjws" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4"
	Aug 16 00:09:22 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:09:22.522302090Z" level=warning msg="Error persisting manifest" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" error="error committing manifest to content store: commit failed: unexpected commit digest sha256:eaee4c452b076cdb05b391ed7e75e1ad0aca136665875ab5d7e2f3d9f4675769, expected sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb: failed precondition" remote="registry.k8s.io/echoserver:1.4"
	Aug 16 00:09:22 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:09:22.522416410Z" level=warning msg="[DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4"
	Aug 16 00:09:22 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:09:22.522448885Z" level=info msg="Attempting next endpoint for pull after error: [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	Aug 16 00:10:22 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:10:22.919793702Z" level=warning msg="Error getting v2 registry: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	Aug 16 00:10:22 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:10:22.919838025Z" level=info msg="Attempting next endpoint for pull after error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	Aug 16 00:10:22 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:10:22.923647026Z" level=error msg="Handler for POST /v1.40/images/create returned error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
	Aug 16 00:10:53 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:10:53.154537815Z" level=warning msg="reference for unknown type: application/vnd.docker.distribution.manifest.v1+prettyjws" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4"
	Aug 16 00:10:53 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:10:53.456017089Z" level=warning msg="Error persisting manifest" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" error="error committing manifest to content store: commit failed: unexpected commit digest sha256:eaee4c452b076cdb05b391ed7e75e1ad0aca136665875ab5d7e2f3d9f4675769, expected sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb: failed precondition" remote="registry.k8s.io/echoserver:1.4"
	Aug 16 00:10:53 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:10:53.456113037Z" level=warning msg="[DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4"
	Aug 16 00:10:53 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:10:53.456152683Z" level=info msg="Attempting next endpoint for pull after error: [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	
	
	==> container status <==
	CONTAINER           IMAGE                                                                                                 CREATED             STATE               NAME                      ATTEMPT             POD ID              POD
	a6efcfb5cb17e       ba04bb24b9575                                                                                         5 minutes ago       Running             storage-provisioner       2                   b2d7da83399c4       storage-provisioner
	3ef1e388df06a       kubernetesui/dashboard@sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93        5 minutes ago       Running             kubernetes-dashboard      0                   d92212b423133       kubernetes-dashboard-cd95d586-k7zq2
	0646646e23483       db91994f4ee8f                                                                                         5 minutes ago       Running             coredns                   1                   cd221ea4db9ee       coredns-74ff55c5b-jrtq9
	de096650c6200       ba04bb24b9575                                                                                         5 minutes ago       Exited              storage-provisioner       1                   b2d7da83399c4       storage-provisioner
	0a4983d325c06       1611cd07b61d5                                                                                         5 minutes ago       Running             busybox                   1                   af72f54bb71ea       busybox
	c5eeddd51e956       25a5233254979                                                                                         5 minutes ago       Running             kube-proxy                1                   03bef86fbdbfd       kube-proxy-4n8ls
	682baec10b080       2c08bbbc02d3a                                                                                         6 minutes ago       Running             kube-apiserver            1                   f451c1ef1172b       kube-apiserver-old-k8s-version-894472
	5aacba0afc730       05b738aa1bc63                                                                                         6 minutes ago       Running             etcd                      1                   1220260c41c8e       etcd-old-k8s-version-894472
	cc3ceefdfcf91       1df8a2b116bd1                                                                                         6 minutes ago       Running             kube-controller-manager   1                   49abbaab55b55       kube-controller-manager-old-k8s-version-894472
	67be7ec054c6a       e7605f88f17d6                                                                                         6 minutes ago       Running             kube-scheduler            1                   adb7fa2a5b6ad       kube-scheduler-old-k8s-version-894472
	77aa51e8bfe29       gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e   6 minutes ago       Exited              busybox                   0                   07ecfca802b01       busybox
	f583e47158417       db91994f4ee8f                                                                                         8 minutes ago       Exited              coredns                   0                   c1b510b003f6e       coredns-74ff55c5b-jrtq9
	1e79f4e5d490f       25a5233254979                                                                                         8 minutes ago       Exited              kube-proxy                0                   66481e83ee2c5       kube-proxy-4n8ls
	003fa784026ad       e7605f88f17d6                                                                                         8 minutes ago       Exited              kube-scheduler            0                   49887af6e9c3f       kube-scheduler-old-k8s-version-894472
	821653363c672       1df8a2b116bd1                                                                                         8 minutes ago       Exited              kube-controller-manager   0                   c1c603fc6e77b       kube-controller-manager-old-k8s-version-894472
	3d14903eaff5d       2c08bbbc02d3a                                                                                         8 minutes ago       Exited              kube-apiserver            0                   857b0611a6255       kube-apiserver-old-k8s-version-894472
	15f34ed96b2b9       05b738aa1bc63                                                                                         8 minutes ago       Exited              etcd                      0                   50f3363af98c7       etcd-old-k8s-version-894472
	
	
	==> coredns [0646646e2348] <==
	.:53
	[INFO] plugin/reload: Running configuration MD5 = 093a0bf1423dd8c4eee62372bb216168
	CoreDNS-1.7.0
	linux/arm64, go1.14.4, f59c03d
	[INFO] 127.0.0.1:35652 - 19807 "HINFO IN 5672222126629147568.7702291352317514369. udp 57 false 512" NXDOMAIN qr,rd,ra 57 0.049405724s
	
	
	==> coredns [f583e4715841] <==
	.:53
	[INFO] plugin/reload: Running configuration MD5 = db32ca3650231d74073ff4cf814959a7
	CoreDNS-1.7.0
	linux/arm64, go1.14.4, f59c03d
	[INFO] plugin/ready: Still waiting on: "kubernetes"
	[INFO] plugin/ready: Still waiting on: "kubernetes"
	[INFO] plugin/ready: Still waiting on: "kubernetes"
	[INFO] Reloading
	[INFO] plugin/health: Going into lameduck mode for 5s
	[INFO] plugin/reload: Running configuration MD5 = 093a0bf1423dd8c4eee62372bb216168
	[INFO] Reloading complete
	[INFO] SIGTERM: Shutting down servers then terminating
	[INFO] plugin/health: Going into lameduck mode for 5s
	I0816 00:05:39.089365       1 trace.go:116] Trace[2019727887]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125 (started: 2024-08-16 00:05:09.088876528 +0000 UTC m=+0.041772691) (total time: 30.000384344s):
	Trace[2019727887]: [30.000384344s] [30.000384344s] END
	E0816 00:05:39.089390       1 reflector.go:178] pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125: Failed to list *v1.Service: Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
	I0816 00:05:39.089584       1 trace.go:116] Trace[939984059]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125 (started: 2024-08-16 00:05:09.089389082 +0000 UTC m=+0.042285245) (total time: 30.000182043s):
	Trace[939984059]: [30.000182043s] [30.000182043s] END
	E0816 00:05:39.089590       1 reflector.go:178] pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125: Failed to list *v1.Endpoints: Get "https://10.96.0.1:443/api/v1/endpoints?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
	I0816 00:05:39.090074       1 trace.go:116] Trace[911902081]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125 (started: 2024-08-16 00:05:09.088493473 +0000 UTC m=+0.041389636) (total time: 30.001562843s):
	Trace[911902081]: [30.001562843s] [30.001562843s] END
	E0816 00:05:39.090082       1 reflector.go:178] pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125: Failed to list *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
	E0816 00:06:45.046755       1 reflector.go:382] pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125: Failed to watch *v1.Service: Get "https://10.96.0.1:443/api/v1/services?allowWatchBookmarks=true&resourceVersion=587&timeout=6m5s&timeoutSeconds=365&watch=true": dial tcp 10.96.0.1:443: connect: connection refused
	E0816 00:06:45.046804       1 reflector.go:382] pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125: Failed to watch *v1.Endpoints: Get "https://10.96.0.1:443/api/v1/endpoints?allowWatchBookmarks=true&resourceVersion=589&timeout=8m23s&timeoutSeconds=503&watch=true": dial tcp 10.96.0.1:443: connect: connection refused
	E0816 00:06:45.046846       1 reflector.go:382] pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125: Failed to watch *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?allowWatchBookmarks=true&resourceVersion=200&timeout=6m27s&timeoutSeconds=387&watch=true": dial tcp 10.96.0.1:443: connect: connection refused
	
	
	==> describe nodes <==
	Name:               old-k8s-version-894472
	Roles:              control-plane,master
	Labels:             beta.kubernetes.io/arch=arm64
	                    beta.kubernetes.io/os=linux
	                    kubernetes.io/arch=arm64
	                    kubernetes.io/hostname=old-k8s-version-894472
	                    kubernetes.io/os=linux
	                    minikube.k8s.io/commit=fe9c1d9e27059a205b0df8e5e482803b65ef8774
	                    minikube.k8s.io/name=old-k8s-version-894472
	                    minikube.k8s.io/primary=true
	                    minikube.k8s.io/updated_at=2024_08_16T00_04_51_0700
	                    minikube.k8s.io/version=v1.33.1
	                    node-role.kubernetes.io/control-plane=
	                    node-role.kubernetes.io/master=
	Annotations:        kubeadm.alpha.kubernetes.io/cri-socket: /var/run/dockershim.sock
	                    node.alpha.kubernetes.io/ttl: 0
	                    volumes.kubernetes.io/controller-managed-attach-detach: true
	CreationTimestamp:  Fri, 16 Aug 2024 00:04:48 +0000
	Taints:             <none>
	Unschedulable:      false
	Lease:
	  HolderIdentity:  old-k8s-version-894472
	  AcquireTime:     <unset>
	  RenewTime:       Fri, 16 Aug 2024 00:13:15 +0000
	Conditions:
	  Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
	  ----             ------  -----------------                 ------------------                ------                       -------
	  MemoryPressure   False   Fri, 16 Aug 2024 00:13:15 +0000   Fri, 16 Aug 2024 00:04:39 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
	  DiskPressure     False   Fri, 16 Aug 2024 00:13:15 +0000   Fri, 16 Aug 2024 00:04:39 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
	  PIDPressure      False   Fri, 16 Aug 2024 00:13:15 +0000   Fri, 16 Aug 2024 00:04:39 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
	  Ready            True    Fri, 16 Aug 2024 00:13:15 +0000   Fri, 16 Aug 2024 00:05:05 +0000   KubeletReady                 kubelet is posting ready status
	Addresses:
	  InternalIP:  192.168.85.2
	  Hostname:    old-k8s-version-894472
	Capacity:
	  cpu:                2
	  ephemeral-storage:  203034800Ki
	  hugepages-1Gi:      0
	  hugepages-2Mi:      0
	  hugepages-32Mi:     0
	  hugepages-64Ki:     0
	  memory:             8022364Ki
	  pods:               110
	Allocatable:
	  cpu:                2
	  ephemeral-storage:  203034800Ki
	  hugepages-1Gi:      0
	  hugepages-2Mi:      0
	  hugepages-32Mi:     0
	  hugepages-64Ki:     0
	  memory:             8022364Ki
	  pods:               110
	System Info:
	  Machine ID:                 712e30e2bb5b4aa5b88ceeda6cdeea71
	  System UUID:                3de89e43-0f81-4af5-9a27-89ca1d31f15a
	  Boot ID:                    cc0e1141-aa97-44ec-a7be-f3cd9b66c5f7
	  Kernel Version:             5.15.0-1067-aws
	  OS Image:                   Ubuntu 22.04.4 LTS
	  Operating System:           linux
	  Architecture:               arm64
	  Container Runtime Version:  docker://27.1.2
	  Kubelet Version:            v1.20.0
	  Kube-Proxy Version:         v1.20.0
	PodCIDR:                      10.244.0.0/24
	PodCIDRs:                     10.244.0.0/24
	Non-terminated Pods:          (11 in total)
	  Namespace                   Name                                              CPU Requests  CPU Limits  Memory Requests  Memory Limits  AGE
	  ---------                   ----                                              ------------  ----------  ---------------  -------------  ---
	  default                     busybox                                           0 (0%)        0 (0%)      0 (0%)           0 (0%)         6m41s
	  kube-system                 coredns-74ff55c5b-jrtq9                           100m (5%)     0 (0%)      70Mi (0%)        170Mi (2%)     8m9s
	  kube-system                 etcd-old-k8s-version-894472                       100m (5%)     0 (0%)      100Mi (1%)       0 (0%)         8m20s
	  kube-system                 kube-apiserver-old-k8s-version-894472             250m (12%)    0 (0%)      0 (0%)           0 (0%)         8m20s
	  kube-system                 kube-controller-manager-old-k8s-version-894472    200m (10%)    0 (0%)      0 (0%)           0 (0%)         8m20s
	  kube-system                 kube-proxy-4n8ls                                  0 (0%)        0 (0%)      0 (0%)           0 (0%)         8m9s
	  kube-system                 kube-scheduler-old-k8s-version-894472             100m (5%)     0 (0%)      0 (0%)           0 (0%)         8m20s
	  kube-system                 metrics-server-9975d5f86-tt4kd                    100m (5%)     0 (0%)      200Mi (2%)       0 (0%)         6m31s
	  kube-system                 storage-provisioner                               0 (0%)        0 (0%)      0 (0%)           0 (0%)         8m6s
	  kubernetes-dashboard        dashboard-metrics-scraper-8d5bb5db8-vst6j         0 (0%)        0 (0%)      0 (0%)           0 (0%)         5m34s
	  kubernetes-dashboard        kubernetes-dashboard-cd95d586-k7zq2               0 (0%)        0 (0%)      0 (0%)           0 (0%)         5m34s
	Allocated resources:
	  (Total limits may be over 100 percent, i.e., overcommitted.)
	  Resource           Requests    Limits
	  --------           --------    ------
	  cpu                850m (42%)  0 (0%)
	  memory             370Mi (4%)  170Mi (2%)
	  ephemeral-storage  100Mi (0%)  0 (0%)
	  hugepages-1Gi      0 (0%)      0 (0%)
	  hugepages-2Mi      0 (0%)      0 (0%)
	  hugepages-32Mi     0 (0%)      0 (0%)
	  hugepages-64Ki     0 (0%)      0 (0%)
	Events:
	  Type    Reason                   Age                    From        Message
	  ----    ------                   ----                   ----        -------
	  Normal  Starting                 8m37s                  kubelet     Starting kubelet.
	  Normal  NodeHasSufficientMemory  8m37s (x5 over 8m37s)  kubelet     Node old-k8s-version-894472 status is now: NodeHasSufficientMemory
	  Normal  NodeHasNoDiskPressure    8m37s (x5 over 8m37s)  kubelet     Node old-k8s-version-894472 status is now: NodeHasNoDiskPressure
	  Normal  NodeHasSufficientPID     8m37s (x5 over 8m37s)  kubelet     Node old-k8s-version-894472 status is now: NodeHasSufficientPID
	  Normal  NodeAllocatableEnforced  8m37s                  kubelet     Updated Node Allocatable limit across pods
	  Normal  Starting                 8m21s                  kubelet     Starting kubelet.
	  Normal  NodeHasSufficientMemory  8m21s                  kubelet     Node old-k8s-version-894472 status is now: NodeHasSufficientMemory
	  Normal  NodeHasNoDiskPressure    8m21s                  kubelet     Node old-k8s-version-894472 status is now: NodeHasNoDiskPressure
	  Normal  NodeHasSufficientPID     8m21s                  kubelet     Node old-k8s-version-894472 status is now: NodeHasSufficientPID
	  Normal  NodeAllocatableEnforced  8m20s                  kubelet     Updated Node Allocatable limit across pods
	  Normal  NodeReady                8m10s                  kubelet     Node old-k8s-version-894472 status is now: NodeReady
	  Normal  Starting                 8m6s                   kube-proxy  Starting kube-proxy.
	  Normal  Starting                 6m7s                   kubelet     Starting kubelet.
	  Normal  NodeAllocatableEnforced  6m7s                   kubelet     Updated Node Allocatable limit across pods
	  Normal  NodeHasSufficientMemory  6m6s (x8 over 6m7s)    kubelet     Node old-k8s-version-894472 status is now: NodeHasSufficientMemory
	  Normal  NodeHasNoDiskPressure    6m6s (x8 over 6m7s)    kubelet     Node old-k8s-version-894472 status is now: NodeHasNoDiskPressure
	  Normal  NodeHasSufficientPID     6m6s (x7 over 6m7s)    kubelet     Node old-k8s-version-894472 status is now: NodeHasSufficientPID
	  Normal  Starting                 5m50s                  kube-proxy  Starting kube-proxy.
	
	
	==> dmesg <==
	
	
	==> etcd [15f34ed96b2b] <==
	raft2024/08/16 00:04:40 INFO: 9f0758e1c58a86ed became leader at term 2
	raft2024/08/16 00:04:40 INFO: raft.node: 9f0758e1c58a86ed elected leader 9f0758e1c58a86ed at term 2
	2024-08-16 00:04:40.078308 I | etcdserver: setting up the initial cluster version to 3.4
	2024-08-16 00:04:40.078459 I | embed: ready to serve client requests
	2024-08-16 00:04:40.079878 I | embed: serving client requests on 127.0.0.1:2379
	2024-08-16 00:04:40.080242 I | embed: ready to serve client requests
	2024-08-16 00:04:40.081492 I | embed: serving client requests on 192.168.85.2:2379
	2024-08-16 00:04:40.125684 I | etcdserver: published {Name:old-k8s-version-894472 ClientURLs:[https://192.168.85.2:2379]} to cluster 68eaea490fab4e05
	2024-08-16 00:04:40.127555 N | etcdserver/membership: set the initial cluster version to 3.4
	2024-08-16 00:04:40.128682 I | etcdserver/api: enabled capabilities for version 3.4
	2024-08-16 00:04:56.044472 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-08-16 00:04:58.785442 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-08-16 00:05:08.805723 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-08-16 00:05:18.785694 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-08-16 00:05:28.785392 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-08-16 00:05:38.787903 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-08-16 00:05:48.790025 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-08-16 00:05:58.787081 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-08-16 00:06:08.785406 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-08-16 00:06:18.785465 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-08-16 00:06:28.785683 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-08-16 00:06:38.785382 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-08-16 00:06:45.212146 N | pkg/osutil: received terminated signal, shutting down...
	WARNING: 2024/08/16 00:06:45 grpc: addrConn.createTransport failed to connect to {127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	2024-08-16 00:06:45.296668 I | etcdserver: skipped leadership transfer for single voting member cluster
	
	
	==> etcd [5aacba0afc73] <==
	2024-08-16 00:09:07.590100 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-08-16 00:09:17.589934 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-08-16 00:09:27.590545 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-08-16 00:09:37.590003 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-08-16 00:09:47.590047 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-08-16 00:09:57.590034 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-08-16 00:10:07.590097 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-08-16 00:10:17.590073 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-08-16 00:10:27.590074 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-08-16 00:10:37.589986 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-08-16 00:10:47.589998 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-08-16 00:10:57.589976 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-08-16 00:11:07.589990 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-08-16 00:11:17.589985 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-08-16 00:11:27.589987 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-08-16 00:11:37.589994 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-08-16 00:11:47.590123 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-08-16 00:11:57.590066 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-08-16 00:12:07.590168 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-08-16 00:12:17.589938 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-08-16 00:12:27.590057 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-08-16 00:12:37.590003 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-08-16 00:12:47.590130 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-08-16 00:12:57.590096 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	2024-08-16 00:13:07.591443 I | etcdserver/api/etcdhttp: /health OK (status code 200)
	
	
	==> kernel <==
	 00:13:15 up  8:55,  0 users,  load average: 1.54, 2.78, 3.78
	Linux old-k8s-version-894472 5.15.0-1067-aws #73~20.04.1-Ubuntu SMP Wed Jul 24 17:31:05 UTC 2024 aarch64 aarch64 aarch64 GNU/Linux
	PRETTY_NAME="Ubuntu 22.04.4 LTS"
	
	
	==> kube-apiserver [3d14903eaff5] <==
	W0816 00:06:45.312772       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0816 00:06:45.312816       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0816 00:06:45.312858       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0816 00:06:45.312903       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0816 00:06:45.312945       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0816 00:06:45.312987       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0816 00:06:45.313026       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0816 00:06:45.313065       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0816 00:06:45.313107       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	I0816 00:06:45.314191       1 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick
	I0816 00:06:45.314319       1 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick
	I0816 00:06:45.314528       1 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick
	W0816 00:06:45.314649       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0816 00:06:45.314853       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0816 00:06:45.314970       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0816 00:06:45.315019       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0816 00:06:45.316503       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0816 00:06:45.316563       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0816 00:06:45.316608       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0816 00:06:45.316665       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0816 00:06:45.316724       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0816 00:06:45.316765       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0816 00:06:45.316801       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0816 00:06:45.316837       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	W0816 00:06:45.316871       1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379  <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
	
	
	==> kube-apiserver [682baec10b08] <==
	I0816 00:09:50.663702       1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379  <nil> 0 <nil>}] <nil> <nil>}
	I0816 00:09:50.663711       1 clientconn.go:948] ClientConn switching balancer to "pick_first"
	W0816 00:10:25.858778       1 handler_proxy.go:102] no RequestInfo found in the context
	E0816 00:10:25.858912       1 controller.go:116] loading OpenAPI spec for "v1beta1.metrics.k8s.io" failed with: failed to retrieve openAPI spec, http error: ResponseCode: 503, Body: service unavailable
	, Header: map[Content-Type:[text/plain; charset=utf-8] X-Content-Type-Options:[nosniff]]
	I0816 00:10:25.858932       1 controller.go:129] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
	I0816 00:10:27.023120       1 client.go:360] parsed scheme: "passthrough"
	I0816 00:10:27.023167       1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379  <nil> 0 <nil>}] <nil> <nil>}
	I0816 00:10:27.023176       1 clientconn.go:948] ClientConn switching balancer to "pick_first"
	I0816 00:11:05.478410       1 client.go:360] parsed scheme: "passthrough"
	I0816 00:11:05.478456       1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379  <nil> 0 <nil>}] <nil> <nil>}
	I0816 00:11:05.478465       1 clientconn.go:948] ClientConn switching balancer to "pick_first"
	I0816 00:11:41.279258       1 client.go:360] parsed scheme: "passthrough"
	I0816 00:11:41.279307       1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379  <nil> 0 <nil>}] <nil> <nil>}
	I0816 00:11:41.279316       1 clientconn.go:948] ClientConn switching balancer to "pick_first"
	W0816 00:12:23.659838       1 handler_proxy.go:102] no RequestInfo found in the context
	E0816 00:12:23.660033       1 controller.go:116] loading OpenAPI spec for "v1beta1.metrics.k8s.io" failed with: failed to retrieve openAPI spec, http error: ResponseCode: 503, Body: service unavailable
	, Header: map[Content-Type:[text/plain; charset=utf-8] X-Content-Type-Options:[nosniff]]
	I0816 00:12:23.660069       1 controller.go:129] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
	I0816 00:12:23.910084       1 client.go:360] parsed scheme: "passthrough"
	I0816 00:12:23.910266       1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379  <nil> 0 <nil>}] <nil> <nil>}
	I0816 00:12:23.910285       1 clientconn.go:948] ClientConn switching balancer to "pick_first"
	I0816 00:12:53.997069       1 client.go:360] parsed scheme: "passthrough"
	I0816 00:12:53.997112       1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379  <nil> 0 <nil>}] <nil> <nil>}
	I0816 00:12:53.997121       1 clientconn.go:948] ClientConn switching balancer to "pick_first"
	
	
	==> kube-controller-manager [821653363c67] <==
	I0816 00:05:06.714136       1 node_lifecycle_controller.go:1429] Initializing eviction metric for zone: 
	W0816 00:05:06.714197       1 node_lifecycle_controller.go:1044] Missing timestamp for Node old-k8s-version-894472. Assuming now as a timestamp.
	I0816 00:05:06.714232       1 node_lifecycle_controller.go:1245] Controller detected that zone  is now in state Normal.
	I0816 00:05:06.714447       1 shared_informer.go:247] Caches are synced for resource quota 
	I0816 00:05:06.714559       1 event.go:291] "Event occurred" object="old-k8s-version-894472" kind="Node" apiVersion="v1" type="Normal" reason="RegisteredNode" message="Node old-k8s-version-894472 event: Registered Node old-k8s-version-894472 in Controller"
	I0816 00:05:06.735914       1 shared_informer.go:247] Caches are synced for resource quota 
	I0816 00:05:06.811375       1 shared_informer.go:247] Caches are synced for persistent volume 
	I0816 00:05:06.865254       1 event.go:291] "Event occurred" object="kube-system/coredns" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set coredns-74ff55c5b to 2"
	I0816 00:05:06.899350       1 event.go:291] "Event occurred" object="kube-system/coredns-74ff55c5b" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-74ff55c5b-9l4p8"
	I0816 00:05:06.899571       1 event.go:291] "Event occurred" object="kube-system/kube-proxy" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-4n8ls"
	I0816 00:05:06.964894       1 shared_informer.go:240] Waiting for caches to sync for garbage collector
	I0816 00:05:07.037464       1 event.go:291] "Event occurred" object="kube-system/coredns-74ff55c5b" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-74ff55c5b-jrtq9"
	E0816 00:05:07.049580       1 clusterroleaggregation_controller.go:181] edit failed with : Operation cannot be fulfilled on clusterroles.rbac.authorization.k8s.io "edit": the object has been modified; please apply your changes to the latest version and try again
	E0816 00:05:07.125178       1 daemon_controller.go:320] kube-system/kube-proxy failed with : error storing status for daemon set &v1.DaemonSet{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"kube-proxy", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"4b07e50b-070c-49d6-b592-1506db189549", ResourceVersion:"278", Generation:1, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:63859363491, loc:(*time.Location)(0x632eb80)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"k8s-app":"kube-proxy"}, Annotations:map[string]string{"deprecated.daemonset.template.generation":"1"}, OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:"kubeadm", Operation:"Update", APIVersion:"apps/v1", Time:(*v1.Time)(0x400069d320), FieldsType:"FieldsV1", FieldsV1:(*v1.FieldsV1)(0x400069d340)}}}, Spec:v1.DaemonSetSpec{Selector:(*v1.
LabelSelector)(0x400069d360), Template:v1.PodTemplateSpec{ObjectMeta:v1.ObjectMeta{Name:"", GenerateName:"", Namespace:"", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"k8s-app":"kube-proxy"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, Spec:v1.PodSpec{Volumes:[]v1.Volume{v1.Volume{Name:"kube-proxy", VolumeSource:v1.VolumeSource{HostPath:(*v1.HostPathVolumeSource)(nil), EmptyDir:(*v1.EmptyDirVolumeSource)(nil), GCEPersistentDisk:(*v1.GCEPersistentDiskVolumeSource)(nil), AWSElasticBlockStore:(*v1.AWSElasticBlockStoreVolumeSource)(nil), GitRepo:(*v1.GitRepoVolumeSource)(nil), Secret:(*v1.SecretVolumeSource)(nil), NFS:(*v1.NFSVolumeSource)(nil), ISCSI:(*v1.ISCSIVolumeSource)(nil), Glusterfs:(*v1.Gl
usterfsVolumeSource)(nil), PersistentVolumeClaim:(*v1.PersistentVolumeClaimVolumeSource)(nil), RBD:(*v1.RBDVolumeSource)(nil), FlexVolume:(*v1.FlexVolumeSource)(nil), Cinder:(*v1.CinderVolumeSource)(nil), CephFS:(*v1.CephFSVolumeSource)(nil), Flocker:(*v1.FlockerVolumeSource)(nil), DownwardAPI:(*v1.DownwardAPIVolumeSource)(nil), FC:(*v1.FCVolumeSource)(nil), AzureFile:(*v1.AzureFileVolumeSource)(nil), ConfigMap:(*v1.ConfigMapVolumeSource)(0x4000df78c0), VsphereVolume:(*v1.VsphereVirtualDiskVolumeSource)(nil), Quobyte:(*v1.QuobyteVolumeSource)(nil), AzureDisk:(*v1.AzureDiskVolumeSource)(nil), PhotonPersistentDisk:(*v1.PhotonPersistentDiskVolumeSource)(nil), Projected:(*v1.ProjectedVolumeSource)(nil), PortworxVolume:(*v1.PortworxVolumeSource)(nil), ScaleIO:(*v1.ScaleIOVolumeSource)(nil), StorageOS:(*v1.StorageOSVolumeSource)(nil), CSI:(*v1.CSIVolumeSource)(nil), Ephemeral:(*v1.EphemeralVolumeSource)(nil)}}, v1.Volume{Name:"xtables-lock", VolumeSource:v1.VolumeSource{HostPath:(*v1.HostPathVolumeSource)(0x400069d
380), EmptyDir:(*v1.EmptyDirVolumeSource)(nil), GCEPersistentDisk:(*v1.GCEPersistentDiskVolumeSource)(nil), AWSElasticBlockStore:(*v1.AWSElasticBlockStoreVolumeSource)(nil), GitRepo:(*v1.GitRepoVolumeSource)(nil), Secret:(*v1.SecretVolumeSource)(nil), NFS:(*v1.NFSVolumeSource)(nil), ISCSI:(*v1.ISCSIVolumeSource)(nil), Glusterfs:(*v1.GlusterfsVolumeSource)(nil), PersistentVolumeClaim:(*v1.PersistentVolumeClaimVolumeSource)(nil), RBD:(*v1.RBDVolumeSource)(nil), FlexVolume:(*v1.FlexVolumeSource)(nil), Cinder:(*v1.CinderVolumeSource)(nil), CephFS:(*v1.CephFSVolumeSource)(nil), Flocker:(*v1.FlockerVolumeSource)(nil), DownwardAPI:(*v1.DownwardAPIVolumeSource)(nil), FC:(*v1.FCVolumeSource)(nil), AzureFile:(*v1.AzureFileVolumeSource)(nil), ConfigMap:(*v1.ConfigMapVolumeSource)(nil), VsphereVolume:(*v1.VsphereVirtualDiskVolumeSource)(nil), Quobyte:(*v1.QuobyteVolumeSource)(nil), AzureDisk:(*v1.AzureDiskVolumeSource)(nil), PhotonPersistentDisk:(*v1.PhotonPersistentDiskVolumeSource)(nil), Projected:(*v1.ProjectedVolumeS
ource)(nil), PortworxVolume:(*v1.PortworxVolumeSource)(nil), ScaleIO:(*v1.ScaleIOVolumeSource)(nil), StorageOS:(*v1.StorageOSVolumeSource)(nil), CSI:(*v1.CSIVolumeSource)(nil), Ephemeral:(*v1.EphemeralVolumeSource)(nil)}}, v1.Volume{Name:"lib-modules", VolumeSource:v1.VolumeSource{HostPath:(*v1.HostPathVolumeSource)(0x400069d3a0), EmptyDir:(*v1.EmptyDirVolumeSource)(nil), GCEPersistentDisk:(*v1.GCEPersistentDiskVolumeSource)(nil), AWSElasticBlockStore:(*v1.AWSElasticBlockStoreVolumeSource)(nil), GitRepo:(*v1.GitRepoVolumeSource)(nil), Secret:(*v1.SecretVolumeSource)(nil), NFS:(*v1.NFSVolumeSource)(nil), ISCSI:(*v1.ISCSIVolumeSource)(nil), Glusterfs:(*v1.GlusterfsVolumeSource)(nil), PersistentVolumeClaim:(*v1.PersistentVolumeClaimVolumeSource)(nil), RBD:(*v1.RBDVolumeSource)(nil), FlexVolume:(*v1.FlexVolumeSource)(nil), Cinder:(*v1.CinderVolumeSource)(nil), CephFS:(*v1.CephFSVolumeSource)(nil), Flocker:(*v1.FlockerVolumeSource)(nil), DownwardAPI:(*v1.DownwardAPIVolumeSource)(nil), FC:(*v1.FCVolumeSource)(nil),
AzureFile:(*v1.AzureFileVolumeSource)(nil), ConfigMap:(*v1.ConfigMapVolumeSource)(nil), VsphereVolume:(*v1.VsphereVirtualDiskVolumeSource)(nil), Quobyte:(*v1.QuobyteVolumeSource)(nil), AzureDisk:(*v1.AzureDiskVolumeSource)(nil), PhotonPersistentDisk:(*v1.PhotonPersistentDiskVolumeSource)(nil), Projected:(*v1.ProjectedVolumeSource)(nil), PortworxVolume:(*v1.PortworxVolumeSource)(nil), ScaleIO:(*v1.ScaleIOVolumeSource)(nil), StorageOS:(*v1.StorageOSVolumeSource)(nil), CSI:(*v1.CSIVolumeSource)(nil), Ephemeral:(*v1.EphemeralVolumeSource)(nil)}}}, InitContainers:[]v1.Container(nil), Containers:[]v1.Container{v1.Container{Name:"kube-proxy", Image:"k8s.gcr.io/kube-proxy:v1.20.0", Command:[]string{"/usr/local/bin/kube-proxy", "--config=/var/lib/kube-proxy/config.conf", "--hostname-override=$(NODE_NAME)"}, Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar{v1.EnvVar{Name:"NODE_NAME", Value:"", ValueFrom:(*v1.EnvVarSource)(0x400069d3e0)}}, Resources:v1.R
esourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"kube-proxy", ReadOnly:false, MountPath:"/var/lib/kube-proxy", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}, v1.VolumeMount{Name:"xtables-lock", ReadOnly:false, MountPath:"/run/xtables.lock", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}, v1.VolumeMount{Name:"lib-modules", ReadOnly:true, MountPath:"/lib/modules", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), StartupProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(0x4000e85920), Stdin:false, StdinOnce:false, TTY:false}}, EphemeralContainers:[]v1.EphemeralContainer(nil), RestartPo
licy:"Always", TerminationGracePeriodSeconds:(*int64)(0x400064e518), ActiveDeadlineSeconds:(*int64)(nil), DNSPolicy:"ClusterFirst", NodeSelector:map[string]string{"kubernetes.io/os":"linux"}, ServiceAccountName:"kube-proxy", DeprecatedServiceAccount:"kube-proxy", AutomountServiceAccountToken:(*bool)(nil), NodeName:"", HostNetwork:true, HostPID:false, HostIPC:false, ShareProcessNamespace:(*bool)(nil), SecurityContext:(*v1.PodSecurityContext)(0x40005a9b20), ImagePullSecrets:[]v1.LocalObjectReference(nil), Hostname:"", Subdomain:"", Affinity:(*v1.Affinity)(nil), SchedulerName:"default-scheduler", Tolerations:[]v1.Toleration{v1.Toleration{Key:"CriticalAddonsOnly", Operator:"Exists", Value:"", Effect:"", TolerationSeconds:(*int64)(nil)}, v1.Toleration{Key:"", Operator:"Exists", Value:"", Effect:"", TolerationSeconds:(*int64)(nil)}}, HostAliases:[]v1.HostAlias(nil), PriorityClassName:"system-node-critical", Priority:(*int32)(nil), DNSConfig:(*v1.PodDNSConfig)(nil), ReadinessGates:[]v1.PodReadinessGate(nil), Runtime
ClassName:(*string)(nil), EnableServiceLinks:(*bool)(nil), PreemptionPolicy:(*v1.PreemptionPolicy)(nil), Overhead:v1.ResourceList(nil), TopologySpreadConstraints:[]v1.TopologySpreadConstraint(nil), SetHostnameAsFQDN:(*bool)(nil)}}, UpdateStrategy:v1.DaemonSetUpdateStrategy{Type:"RollingUpdate", RollingUpdate:(*v1.RollingUpdateDaemonSet)(0x40002869a0)}, MinReadySeconds:0, RevisionHistoryLimit:(*int32)(0x400064e5b8)}, Status:v1.DaemonSetStatus{CurrentNumberScheduled:0, NumberMisscheduled:0, DesiredNumberScheduled:0, NumberReady:0, ObservedGeneration:0, UpdatedNumberScheduled:0, NumberAvailable:0, NumberUnavailable:0, CollisionCount:(*int32)(nil), Conditions:[]v1.DaemonSetCondition(nil)}}: Operation cannot be fulfilled on daemonsets.apps "kube-proxy": the object has been modified; please apply your changes to the latest version and try again
	I0816 00:05:07.165414       1 shared_informer.go:247] Caches are synced for garbage collector 
	I0816 00:05:07.211431       1 shared_informer.go:247] Caches are synced for garbage collector 
	I0816 00:05:07.211456       1 garbagecollector.go:151] Garbage collector: all resource monitors have synced. Proceeding to collect garbage
	I0816 00:05:10.030382       1 event.go:291] "Event occurred" object="kube-system/coredns" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set coredns-74ff55c5b to 1"
	I0816 00:05:10.047326       1 event.go:291] "Event occurred" object="kube-system/coredns-74ff55c5b" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: coredns-74ff55c5b-9l4p8"
	I0816 00:06:43.633203       1 event.go:291] "Event occurred" object="kube-system/metrics-server" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set metrics-server-9975d5f86 to 1"
	E0816 00:06:43.828786       1 clusterroleaggregation_controller.go:181] edit failed with : Operation cannot be fulfilled on clusterroles.rbac.authorization.k8s.io "edit": the object has been modified; please apply your changes to the latest version and try again
	I0816 00:06:44.690678       1 event.go:291] "Event occurred" object="kube-system/metrics-server-9975d5f86" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: metrics-server-9975d5f86-tt4kd"
	W0816 00:06:45.344029       1 endpointslice_controller.go:284] Error syncing endpoint slices for service "kube-system/metrics-server", retrying. Error: failed to update metrics-server-zb6js EndpointSlice for Service kube-system/metrics-server: Put "https://192.168.85.2:8443/apis/discovery.k8s.io/v1beta1/namespaces/kube-system/endpointslices/metrics-server-zb6js": dial tcp 192.168.85.2:8443: connect: connection refused
	I0816 00:06:45.344279       1 event.go:291] "Event occurred" object="kube-system/metrics-server" kind="Service" apiVersion="v1" type="Warning" reason="FailedToUpdateEndpointSlices" message="Error updating Endpoint Slices for Service kube-system/metrics-server: failed to update metrics-server-zb6js EndpointSlice for Service kube-system/metrics-server: Put \"https://192.168.85.2:8443/apis/discovery.k8s.io/v1beta1/namespaces/kube-system/endpointslices/metrics-server-zb6js\": dial tcp 192.168.85.2:8443: connect: connection refused"
	E0816 00:06:45.344435       1 event.go:273] Unable to write event: '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"metrics-server.17ec0cc6cc23c38d", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Service", Namespace:"kube-system", Name:"metrics-server", UID:"da33dae1-dc07-40c5-ab20-ef3b4549504c", APIVersion:"v1", ResourceVersion:"587", FieldPath:""}, Reason:"FailedToUpdateEndpointSlices", Message:"Error updating Endpoint Slices for Service kube-system/metrics-server: failed to update metrics-server-zb6js EndpointSlice f
or Service kube-system/metrics-server: Put \"https://192.168.85.2:8443/apis/discovery.k8s.io/v1beta1/namespaces/kube-system/endpointslices/metrics-server-zb6js\": dial tcp 192.168.85.2:8443: connect: connection refused", Source:v1.EventSource{Component:"endpoint-slice-controller", Host:""}, FirstTimestamp:v1.Time{Time:time.Time{wall:0xc1a7c3e55481318d, ext:125858843581, loc:(*time.Location)(0x632eb80)}}, LastTimestamp:v1.Time{Time:time.Time{wall:0xc1a7c3e55481318d, ext:125858843581, loc:(*time.Location)(0x632eb80)}}, Count:1, Type:"Warning", EventTime:v1.MicroTime{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'Post "https://192.168.85.2:8443/api/v1/namespaces/kube-system/events": dial tcp 192.168.85.2:8443: connect: connection refused'(may retry after sleeping)
	
	
	==> kube-controller-manager [cc3ceefdfcf9] <==
	W0816 00:08:46.783852       1 garbagecollector.go:703] failed to discover some groups: map[metrics.k8s.io/v1beta1:the server is currently unable to handle the request]
	E0816 00:09:12.838085       1 resource_quota_controller.go:409] unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: the server is currently unable to handle the request
	I0816 00:09:18.434378       1 request.go:655] Throttling request took 1.048567822s, request: GET:https://192.168.85.2:8443/apis/scheduling.k8s.io/v1?timeout=32s
	W0816 00:09:19.286278       1 garbagecollector.go:703] failed to discover some groups: map[metrics.k8s.io/v1beta1:the server is currently unable to handle the request]
	E0816 00:09:43.339796       1 resource_quota_controller.go:409] unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: the server is currently unable to handle the request
	I0816 00:09:50.936802       1 request.go:655] Throttling request took 1.048688525s, request: GET:https://192.168.85.2:8443/apis/authorization.k8s.io/v1?timeout=32s
	W0816 00:09:51.788096       1 garbagecollector.go:703] failed to discover some groups: map[metrics.k8s.io/v1beta1:the server is currently unable to handle the request]
	E0816 00:10:13.841394       1 resource_quota_controller.go:409] unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: the server is currently unable to handle the request
	I0816 00:10:23.438529       1 request.go:655] Throttling request took 1.048338912s, request: GET:https://192.168.85.2:8443/apis/extensions/v1beta1?timeout=32s
	W0816 00:10:24.290032       1 garbagecollector.go:703] failed to discover some groups: map[metrics.k8s.io/v1beta1:the server is currently unable to handle the request]
	E0816 00:10:44.343184       1 resource_quota_controller.go:409] unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: the server is currently unable to handle the request
	I0816 00:10:55.940699       1 request.go:655] Throttling request took 1.044173579s, request: GET:https://192.168.85.2:8443/apis/extensions/v1beta1?timeout=32s
	W0816 00:10:56.792111       1 garbagecollector.go:703] failed to discover some groups: map[metrics.k8s.io/v1beta1:the server is currently unable to handle the request]
	E0816 00:11:14.848028       1 resource_quota_controller.go:409] unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: the server is currently unable to handle the request
	I0816 00:11:28.442555       1 request.go:655] Throttling request took 1.04839984s, request: GET:https://192.168.85.2:8443/apis/extensions/v1beta1?timeout=32s
	W0816 00:11:29.293830       1 garbagecollector.go:703] failed to discover some groups: map[metrics.k8s.io/v1beta1:the server is currently unable to handle the request]
	E0816 00:11:45.350891       1 resource_quota_controller.go:409] unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: the server is currently unable to handle the request
	I0816 00:12:00.947881       1 request.go:655] Throttling request took 1.047745865s, request: GET:https://192.168.85.2:8443/apis/extensions/v1beta1?timeout=32s
	W0816 00:12:01.799745       1 garbagecollector.go:703] failed to discover some groups: map[metrics.k8s.io/v1beta1:the server is currently unable to handle the request]
	E0816 00:12:15.852703       1 resource_quota_controller.go:409] unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: the server is currently unable to handle the request
	I0816 00:12:33.450326       1 request.go:655] Throttling request took 1.048475585s, request: GET:https://192.168.85.2:8443/apis/scheduling.k8s.io/v1beta1?timeout=32s
	W0816 00:12:34.305364       1 garbagecollector.go:703] failed to discover some groups: map[metrics.k8s.io/v1beta1:the server is currently unable to handle the request]
	E0816 00:12:46.354509       1 resource_quota_controller.go:409] unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: the server is currently unable to handle the request
	I0816 00:13:05.955724       1 request.go:655] Throttling request took 1.048076971s, request: GET:https://192.168.85.2:8443/apis/extensions/v1beta1?timeout=32s
	W0816 00:13:06.807773       1 garbagecollector.go:703] failed to discover some groups: map[metrics.k8s.io/v1beta1:the server is currently unable to handle the request]
	
	
	==> kube-proxy [1e79f4e5d490] <==
	I0816 00:05:09.412461       1 node.go:172] Successfully retrieved node IP: 192.168.85.2
	I0816 00:05:09.412555       1 server_others.go:142] kube-proxy node IP is an IPv4 address (192.168.85.2), assume IPv4 operation
	W0816 00:05:09.511630       1 server_others.go:578] Unknown proxy mode "", assuming iptables proxy
	I0816 00:05:09.511769       1 server_others.go:185] Using iptables Proxier.
	I0816 00:05:09.512000       1 server.go:650] Version: v1.20.0
	I0816 00:05:09.512955       1 config.go:315] Starting service config controller
	I0816 00:05:09.512977       1 shared_informer.go:240] Waiting for caches to sync for service config
	I0816 00:05:09.512999       1 config.go:224] Starting endpoint slice config controller
	I0816 00:05:09.513003       1 shared_informer.go:240] Waiting for caches to sync for endpoint slice config
	I0816 00:05:09.613104       1 shared_informer.go:247] Caches are synced for endpoint slice config 
	I0816 00:05:09.613183       1 shared_informer.go:247] Caches are synced for service config 
	
	
	==> kube-proxy [c5eeddd51e95] <==
	I0816 00:07:25.738903       1 node.go:172] Successfully retrieved node IP: 192.168.85.2
	I0816 00:07:25.738984       1 server_others.go:142] kube-proxy node IP is an IPv4 address (192.168.85.2), assume IPv4 operation
	W0816 00:07:25.784795       1 server_others.go:578] Unknown proxy mode "", assuming iptables proxy
	I0816 00:07:25.784895       1 server_others.go:185] Using iptables Proxier.
	I0816 00:07:25.790418       1 server.go:650] Version: v1.20.0
	I0816 00:07:25.791025       1 config.go:315] Starting service config controller
	I0816 00:07:25.791041       1 shared_informer.go:240] Waiting for caches to sync for service config
	I0816 00:07:25.791750       1 config.go:224] Starting endpoint slice config controller
	I0816 00:07:25.791758       1 shared_informer.go:240] Waiting for caches to sync for endpoint slice config
	I0816 00:07:25.891413       1 shared_informer.go:247] Caches are synced for service config 
	I0816 00:07:25.892527       1 shared_informer.go:247] Caches are synced for endpoint slice config 
	
	
	==> kube-scheduler [003fa784026a] <==
	I0816 00:04:42.631485       1 serving.go:331] Generated self-signed cert in-memory
	W0816 00:04:48.098756       1 requestheader_controller.go:193] Unable to get configmap/extension-apiserver-authentication in kube-system.  Usually fixed by 'kubectl create rolebinding -n kube-system ROLEBINDING_NAME --role=extension-apiserver-authentication-reader --serviceaccount=YOUR_NS:YOUR_SA'
	W0816 00:04:48.098813       1 authentication.go:332] Error looking up in-cluster authentication configuration: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot get resource "configmaps" in API group "" in the namespace "kube-system"
	W0816 00:04:48.098828       1 authentication.go:333] Continuing without authentication configuration. This may treat all requests as anonymous.
	W0816 00:04:48.098838       1 authentication.go:334] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false
	I0816 00:04:48.159120       1 configmap_cafile_content.go:202] Starting client-ca::kube-system::extension-apiserver-authentication::client-ca-file
	I0816 00:04:48.159141       1 shared_informer.go:240] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
	I0816 00:04:48.166967       1 secure_serving.go:197] Serving securely on 127.0.0.1:10259
	I0816 00:04:48.167164       1 tlsconfig.go:240] Starting DynamicServingCertificateController
	E0816 00:04:48.190660       1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
	E0816 00:04:48.192378       1 reflector.go:138] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
	E0816 00:04:48.192552       1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
	E0816 00:04:48.192637       1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1beta1.PodDisruptionBudget: failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
	E0816 00:04:48.192710       1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
	E0816 00:04:48.192790       1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
	E0816 00:04:48.192860       1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
	E0816 00:04:48.192928       1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
	E0816 00:04:48.192995       1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
	E0816 00:04:48.193060       1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
	E0816 00:04:48.193117       1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
	E0816 00:04:48.193239       1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
	E0816 00:04:49.061534       1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
	E0816 00:04:49.186075       1 reflector.go:138] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
	I0816 00:04:51.159237       1 shared_informer.go:247] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file 
	
	
	==> kube-scheduler [67be7ec054c6] <==
	I0816 00:07:15.155786       1 serving.go:331] Generated self-signed cert in-memory
	W0816 00:07:22.673847       1 requestheader_controller.go:193] Unable to get configmap/extension-apiserver-authentication in kube-system.  Usually fixed by 'kubectl create rolebinding -n kube-system ROLEBINDING_NAME --role=extension-apiserver-authentication-reader --serviceaccount=YOUR_NS:YOUR_SA'
	W0816 00:07:22.673880       1 authentication.go:332] Error looking up in-cluster authentication configuration: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot get resource "configmaps" in API group "" in the namespace "kube-system"
	W0816 00:07:22.673910       1 authentication.go:333] Continuing without authentication configuration. This may treat all requests as anonymous.
	W0816 00:07:22.673917       1 authentication.go:334] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false
	I0816 00:07:22.802787       1 secure_serving.go:197] Serving securely on 127.0.0.1:10259
	I0816 00:07:22.811089       1 configmap_cafile_content.go:202] Starting client-ca::kube-system::extension-apiserver-authentication::client-ca-file
	I0816 00:07:22.811176       1 shared_informer.go:240] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
	I0816 00:07:22.811232       1 tlsconfig.go:240] Starting DynamicServingCertificateController
	I0816 00:07:22.916826       1 shared_informer.go:247] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file 
	
	
	==> kubelet <==
	Aug 16 00:10:53 old-k8s-version-894472 kubelet[1361]: E0816 00:10:53.458707    1361 remote_image.go:113] PullImage "registry.k8s.io/echoserver:1.4" from image service failed: rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/
	Aug 16 00:10:53 old-k8s-version-894472 kubelet[1361]: E0816 00:10:53.458746    1361 kuberuntime_image.go:51] Pull image "registry.k8s.io/echoserver:1.4" failed: rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/
	Aug 16 00:10:53 old-k8s-version-894472 kubelet[1361]: E0816 00:10:53.458868    1361 kuberuntime_manager.go:829] container &Container{Name:dashboard-metrics-scraper,Image:registry.k8s.io/echoserver:1.4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:,HostPort:0,ContainerPort:8000,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:tmp-volume,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kubernetes-dashboard-token-2w5nt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:&Probe{Handler:Handler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/,Port:{0 8000 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,},InitialDelaySeconds:30,TimeoutSeconds:30,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,},ReadinessProbe:nil,Lifecycle:nil,Terminatio
nMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*1001,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:*2001,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,} start failed in pod dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df): ErrImagePull: rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/
	Aug 16 00:10:53 old-k8s-version-894472 kubelet[1361]: E0816 00:10:53.458905    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	Aug 16 00:11:03 old-k8s-version-894472 kubelet[1361]: E0816 00:11:03.902219    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	Aug 16 00:11:03 old-k8s-version-894472 kubelet[1361]: E0816 00:11:03.905343    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	Aug 16 00:11:16 old-k8s-version-894472 kubelet[1361]: E0816 00:11:16.908006    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	Aug 16 00:11:17 old-k8s-version-894472 kubelet[1361]: E0816 00:11:17.912657    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	Aug 16 00:11:31 old-k8s-version-894472 kubelet[1361]: E0816 00:11:31.902307    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	Aug 16 00:11:32 old-k8s-version-894472 kubelet[1361]: E0816 00:11:32.902285    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	Aug 16 00:11:43 old-k8s-version-894472 kubelet[1361]: E0816 00:11:43.902913    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	Aug 16 00:11:47 old-k8s-version-894472 kubelet[1361]: E0816 00:11:47.902276    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	Aug 16 00:11:56 old-k8s-version-894472 kubelet[1361]: E0816 00:11:56.907938    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	Aug 16 00:12:01 old-k8s-version-894472 kubelet[1361]: E0816 00:12:01.910769    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	Aug 16 00:12:07 old-k8s-version-894472 kubelet[1361]: E0816 00:12:07.902409    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	Aug 16 00:12:12 old-k8s-version-894472 kubelet[1361]: E0816 00:12:12.925048    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	Aug 16 00:12:18 old-k8s-version-894472 kubelet[1361]: E0816 00:12:18.903513    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	Aug 16 00:12:23 old-k8s-version-894472 kubelet[1361]: E0816 00:12:23.902176    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	Aug 16 00:12:33 old-k8s-version-894472 kubelet[1361]: E0816 00:12:33.902261    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	Aug 16 00:12:34 old-k8s-version-894472 kubelet[1361]: E0816 00:12:34.910967    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.902827    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.903329    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	Aug 16 00:12:58 old-k8s-version-894472 kubelet[1361]: E0816 00:12:58.902545    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	Aug 16 00:13:02 old-k8s-version-894472 kubelet[1361]: E0816 00:13:02.903209    1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
	Aug 16 00:13:13 old-k8s-version-894472 kubelet[1361]: E0816 00:13:13.908004    1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
	
	
	==> kubernetes-dashboard [3ef1e388df06] <==
	2024/08/16 00:07:48 Starting overwatch
	2024/08/16 00:07:48 Using namespace: kubernetes-dashboard
	2024/08/16 00:07:48 Using in-cluster config to connect to apiserver
	2024/08/16 00:07:48 Using secret token for csrf signing
	2024/08/16 00:07:48 Initializing csrf token from kubernetes-dashboard-csrf secret
	2024/08/16 00:07:48 Empty token. Generating and storing in a secret kubernetes-dashboard-csrf
	2024/08/16 00:07:48 Successful initial request to the apiserver, version: v1.20.0
	2024/08/16 00:07:48 Generating JWE encryption key
	2024/08/16 00:07:48 New synchronizer has been registered: kubernetes-dashboard-key-holder-kubernetes-dashboard. Starting
	2024/08/16 00:07:48 Starting secret synchronizer for kubernetes-dashboard-key-holder in namespace kubernetes-dashboard
	2024/08/16 00:07:48 Initializing JWE encryption key from synchronized object
	2024/08/16 00:07:48 Creating in-cluster Sidecar client
	2024/08/16 00:07:48 Serving insecurely on HTTP port: 9090
	2024/08/16 00:07:48 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
	2024/08/16 00:08:18 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
	2024/08/16 00:08:48 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
	2024/08/16 00:09:18 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
	2024/08/16 00:09:48 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
	2024/08/16 00:10:18 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
	2024/08/16 00:10:48 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
	2024/08/16 00:11:18 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
	2024/08/16 00:11:48 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
	2024/08/16 00:12:18 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
	2024/08/16 00:12:48 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
	
	
	==> storage-provisioner [a6efcfb5cb17] <==
	I0816 00:08:08.052876       1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
	I0816 00:08:08.086690       1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
	I0816 00:08:08.086919       1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
	I0816 00:08:25.560208       1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
	I0816 00:08:25.560444       1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_old-k8s-version-894472_37e43bd1-48d4-41ee-b36e-da33dd79404d!
	I0816 00:08:25.562003       1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"104451a7-4333-4924-b806-36d8821f9dfd", APIVersion:"v1", ResourceVersion:"814", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' old-k8s-version-894472_37e43bd1-48d4-41ee-b36e-da33dd79404d became leader
	I0816 00:08:25.661174       1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_old-k8s-version-894472_37e43bd1-48d4-41ee-b36e-da33dd79404d!
	
	
	==> storage-provisioner [de096650c620] <==
	I0816 00:07:26.083288       1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
	F0816 00:07:56.086818       1 main.go:39] error getting server version: Get "https://10.96.0.1:443/version?timeout=32s": dial tcp 10.96.0.1:443: i/o timeout
	

-- /stdout --
helpers_test.go:254: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p old-k8s-version-894472 -n old-k8s-version-894472
helpers_test.go:261: (dbg) Run:  kubectl --context old-k8s-version-894472 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:272: non-running pods: metrics-server-9975d5f86-tt4kd dashboard-metrics-scraper-8d5bb5db8-vst6j
helpers_test.go:274: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/SecondStart]: describe non-running pods <======
helpers_test.go:277: (dbg) Run:  kubectl --context old-k8s-version-894472 describe pod metrics-server-9975d5f86-tt4kd dashboard-metrics-scraper-8d5bb5db8-vst6j
helpers_test.go:277: (dbg) Non-zero exit: kubectl --context old-k8s-version-894472 describe pod metrics-server-9975d5f86-tt4kd dashboard-metrics-scraper-8d5bb5db8-vst6j: exit status 1 (91.545668ms)

** stderr ** 
	Error from server (NotFound): pods "metrics-server-9975d5f86-tt4kd" not found
	Error from server (NotFound): pods "dashboard-metrics-scraper-8d5bb5db8-vst6j" not found

** /stderr **
helpers_test.go:279: kubectl --context old-k8s-version-894472 describe pod metrics-server-9975d5f86-tt4kd dashboard-metrics-scraper-8d5bb5db8-vst6j: exit status 1
--- FAIL: TestStartStop/group/old-k8s-version/serial/SecondStart (380.63s)


Test pass (318/343)

Order passed test Duration
3 TestDownloadOnly/v1.20.0/json-events 12.88
4 TestDownloadOnly/v1.20.0/preload-exists 0
8 TestDownloadOnly/v1.20.0/LogsDuration 0.07
9 TestDownloadOnly/v1.20.0/DeleteAll 0.2
10 TestDownloadOnly/v1.20.0/DeleteAlwaysSucceeds 0.13
12 TestDownloadOnly/v1.31.0/json-events 6.71
13 TestDownloadOnly/v1.31.0/preload-exists 0
17 TestDownloadOnly/v1.31.0/LogsDuration 0.08
18 TestDownloadOnly/v1.31.0/DeleteAll 0.21
19 TestDownloadOnly/v1.31.0/DeleteAlwaysSucceeds 0.13
21 TestBinaryMirror 0.58
22 TestOffline 85.2
25 TestAddons/PreSetup/EnablingAddonOnNonExistingCluster 0.07
26 TestAddons/PreSetup/DisablingAddonOnNonExistingCluster 0.06
27 TestAddons/Setup 220.31
29 TestAddons/serial/Volcano 42.09
31 TestAddons/serial/GCPAuth/Namespaces 0.19
33 TestAddons/parallel/Registry 16.88
34 TestAddons/parallel/Ingress 19.13
35 TestAddons/parallel/InspektorGadget 10.89
36 TestAddons/parallel/MetricsServer 6.72
39 TestAddons/parallel/CSI 38.98
40 TestAddons/parallel/Headlamp 16.85
41 TestAddons/parallel/CloudSpanner 6.63
42 TestAddons/parallel/LocalPath 52.85
43 TestAddons/parallel/NvidiaDevicePlugin 6.6
44 TestAddons/parallel/Yakd 11.7
45 TestAddons/StoppedEnableDisable 11.03
46 TestCertOptions 38
47 TestCertExpiration 247.09
48 TestDockerFlags 47.07
49 TestForceSystemdFlag 35.01
50 TestForceSystemdEnv 42.44
56 TestErrorSpam/setup 28.73
57 TestErrorSpam/start 0.71
58 TestErrorSpam/status 1.22
59 TestErrorSpam/pause 1.41
60 TestErrorSpam/unpause 1.46
61 TestErrorSpam/stop 10.94
64 TestFunctional/serial/CopySyncFile 0
65 TestFunctional/serial/StartWithProxy 69.87
66 TestFunctional/serial/AuditLog 0
67 TestFunctional/serial/SoftStart 34.44
68 TestFunctional/serial/KubeContext 0.07
69 TestFunctional/serial/KubectlGetPods 0.12
72 TestFunctional/serial/CacheCmd/cache/add_remote 3.38
73 TestFunctional/serial/CacheCmd/cache/add_local 0.98
74 TestFunctional/serial/CacheCmd/cache/CacheDelete 0.06
75 TestFunctional/serial/CacheCmd/cache/list 0.07
76 TestFunctional/serial/CacheCmd/cache/verify_cache_inside_node 0.3
77 TestFunctional/serial/CacheCmd/cache/cache_reload 1.56
78 TestFunctional/serial/CacheCmd/cache/delete 0.11
79 TestFunctional/serial/MinikubeKubectlCmd 0.13
80 TestFunctional/serial/MinikubeKubectlCmdDirectly 0.13
81 TestFunctional/serial/ExtraConfig 44.48
82 TestFunctional/serial/ComponentHealth 0.11
83 TestFunctional/serial/LogsCmd 1.12
84 TestFunctional/serial/LogsFileCmd 1.18
85 TestFunctional/serial/InvalidService 4.31
87 TestFunctional/parallel/ConfigCmd 0.51
88 TestFunctional/parallel/DashboardCmd 14.16
89 TestFunctional/parallel/DryRun 0.5
90 TestFunctional/parallel/InternationalLanguage 0.21
91 TestFunctional/parallel/StatusCmd 1.08
95 TestFunctional/parallel/ServiceCmdConnect 11.62
96 TestFunctional/parallel/AddonsCmd 0.21
97 TestFunctional/parallel/PersistentVolumeClaim 26.65
99 TestFunctional/parallel/SSHCmd 0.7
100 TestFunctional/parallel/CpCmd 2.33
102 TestFunctional/parallel/FileSync 0.33
103 TestFunctional/parallel/CertSync 2.13
107 TestFunctional/parallel/NodeLabels 0.1
109 TestFunctional/parallel/NonActiveRuntimeDisabled 0.38
111 TestFunctional/parallel/License 0.22
113 TestFunctional/parallel/TunnelCmd/serial/RunSecondTunnel 0.82
114 TestFunctional/parallel/TunnelCmd/serial/StartTunnel 0
116 TestFunctional/parallel/TunnelCmd/serial/WaitService/Setup 10.45
117 TestFunctional/parallel/TunnelCmd/serial/WaitService/IngressIP 0.1
118 TestFunctional/parallel/TunnelCmd/serial/AccessDirect 0
122 TestFunctional/parallel/TunnelCmd/serial/DeleteTunnel 0.11
123 TestFunctional/parallel/ServiceCmd/DeployApp 7.21
124 TestFunctional/parallel/ProfileCmd/profile_not_create 0.38
125 TestFunctional/parallel/ProfileCmd/profile_list 0.38
126 TestFunctional/parallel/ProfileCmd/profile_json_output 0.38
127 TestFunctional/parallel/MountCmd/any-port 7.5
128 TestFunctional/parallel/ServiceCmd/List 0.63
129 TestFunctional/parallel/ServiceCmd/JSONOutput 0.54
130 TestFunctional/parallel/ServiceCmd/HTTPS 0.39
131 TestFunctional/parallel/ServiceCmd/Format 0.4
132 TestFunctional/parallel/ServiceCmd/URL 0.43
133 TestFunctional/parallel/MountCmd/specific-port 2.33
134 TestFunctional/parallel/MountCmd/VerifyCleanup 2.5
135 TestFunctional/parallel/Version/short 0.08
136 TestFunctional/parallel/Version/components 1.16
137 TestFunctional/parallel/ImageCommands/ImageListShort 0.23
138 TestFunctional/parallel/ImageCommands/ImageListTable 0.23
139 TestFunctional/parallel/ImageCommands/ImageListJson 0.25
140 TestFunctional/parallel/ImageCommands/ImageListYaml 0.24
141 TestFunctional/parallel/ImageCommands/ImageBuild 2.49
142 TestFunctional/parallel/ImageCommands/Setup 0.78
143 TestFunctional/parallel/ImageCommands/ImageLoadDaemon 1.41
144 TestFunctional/parallel/ImageCommands/ImageReloadDaemon 0.8
145 TestFunctional/parallel/ImageCommands/ImageTagAndLoadDaemon 1.03
146 TestFunctional/parallel/ImageCommands/ImageSaveToFile 0.32
147 TestFunctional/parallel/ImageCommands/ImageRemove 0.43
148 TestFunctional/parallel/ImageCommands/ImageLoadFromFile 0.64
149 TestFunctional/parallel/ImageCommands/ImageSaveDaemon 0.37
150 TestFunctional/parallel/UpdateContextCmd/no_changes 0.2
151 TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster 0.15
152 TestFunctional/parallel/UpdateContextCmd/no_clusters 0.14
153 TestFunctional/parallel/DockerEnv/bash 1.03
154 TestFunctional/delete_echo-server_images 0.04
155 TestFunctional/delete_my-image_image 0.02
156 TestFunctional/delete_minikube_cached_images 0.03
160 TestMultiControlPlane/serial/StartCluster 127.93
161 TestMultiControlPlane/serial/DeployApp 6.94
162 TestMultiControlPlane/serial/PingHostFromPods 1.66
163 TestMultiControlPlane/serial/AddWorkerNode 25.7
164 TestMultiControlPlane/serial/NodeLabels 0.11
165 TestMultiControlPlane/serial/HAppyAfterClusterStart 0.8
166 TestMultiControlPlane/serial/CopyFile 19.04
167 TestMultiControlPlane/serial/StopSecondaryNode 11.78
168 TestMultiControlPlane/serial/DegradedAfterControlPlaneNodeStop 0.59
169 TestMultiControlPlane/serial/RestartSecondaryNode 39.97
170 TestMultiControlPlane/serial/HAppyAfterSecondaryNodeRestart 8.74
171 TestMultiControlPlane/serial/RestartClusterKeepsNodes 146.17
172 TestMultiControlPlane/serial/DeleteSecondaryNode 11.21
173 TestMultiControlPlane/serial/DegradedAfterSecondaryNodeDelete 0.55
174 TestMultiControlPlane/serial/StopCluster 32.92
175 TestMultiControlPlane/serial/RestartCluster 141.41
176 TestMultiControlPlane/serial/DegradedAfterClusterRestart 0.61
177 TestMultiControlPlane/serial/AddSecondaryNode 47.65
178 TestMultiControlPlane/serial/HAppyAfterSecondaryNodeAdd 0.81
181 TestImageBuild/serial/Setup 32.31
182 TestImageBuild/serial/NormalBuild 1.8
183 TestImageBuild/serial/BuildWithBuildArg 1.1
184 TestImageBuild/serial/BuildWithDockerIgnore 0.91
185 TestImageBuild/serial/BuildWithSpecifiedDockerfile 0.75
189 TestJSONOutput/start/Command 73.81
190 TestJSONOutput/start/Audit 0
192 TestJSONOutput/start/parallel/DistinctCurrentSteps 0
193 TestJSONOutput/start/parallel/IncreasingCurrentSteps 0
195 TestJSONOutput/pause/Command 0.6
196 TestJSONOutput/pause/Audit 0
198 TestJSONOutput/pause/parallel/DistinctCurrentSteps 0
199 TestJSONOutput/pause/parallel/IncreasingCurrentSteps 0
201 TestJSONOutput/unpause/Command 0.55
202 TestJSONOutput/unpause/Audit 0
204 TestJSONOutput/unpause/parallel/DistinctCurrentSteps 0
205 TestJSONOutput/unpause/parallel/IncreasingCurrentSteps 0
207 TestJSONOutput/stop/Command 5.77
208 TestJSONOutput/stop/Audit 0
210 TestJSONOutput/stop/parallel/DistinctCurrentSteps 0
211 TestJSONOutput/stop/parallel/IncreasingCurrentSteps 0
212 TestErrorJSONOutput 0.27
214 TestKicCustomNetwork/create_custom_network 31.89
215 TestKicCustomNetwork/use_default_bridge_network 31.07
216 TestKicExistingNetwork 32.78
217 TestKicCustomSubnet 33.75
218 TestKicStaticIP 35.92
219 TestMainNoArgs 0.05
220 TestMinikubeProfile 68.9
223 TestMountStart/serial/StartWithMountFirst 7.57
224 TestMountStart/serial/VerifyMountFirst 0.25
225 TestMountStart/serial/StartWithMountSecond 8.5
226 TestMountStart/serial/VerifyMountSecond 0.26
227 TestMountStart/serial/DeleteFirst 1.45
228 TestMountStart/serial/VerifyMountPostDelete 0.25
229 TestMountStart/serial/Stop 1.2
230 TestMountStart/serial/RestartStopped 8.58
231 TestMountStart/serial/VerifyMountPostStop 0.25
234 TestMultiNode/serial/FreshStart2Nodes 82.75
235 TestMultiNode/serial/DeployApp2Nodes 35.93
236 TestMultiNode/serial/PingHostFrom2Pods 1.04
237 TestMultiNode/serial/AddNode 20.91
238 TestMultiNode/serial/MultiNodeLabels 0.14
239 TestMultiNode/serial/ProfileList 0.33
240 TestMultiNode/serial/CopyFile 10.16
241 TestMultiNode/serial/StopNode 2.23
242 TestMultiNode/serial/StartAfterStop 11.61
243 TestMultiNode/serial/RestartKeepsNodes 99.72
244 TestMultiNode/serial/DeleteNode 5.64
245 TestMultiNode/serial/StopMultiNode 21.71
246 TestMultiNode/serial/RestartMultiNode 53.14
247 TestMultiNode/serial/ValidateNameConflict 34.96
252 TestPreload 113.06
254 TestScheduledStopUnix 104.36
255 TestSkaffold 117.25
257 TestInsufficientStorage 12.26
258 TestRunningBinaryUpgrade 102.51
260 TestKubernetesUpgrade 189.52
261 TestMissingContainerUpgrade 111.7
273 TestStoppedBinaryUpgrade/Setup 0.72
274 TestStoppedBinaryUpgrade/Upgrade 102.37
275 TestStoppedBinaryUpgrade/MinikubeLogs 1.55
277 TestPause/serial/Start 80.01
286 TestNoKubernetes/serial/StartNoK8sWithVersion 0.09
287 TestNoKubernetes/serial/StartWithK8s 36.48
288 TestNoKubernetes/serial/StartWithStopK8s 17.74
289 TestPause/serial/SecondStartNoReconfiguration 29.16
290 TestNoKubernetes/serial/Start 7.12
291 TestNoKubernetes/serial/VerifyK8sNotRunning 0.39
292 TestNoKubernetes/serial/ProfileList 1.12
293 TestNoKubernetes/serial/Stop 1.24
294 TestNoKubernetes/serial/StartNoArgs 8.21
295 TestNoKubernetes/serial/VerifyK8sNotRunningSecond 0.59
296 TestNetworkPlugins/group/auto/Start 77.95
297 TestPause/serial/Pause 0.72
298 TestPause/serial/VerifyStatus 0.41
299 TestPause/serial/Unpause 0.83
300 TestPause/serial/PauseAgain 0.96
301 TestPause/serial/DeletePaused 2.72
302 TestPause/serial/VerifyDeletedResources 0.31
303 TestNetworkPlugins/group/flannel/Start 60.79
304 TestNetworkPlugins/group/flannel/ControllerPod 6.01
305 TestNetworkPlugins/group/flannel/KubeletFlags 0.3
306 TestNetworkPlugins/group/flannel/NetCatPod 11.27
307 TestNetworkPlugins/group/auto/KubeletFlags 0.34
308 TestNetworkPlugins/group/auto/NetCatPod 11.36
309 TestNetworkPlugins/group/flannel/DNS 0.22
310 TestNetworkPlugins/group/flannel/Localhost 0.16
311 TestNetworkPlugins/group/flannel/HairPin 0.17
312 TestNetworkPlugins/group/auto/DNS 0.19
313 TestNetworkPlugins/group/auto/Localhost 0.16
314 TestNetworkPlugins/group/auto/HairPin 0.18
315 TestNetworkPlugins/group/enable-default-cni/Start 81.43
316 TestNetworkPlugins/group/kindnet/Start 75.88
317 TestNetworkPlugins/group/kindnet/ControllerPod 6.01
318 TestNetworkPlugins/group/enable-default-cni/KubeletFlags 0.28
319 TestNetworkPlugins/group/enable-default-cni/NetCatPod 11.28
320 TestNetworkPlugins/group/kindnet/KubeletFlags 0.36
321 TestNetworkPlugins/group/kindnet/NetCatPod 9.33
322 TestNetworkPlugins/group/enable-default-cni/DNS 0.21
323 TestNetworkPlugins/group/enable-default-cni/Localhost 0.16
324 TestNetworkPlugins/group/enable-default-cni/HairPin 0.18
325 TestNetworkPlugins/group/kindnet/DNS 0.26
326 TestNetworkPlugins/group/kindnet/Localhost 0.18
327 TestNetworkPlugins/group/kindnet/HairPin 0.16
328 TestNetworkPlugins/group/bridge/Start 84.5
329 TestNetworkPlugins/group/kubenet/Start 90.97
330 TestNetworkPlugins/group/bridge/KubeletFlags 0.3
331 TestNetworkPlugins/group/bridge/NetCatPod 11.28
332 TestNetworkPlugins/group/kubenet/KubeletFlags 0.27
333 TestNetworkPlugins/group/kubenet/NetCatPod 10.27
334 TestNetworkPlugins/group/bridge/DNS 0.23
335 TestNetworkPlugins/group/bridge/Localhost 0.17
336 TestNetworkPlugins/group/bridge/HairPin 0.18
337 TestNetworkPlugins/group/kubenet/DNS 0.24
338 TestNetworkPlugins/group/kubenet/Localhost 0.27
339 TestNetworkPlugins/group/kubenet/HairPin 0.27
340 TestNetworkPlugins/group/custom-flannel/Start 65.76
341 TestNetworkPlugins/group/calico/Start 77.54
342 TestNetworkPlugins/group/custom-flannel/KubeletFlags 0.51
343 TestNetworkPlugins/group/custom-flannel/NetCatPod 12.43
344 TestNetworkPlugins/group/custom-flannel/DNS 0.28
345 TestNetworkPlugins/group/custom-flannel/Localhost 0.23
346 TestNetworkPlugins/group/custom-flannel/HairPin 0.25
347 TestNetworkPlugins/group/calico/ControllerPod 6.01
348 TestNetworkPlugins/group/calico/KubeletFlags 0.41
349 TestNetworkPlugins/group/calico/NetCatPod 11.39
350 TestNetworkPlugins/group/false/Start 61.21
351 TestNetworkPlugins/group/calico/DNS 0.47
352 TestNetworkPlugins/group/calico/Localhost 0.18
353 TestNetworkPlugins/group/calico/HairPin 0.19
355 TestStartStop/group/old-k8s-version/serial/FirstStart 151.9
356 TestNetworkPlugins/group/false/KubeletFlags 0.29
357 TestNetworkPlugins/group/false/NetCatPod 13.25
358 TestNetworkPlugins/group/false/DNS 0.19
359 TestNetworkPlugins/group/false/Localhost 0.17
360 TestNetworkPlugins/group/false/HairPin 0.2
362 TestStartStop/group/no-preload/serial/FirstStart 81.62
363 TestStartStop/group/old-k8s-version/serial/DeployApp 8.58
364 TestStartStop/group/no-preload/serial/DeployApp 7.45
365 TestStartStop/group/no-preload/serial/EnableAddonWhileActive 1.32
366 TestStartStop/group/old-k8s-version/serial/EnableAddonWhileActive 1.75
367 TestStartStop/group/no-preload/serial/Stop 11.05
368 TestStartStop/group/old-k8s-version/serial/Stop 11.72
369 TestStartStop/group/no-preload/serial/EnableAddonAfterStop 0.17
370 TestStartStop/group/no-preload/serial/SecondStart 281.1
371 TestStartStop/group/old-k8s-version/serial/EnableAddonAfterStop 0.26
373 TestStartStop/group/no-preload/serial/UserAppExistsAfterStop 6.01
374 TestStartStop/group/no-preload/serial/AddonExistsAfterStop 5.11
375 TestStartStop/group/no-preload/serial/VerifyKubernetesImages 0.22
376 TestStartStop/group/no-preload/serial/Pause 2.87
378 TestStartStop/group/embed-certs/serial/FirstStart 77.73
379 TestStartStop/group/embed-certs/serial/DeployApp 8.36
380 TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop 6.01
381 TestStartStop/group/embed-certs/serial/EnableAddonWhileActive 1.07
382 TestStartStop/group/embed-certs/serial/Stop 11.19
383 TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop 5.1
384 TestStartStop/group/old-k8s-version/serial/VerifyKubernetesImages 0.22
385 TestStartStop/group/old-k8s-version/serial/Pause 2.97
386 TestStartStop/group/embed-certs/serial/EnableAddonAfterStop 0.26
387 TestStartStop/group/embed-certs/serial/SecondStart 272.6
389 TestStartStop/group/default-k8s-diff-port/serial/FirstStart 53.8
390 TestStartStop/group/default-k8s-diff-port/serial/DeployApp 9.37
391 TestStartStop/group/default-k8s-diff-port/serial/EnableAddonWhileActive 1.08
392 TestStartStop/group/default-k8s-diff-port/serial/Stop 10.86
393 TestStartStop/group/default-k8s-diff-port/serial/EnableAddonAfterStop 0.18
394 TestStartStop/group/default-k8s-diff-port/serial/SecondStart 267.09
395 TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop 6.01
396 TestStartStop/group/embed-certs/serial/AddonExistsAfterStop 6.11
397 TestStartStop/group/embed-certs/serial/VerifyKubernetesImages 0.23
398 TestStartStop/group/embed-certs/serial/Pause 2.85
400 TestStartStop/group/newest-cni/serial/FirstStart 40.28
401 TestStartStop/group/newest-cni/serial/DeployApp 0
402 TestStartStop/group/newest-cni/serial/EnableAddonWhileActive 1.18
403 TestStartStop/group/newest-cni/serial/Stop 5.81
404 TestStartStop/group/newest-cni/serial/EnableAddonAfterStop 0.27
405 TestStartStop/group/newest-cni/serial/SecondStart 19.51
406 TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop 6.01
407 TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop 6.19
408 TestStartStop/group/newest-cni/serial/UserAppExistsAfterStop 0
409 TestStartStop/group/newest-cni/serial/AddonExistsAfterStop 0
410 TestStartStop/group/newest-cni/serial/VerifyKubernetesImages 0.59
411 TestStartStop/group/default-k8s-diff-port/serial/VerifyKubernetesImages 0.31
412 TestStartStop/group/default-k8s-diff-port/serial/Pause 4.34
413 TestStartStop/group/newest-cni/serial/Pause 4.49
TestDownloadOnly/v1.20.0/json-events (12.88s)

=== RUN   TestDownloadOnly/v1.20.0/json-events
aaa_download_only_test.go:81: (dbg) Run:  out/minikube-linux-arm64 start -o=json --download-only -p download-only-244388 --force --alsologtostderr --kubernetes-version=v1.20.0 --container-runtime=docker --driver=docker  --container-runtime=docker
aaa_download_only_test.go:81: (dbg) Done: out/minikube-linux-arm64 start -o=json --download-only -p download-only-244388 --force --alsologtostderr --kubernetes-version=v1.20.0 --container-runtime=docker --driver=docker  --container-runtime=docker: (12.874790085s)
--- PASS: TestDownloadOnly/v1.20.0/json-events (12.88s)

TestDownloadOnly/v1.20.0/preload-exists (0s)

=== RUN   TestDownloadOnly/v1.20.0/preload-exists
--- PASS: TestDownloadOnly/v1.20.0/preload-exists (0.00s)

TestDownloadOnly/v1.20.0/LogsDuration (0.07s)

=== RUN   TestDownloadOnly/v1.20.0/LogsDuration
aaa_download_only_test.go:184: (dbg) Run:  out/minikube-linux-arm64 logs -p download-only-244388
aaa_download_only_test.go:184: (dbg) Non-zero exit: out/minikube-linux-arm64 logs -p download-only-244388: exit status 85 (70.979436ms)

-- stdout --
	
	==> Audit <==
	|---------|--------------------------------|----------------------|---------|---------|---------------------|----------|
	| Command |              Args              |       Profile        |  User   | Version |     Start Time      | End Time |
	|---------|--------------------------------|----------------------|---------|---------|---------------------|----------|
	| start   | -o=json --download-only        | download-only-244388 | jenkins | v1.33.1 | 15 Aug 24 23:05 UTC |          |
	|         | -p download-only-244388        |                      |         |         |                     |          |
	|         | --force --alsologtostderr      |                      |         |         |                     |          |
	|         | --kubernetes-version=v1.20.0   |                      |         |         |                     |          |
	|         | --container-runtime=docker     |                      |         |         |                     |          |
	|         | --driver=docker                |                      |         |         |                     |          |
	|         | --container-runtime=docker     |                      |         |         |                     |          |
	|---------|--------------------------------|----------------------|---------|---------|---------------------|----------|
	
	
	==> Last Start <==
	Log file created at: 2024/08/15 23:05:34
	Running on machine: ip-172-31-29-130
	Binary: Built with gc go1.22.5 for linux/arm64
	Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
	I0815 23:05:34.004199 2031402 out.go:345] Setting OutFile to fd 1 ...
	I0815 23:05:34.004409 2031402 out.go:392] TERM=,COLORTERM=, which probably does not support color
	I0815 23:05:34.004415 2031402 out.go:358] Setting ErrFile to fd 2...
	I0815 23:05:34.004420 2031402 out.go:392] TERM=,COLORTERM=, which probably does not support color
	I0815 23:05:34.004697 2031402 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19452-2026001/.minikube/bin
	W0815 23:05:34.004862 2031402 root.go:314] Error reading config file at /home/jenkins/minikube-integration/19452-2026001/.minikube/config/config.json: open /home/jenkins/minikube-integration/19452-2026001/.minikube/config/config.json: no such file or directory
	I0815 23:05:34.005339 2031402 out.go:352] Setting JSON to true
	I0815 23:05:34.006408 2031402 start.go:129] hostinfo: {"hostname":"ip-172-31-29-130","uptime":28078,"bootTime":1723735056,"procs":149,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1067-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"36adf542-ef4f-4e2d-a0c8-6868d1383ff9"}
	I0815 23:05:34.006503 2031402 start.go:139] virtualization:  
	I0815 23:05:34.010605 2031402 out.go:97] [download-only-244388] minikube v1.33.1 on Ubuntu 20.04 (arm64)
	W0815 23:05:34.010841 2031402 preload.go:293] Failed to list preload files: open /home/jenkins/minikube-integration/19452-2026001/.minikube/cache/preloaded-tarball: no such file or directory
	I0815 23:05:34.010907 2031402 notify.go:220] Checking for updates...
	I0815 23:05:34.014152 2031402 out.go:169] MINIKUBE_LOCATION=19452
	I0815 23:05:34.016994 2031402 out.go:169] MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I0815 23:05:34.019690 2031402 out.go:169] KUBECONFIG=/home/jenkins/minikube-integration/19452-2026001/kubeconfig
	I0815 23:05:34.022232 2031402 out.go:169] MINIKUBE_HOME=/home/jenkins/minikube-integration/19452-2026001/.minikube
	I0815 23:05:34.024513 2031402 out.go:169] MINIKUBE_BIN=out/minikube-linux-arm64
	W0815 23:05:34.028627 2031402 out.go:321] minikube skips various validations when --force is supplied; this may lead to unexpected behavior
	I0815 23:05:34.028917 2031402 driver.go:392] Setting default libvirt URI to qemu:///system
	I0815 23:05:34.053989 2031402 docker.go:123] docker version: linux-27.1.2:Docker Engine - Community
	I0815 23:05:34.054086 2031402 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0815 23:05:34.108403 2031402 info.go:266] docker info: {ID:U5VK:ZNT5:35M3:FHLW:Q7TL:ELFX:BNAG:AV4T:UD2H:SK5L:SEJV:SJJL Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:29 OomKillDisable:true NGoroutines:52 SystemTime:2024-08-15 23:05:34.099179831 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1067-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aar
ch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214900736 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-29-130 Labels:[] ExperimentalBuild:false ServerVersion:27.1.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:8fc6bcff51318944179630522a095cc9dbf9f353 Expected:8fc6bcff51318944179630522a095cc9dbf9f353} RuncCommit:{ID:v1.1.13-0-g58aa920 Expected:v1.1.13-0-g58aa920} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErro
rs:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.16.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.29.1]] Warnings:<nil>}}
	I0815 23:05:34.108519 2031402 docker.go:307] overlay module found
	I0815 23:05:34.110532 2031402 out.go:97] Using the docker driver based on user configuration
	I0815 23:05:34.110557 2031402 start.go:297] selected driver: docker
	I0815 23:05:34.110564 2031402 start.go:901] validating driver "docker" against <nil>
	I0815 23:05:34.110685 2031402 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0815 23:05:34.162013 2031402 info.go:266] docker info: {ID:U5VK:ZNT5:35M3:FHLW:Q7TL:ELFX:BNAG:AV4T:UD2H:SK5L:SEJV:SJJL Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:29 OomKillDisable:true NGoroutines:52 SystemTime:2024-08-15 23:05:34.152837496 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1067-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aar
ch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214900736 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-29-130 Labels:[] ExperimentalBuild:false ServerVersion:27.1.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:8fc6bcff51318944179630522a095cc9dbf9f353 Expected:8fc6bcff51318944179630522a095cc9dbf9f353} RuncCommit:{ID:v1.1.13-0-g58aa920 Expected:v1.1.13-0-g58aa920} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErro
rs:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.16.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.29.1]] Warnings:<nil>}}
	I0815 23:05:34.162192 2031402 start_flags.go:310] no existing cluster config was found, will generate one from the flags 
	I0815 23:05:34.162491 2031402 start_flags.go:393] Using suggested 2200MB memory alloc based on sys=7834MB, container=7834MB
	I0815 23:05:34.162649 2031402 start_flags.go:929] Wait components to verify : map[apiserver:true system_pods:true]
	I0815 23:05:34.165285 2031402 out.go:169] Using Docker driver with root privileges
	I0815 23:05:34.167122 2031402 cni.go:84] Creating CNI manager for ""
	I0815 23:05:34.167157 2031402 cni.go:162] CNI unnecessary in this configuration, recommending no CNI
	I0815 23:05:34.167244 2031402 start.go:340] cluster config:
	{Name:download-only-244388 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.20.0 ClusterName:download-only-244388 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local Co
ntainerRuntime:docker CRISocket: NetworkPlugin: FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.20.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I0815 23:05:34.169274 2031402 out.go:97] Starting "download-only-244388" primary control-plane node in "download-only-244388" cluster
	I0815 23:05:34.169293 2031402 cache.go:121] Beginning downloading kic base image for docker with docker
	I0815 23:05:34.171360 2031402 out.go:97] Pulling base image v0.0.44-1723740748-19452 ...
	I0815 23:05:34.171394 2031402 preload.go:131] Checking if preload exists for k8s version v1.20.0 and runtime docker
	I0815 23:05:34.171559 2031402 image.go:79] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d in local docker daemon
	I0815 23:05:34.187225 2031402 cache.go:149] Downloading gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d to local cache
	I0815 23:05:34.187417 2031402 image.go:63] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d in local cache directory
	I0815 23:05:34.187517 2031402 image.go:148] Writing gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d to local cache
	I0815 23:05:34.236266 2031402 preload.go:118] Found remote preload: https://storage.googleapis.com/minikube-preloaded-volume-tarballs/v18/v1.20.0/preloaded-images-k8s-v18-v1.20.0-docker-overlay2-arm64.tar.lz4
	I0815 23:05:34.236290 2031402 cache.go:56] Caching tarball of preloaded images
	I0815 23:05:34.236456 2031402 preload.go:131] Checking if preload exists for k8s version v1.20.0 and runtime docker
	I0815 23:05:34.238912 2031402 out.go:97] Downloading Kubernetes v1.20.0 preload ...
	I0815 23:05:34.238945 2031402 preload.go:236] getting checksum for preloaded-images-k8s-v18-v1.20.0-docker-overlay2-arm64.tar.lz4 ...
	I0815 23:05:34.323610 2031402 download.go:107] Downloading: https://storage.googleapis.com/minikube-preloaded-volume-tarballs/v18/v1.20.0/preloaded-images-k8s-v18-v1.20.0-docker-overlay2-arm64.tar.lz4?checksum=md5:1a3e8f9b29e6affec63d76d0d3000942 -> /home/jenkins/minikube-integration/19452-2026001/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.20.0-docker-overlay2-arm64.tar.lz4
	I0815 23:05:39.750877 2031402 preload.go:247] saving checksum for preloaded-images-k8s-v18-v1.20.0-docker-overlay2-arm64.tar.lz4 ...
	I0815 23:05:39.751039 2031402 preload.go:254] verifying checksum of /home/jenkins/minikube-integration/19452-2026001/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.20.0-docker-overlay2-arm64.tar.lz4 ...
	I0815 23:05:40.934953 2031402 cache.go:59] Finished verifying existence of preloaded tar for v1.20.0 on docker
	I0815 23:05:40.935321 2031402 profile.go:143] Saving config to /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/download-only-244388/config.json ...
	I0815 23:05:40.935357 2031402 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/download-only-244388/config.json: {Name:mkd179f6fa4af488e710c76d58cf458cf368bde1 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I0815 23:05:40.935551 2031402 preload.go:131] Checking if preload exists for k8s version v1.20.0 and runtime docker
	I0815 23:05:40.935745 2031402 download.go:107] Downloading: https://dl.k8s.io/release/v1.20.0/bin/linux/arm64/kubectl?checksum=file:https://dl.k8s.io/release/v1.20.0/bin/linux/arm64/kubectl.sha256 -> /home/jenkins/minikube-integration/19452-2026001/.minikube/cache/linux/arm64/v1.20.0/kubectl
	I0815 23:05:41.481289 2031402 cache.go:152] successfully saved gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d as a tarball
	
	
	* The control-plane node download-only-244388 host does not exist
	  To start a cluster, run: "minikube start -p download-only-244388"

-- /stdout --
aaa_download_only_test.go:185: minikube logs failed with error: exit status 85
--- PASS: TestDownloadOnly/v1.20.0/LogsDuration (0.07s)

TestDownloadOnly/v1.20.0/DeleteAll (0.2s)

=== RUN   TestDownloadOnly/v1.20.0/DeleteAll
aaa_download_only_test.go:197: (dbg) Run:  out/minikube-linux-arm64 delete --all
--- PASS: TestDownloadOnly/v1.20.0/DeleteAll (0.20s)

TestDownloadOnly/v1.20.0/DeleteAlwaysSucceeds (0.13s)

=== RUN   TestDownloadOnly/v1.20.0/DeleteAlwaysSucceeds
aaa_download_only_test.go:208: (dbg) Run:  out/minikube-linux-arm64 delete -p download-only-244388
--- PASS: TestDownloadOnly/v1.20.0/DeleteAlwaysSucceeds (0.13s)

TestDownloadOnly/v1.31.0/json-events (6.71s)

=== RUN   TestDownloadOnly/v1.31.0/json-events
aaa_download_only_test.go:81: (dbg) Run:  out/minikube-linux-arm64 start -o=json --download-only -p download-only-872081 --force --alsologtostderr --kubernetes-version=v1.31.0 --container-runtime=docker --driver=docker  --container-runtime=docker
aaa_download_only_test.go:81: (dbg) Done: out/minikube-linux-arm64 start -o=json --download-only -p download-only-872081 --force --alsologtostderr --kubernetes-version=v1.31.0 --container-runtime=docker --driver=docker  --container-runtime=docker: (6.7143163s)
--- PASS: TestDownloadOnly/v1.31.0/json-events (6.71s)

TestDownloadOnly/v1.31.0/preload-exists (0s)

=== RUN   TestDownloadOnly/v1.31.0/preload-exists
--- PASS: TestDownloadOnly/v1.31.0/preload-exists (0.00s)

TestDownloadOnly/v1.31.0/LogsDuration (0.08s)

=== RUN   TestDownloadOnly/v1.31.0/LogsDuration
aaa_download_only_test.go:184: (dbg) Run:  out/minikube-linux-arm64 logs -p download-only-872081
aaa_download_only_test.go:184: (dbg) Non-zero exit: out/minikube-linux-arm64 logs -p download-only-872081: exit status 85 (78.405406ms)

-- stdout --
	
	==> Audit <==
	|---------|--------------------------------|----------------------|---------|---------|---------------------|---------------------|
	| Command |              Args              |       Profile        |  User   | Version |     Start Time      |      End Time       |
	|---------|--------------------------------|----------------------|---------|---------|---------------------|---------------------|
	| start   | -o=json --download-only        | download-only-244388 | jenkins | v1.33.1 | 15 Aug 24 23:05 UTC |                     |
	|         | -p download-only-244388        |                      |         |         |                     |                     |
	|         | --force --alsologtostderr      |                      |         |         |                     |                     |
	|         | --kubernetes-version=v1.20.0   |                      |         |         |                     |                     |
	|         | --container-runtime=docker     |                      |         |         |                     |                     |
	|         | --driver=docker                |                      |         |         |                     |                     |
	|         | --container-runtime=docker     |                      |         |         |                     |                     |
	| delete  | --all                          | minikube             | jenkins | v1.33.1 | 15 Aug 24 23:05 UTC | 15 Aug 24 23:05 UTC |
	| delete  | -p download-only-244388        | download-only-244388 | jenkins | v1.33.1 | 15 Aug 24 23:05 UTC | 15 Aug 24 23:05 UTC |
	| start   | -o=json --download-only        | download-only-872081 | jenkins | v1.33.1 | 15 Aug 24 23:05 UTC |                     |
	|         | -p download-only-872081        |                      |         |         |                     |                     |
	|         | --force --alsologtostderr      |                      |         |         |                     |                     |
	|         | --kubernetes-version=v1.31.0   |                      |         |         |                     |                     |
	|         | --container-runtime=docker     |                      |         |         |                     |                     |
	|         | --driver=docker                |                      |         |         |                     |                     |
	|         | --container-runtime=docker     |                      |         |         |                     |                     |
	|---------|--------------------------------|----------------------|---------|---------|---------------------|---------------------|
	
	
	==> Last Start <==
	Log file created at: 2024/08/15 23:05:47
	Running on machine: ip-172-31-29-130
	Binary: Built with gc go1.22.5 for linux/arm64
	Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
	I0815 23:05:47.284465 2031604 out.go:345] Setting OutFile to fd 1 ...
	I0815 23:05:47.284595 2031604 out.go:392] TERM=,COLORTERM=, which probably does not support color
	I0815 23:05:47.284606 2031604 out.go:358] Setting ErrFile to fd 2...
	I0815 23:05:47.284611 2031604 out.go:392] TERM=,COLORTERM=, which probably does not support color
	I0815 23:05:47.284935 2031604 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19452-2026001/.minikube/bin
	I0815 23:05:47.285395 2031604 out.go:352] Setting JSON to true
	I0815 23:05:47.286262 2031604 start.go:129] hostinfo: {"hostname":"ip-172-31-29-130","uptime":28092,"bootTime":1723735056,"procs":146,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1067-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"36adf542-ef4f-4e2d-a0c8-6868d1383ff9"}
	I0815 23:05:47.286366 2031604 start.go:139] virtualization:  
	I0815 23:05:47.288636 2031604 out.go:97] [download-only-872081] minikube v1.33.1 on Ubuntu 20.04 (arm64)
	I0815 23:05:47.288893 2031604 notify.go:220] Checking for updates...
	I0815 23:05:47.290653 2031604 out.go:169] MINIKUBE_LOCATION=19452
	I0815 23:05:47.292338 2031604 out.go:169] MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I0815 23:05:47.294154 2031604 out.go:169] KUBECONFIG=/home/jenkins/minikube-integration/19452-2026001/kubeconfig
	I0815 23:05:47.295854 2031604 out.go:169] MINIKUBE_HOME=/home/jenkins/minikube-integration/19452-2026001/.minikube
	I0815 23:05:47.297477 2031604 out.go:169] MINIKUBE_BIN=out/minikube-linux-arm64
	W0815 23:05:47.300602 2031604 out.go:321] minikube skips various validations when --force is supplied; this may lead to unexpected behavior
	I0815 23:05:47.300901 2031604 driver.go:392] Setting default libvirt URI to qemu:///system
	I0815 23:05:47.321602 2031604 docker.go:123] docker version: linux-27.1.2:Docker Engine - Community
	I0815 23:05:47.321756 2031604 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0815 23:05:47.382038 2031604 info.go:266] docker info: {ID:U5VK:ZNT5:35M3:FHLW:Q7TL:ELFX:BNAG:AV4T:UD2H:SK5L:SEJV:SJJL Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:29 OomKillDisable:true NGoroutines:44 SystemTime:2024-08-15 23:05:47.373105322 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1067-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aar
ch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214900736 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-29-130 Labels:[] ExperimentalBuild:false ServerVersion:27.1.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:8fc6bcff51318944179630522a095cc9dbf9f353 Expected:8fc6bcff51318944179630522a095cc9dbf9f353} RuncCommit:{ID:v1.1.13-0-g58aa920 Expected:v1.1.13-0-g58aa920} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErro
rs:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.16.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.29.1]] Warnings:<nil>}}
	I0815 23:05:47.382155 2031604 docker.go:307] overlay module found
	I0815 23:05:47.384344 2031604 out.go:97] Using the docker driver based on user configuration
	I0815 23:05:47.384375 2031604 start.go:297] selected driver: docker
	I0815 23:05:47.384384 2031604 start.go:901] validating driver "docker" against <nil>
	I0815 23:05:47.384500 2031604 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0815 23:05:47.434268 2031604 info.go:266] docker info: {ID:U5VK:ZNT5:35M3:FHLW:Q7TL:ELFX:BNAG:AV4T:UD2H:SK5L:SEJV:SJJL Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:29 OomKillDisable:true NGoroutines:44 SystemTime:2024-08-15 23:05:47.425601799 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1067-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aar
ch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214900736 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-29-130 Labels:[] ExperimentalBuild:false ServerVersion:27.1.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:8fc6bcff51318944179630522a095cc9dbf9f353 Expected:8fc6bcff51318944179630522a095cc9dbf9f353} RuncCommit:{ID:v1.1.13-0-g58aa920 Expected:v1.1.13-0-g58aa920} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErro
rs:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.16.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.29.1]] Warnings:<nil>}}
	I0815 23:05:47.434441 2031604 start_flags.go:310] no existing cluster config was found, will generate one from the flags 
	I0815 23:05:47.434731 2031604 start_flags.go:393] Using suggested 2200MB memory alloc based on sys=7834MB, container=7834MB
	I0815 23:05:47.434885 2031604 start_flags.go:929] Wait components to verify : map[apiserver:true system_pods:true]
	I0815 23:05:47.436790 2031604 out.go:169] Using Docker driver with root privileges
	I0815 23:05:47.438496 2031604 cni.go:84] Creating CNI manager for ""
	I0815 23:05:47.438524 2031604 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
	I0815 23:05:47.438534 2031604 start_flags.go:319] Found "bridge CNI" CNI - setting NetworkPlugin=cni
	I0815 23:05:47.438612 2031604 start.go:340] cluster config:
	{Name:download-only-872081 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.0 ClusterName:download-only-872081 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local Co
ntainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.31.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I0815 23:05:47.440574 2031604 out.go:97] Starting "download-only-872081" primary control-plane node in "download-only-872081" cluster
	I0815 23:05:47.440595 2031604 cache.go:121] Beginning downloading kic base image for docker with docker
	I0815 23:05:47.442454 2031604 out.go:97] Pulling base image v0.0.44-1723740748-19452 ...
	I0815 23:05:47.442486 2031604 preload.go:131] Checking if preload exists for k8s version v1.31.0 and runtime docker
	I0815 23:05:47.442697 2031604 image.go:79] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d in local docker daemon
	I0815 23:05:47.458359 2031604 cache.go:149] Downloading gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d to local cache
	I0815 23:05:47.458478 2031604 image.go:63] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d in local cache directory
	I0815 23:05:47.458505 2031604 image.go:66] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d in local cache directory, skipping pull
	I0815 23:05:47.458516 2031604 image.go:135] gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d exists in cache, skipping pull
	I0815 23:05:47.458538 2031604 cache.go:152] successfully saved gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d as a tarball
	I0815 23:05:47.522520 2031604 preload.go:118] Found remote preload: https://storage.googleapis.com/minikube-preloaded-volume-tarballs/v18/v1.31.0/preloaded-images-k8s-v18-v1.31.0-docker-overlay2-arm64.tar.lz4
	I0815 23:05:47.522551 2031604 cache.go:56] Caching tarball of preloaded images
	I0815 23:05:47.522724 2031604 preload.go:131] Checking if preload exists for k8s version v1.31.0 and runtime docker
	I0815 23:05:47.524901 2031604 out.go:97] Downloading Kubernetes v1.31.0 preload ...
	I0815 23:05:47.524923 2031604 preload.go:236] getting checksum for preloaded-images-k8s-v18-v1.31.0-docker-overlay2-arm64.tar.lz4 ...
	I0815 23:05:47.605732 2031604 download.go:107] Downloading: https://storage.googleapis.com/minikube-preloaded-volume-tarballs/v18/v1.31.0/preloaded-images-k8s-v18-v1.31.0-docker-overlay2-arm64.tar.lz4?checksum=md5:90c22abece392b762c0b4e45be981bb4 -> /home/jenkins/minikube-integration/19452-2026001/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.0-docker-overlay2-arm64.tar.lz4
	
	
	* The control-plane node download-only-872081 host does not exist
	  To start a cluster, run: "minikube start -p download-only-872081"

-- /stdout --
aaa_download_only_test.go:185: minikube logs failed with error: exit status 85
--- PASS: TestDownloadOnly/v1.31.0/LogsDuration (0.08s)

TestDownloadOnly/v1.31.0/DeleteAll (0.21s)

=== RUN   TestDownloadOnly/v1.31.0/DeleteAll
aaa_download_only_test.go:197: (dbg) Run:  out/minikube-linux-arm64 delete --all
--- PASS: TestDownloadOnly/v1.31.0/DeleteAll (0.21s)

TestDownloadOnly/v1.31.0/DeleteAlwaysSucceeds (0.13s)

=== RUN   TestDownloadOnly/v1.31.0/DeleteAlwaysSucceeds
aaa_download_only_test.go:208: (dbg) Run:  out/minikube-linux-arm64 delete -p download-only-872081
--- PASS: TestDownloadOnly/v1.31.0/DeleteAlwaysSucceeds (0.13s)

TestBinaryMirror (0.58s)

=== RUN   TestBinaryMirror
aaa_download_only_test.go:314: (dbg) Run:  out/minikube-linux-arm64 start --download-only -p binary-mirror-950283 --alsologtostderr --binary-mirror http://127.0.0.1:43097 --driver=docker  --container-runtime=docker
helpers_test.go:175: Cleaning up "binary-mirror-950283" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p binary-mirror-950283
--- PASS: TestBinaryMirror (0.58s)

TestOffline (85.2s)

=== RUN   TestOffline
=== PAUSE TestOffline

=== CONT  TestOffline
aab_offline_test.go:55: (dbg) Run:  out/minikube-linux-arm64 start -p offline-docker-324523 --alsologtostderr -v=1 --memory=2048 --wait=true --driver=docker  --container-runtime=docker
aab_offline_test.go:55: (dbg) Done: out/minikube-linux-arm64 start -p offline-docker-324523 --alsologtostderr -v=1 --memory=2048 --wait=true --driver=docker  --container-runtime=docker: (1m22.816760991s)
helpers_test.go:175: Cleaning up "offline-docker-324523" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p offline-docker-324523
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p offline-docker-324523: (2.378576861s)
--- PASS: TestOffline (85.20s)

TestAddons/PreSetup/EnablingAddonOnNonExistingCluster (0.07s)

=== RUN   TestAddons/PreSetup/EnablingAddonOnNonExistingCluster
=== PAUSE TestAddons/PreSetup/EnablingAddonOnNonExistingCluster

=== CONT  TestAddons/PreSetup/EnablingAddonOnNonExistingCluster
addons_test.go:1037: (dbg) Run:  out/minikube-linux-arm64 addons enable dashboard -p addons-083442
addons_test.go:1037: (dbg) Non-zero exit: out/minikube-linux-arm64 addons enable dashboard -p addons-083442: exit status 85 (66.450065ms)

-- stdout --
	* Profile "addons-083442" not found. Run "minikube profile list" to view all profiles.
	  To start a cluster, run: "minikube start -p addons-083442"

-- /stdout --
--- PASS: TestAddons/PreSetup/EnablingAddonOnNonExistingCluster (0.07s)

TestAddons/PreSetup/DisablingAddonOnNonExistingCluster (0.06s)

=== RUN   TestAddons/PreSetup/DisablingAddonOnNonExistingCluster
=== PAUSE TestAddons/PreSetup/DisablingAddonOnNonExistingCluster

=== CONT  TestAddons/PreSetup/DisablingAddonOnNonExistingCluster
addons_test.go:1048: (dbg) Run:  out/minikube-linux-arm64 addons disable dashboard -p addons-083442
addons_test.go:1048: (dbg) Non-zero exit: out/minikube-linux-arm64 addons disable dashboard -p addons-083442: exit status 85 (59.272296ms)

-- stdout --
	* Profile "addons-083442" not found. Run "minikube profile list" to view all profiles.
	  To start a cluster, run: "minikube start -p addons-083442"

-- /stdout --
--- PASS: TestAddons/PreSetup/DisablingAddonOnNonExistingCluster (0.06s)

TestAddons/Setup (220.31s)

=== RUN   TestAddons/Setup
addons_test.go:110: (dbg) Run:  out/minikube-linux-arm64 start -p addons-083442 --wait=true --memory=4000 --alsologtostderr --addons=registry --addons=metrics-server --addons=volumesnapshots --addons=csi-hostpath-driver --addons=gcp-auth --addons=cloud-spanner --addons=inspektor-gadget --addons=storage-provisioner-rancher --addons=nvidia-device-plugin --addons=yakd --addons=volcano --driver=docker  --container-runtime=docker --addons=ingress --addons=ingress-dns
addons_test.go:110: (dbg) Done: out/minikube-linux-arm64 start -p addons-083442 --wait=true --memory=4000 --alsologtostderr --addons=registry --addons=metrics-server --addons=volumesnapshots --addons=csi-hostpath-driver --addons=gcp-auth --addons=cloud-spanner --addons=inspektor-gadget --addons=storage-provisioner-rancher --addons=nvidia-device-plugin --addons=yakd --addons=volcano --driver=docker  --container-runtime=docker --addons=ingress --addons=ingress-dns: (3m40.3137195s)
--- PASS: TestAddons/Setup (220.31s)

TestAddons/serial/Volcano (42.09s)

=== RUN   TestAddons/serial/Volcano
addons_test.go:905: volcano-admission stabilized in 44.202171ms
addons_test.go:897: volcano-scheduler stabilized in 44.59915ms
addons_test.go:913: volcano-controller stabilized in 44.769993ms
addons_test.go:919: (dbg) TestAddons/serial/Volcano: waiting 6m0s for pods matching "app=volcano-scheduler" in namespace "volcano-system" ...
helpers_test.go:344: "volcano-scheduler-576bc46687-7nh9r" [8f939bb5-c7ae-4361-b8b0-69b322f7368f] Running
addons_test.go:919: (dbg) TestAddons/serial/Volcano: app=volcano-scheduler healthy within 6.004313316s
addons_test.go:923: (dbg) TestAddons/serial/Volcano: waiting 6m0s for pods matching "app=volcano-admission" in namespace "volcano-system" ...
helpers_test.go:344: "volcano-admission-77d7d48b68-8cnv4" [1cd892fb-c37b-455c-aa21-4f2d2c430764] Running
addons_test.go:923: (dbg) TestAddons/serial/Volcano: app=volcano-admission healthy within 6.004537743s
addons_test.go:927: (dbg) TestAddons/serial/Volcano: waiting 6m0s for pods matching "app=volcano-controller" in namespace "volcano-system" ...
helpers_test.go:344: "volcano-controllers-56675bb4d5-8767c" [929b4373-2912-441d-92c3-cfd5f8b1db44] Running
addons_test.go:927: (dbg) TestAddons/serial/Volcano: app=volcano-controller healthy within 5.003654882s
addons_test.go:932: (dbg) Run:  kubectl --context addons-083442 delete -n volcano-system job volcano-admission-init
addons_test.go:938: (dbg) Run:  kubectl --context addons-083442 create -f testdata/vcjob.yaml
addons_test.go:946: (dbg) Run:  kubectl --context addons-083442 get vcjob -n my-volcano
addons_test.go:964: (dbg) TestAddons/serial/Volcano: waiting 3m0s for pods matching "volcano.sh/job-name=test-job" in namespace "my-volcano" ...
helpers_test.go:344: "test-job-nginx-0" [36726236-015d-42e9-82c1-2c2cbf32e6be] Pending
helpers_test.go:344: "test-job-nginx-0" [36726236-015d-42e9-82c1-2c2cbf32e6be] Pending / Ready:ContainersNotReady (containers with unready status: [nginx]) / ContainersReady:ContainersNotReady (containers with unready status: [nginx])
helpers_test.go:344: "test-job-nginx-0" [36726236-015d-42e9-82c1-2c2cbf32e6be] Running
addons_test.go:964: (dbg) TestAddons/serial/Volcano: volcano.sh/job-name=test-job healthy within 14.003817093s
addons_test.go:968: (dbg) Run:  out/minikube-linux-arm64 -p addons-083442 addons disable volcano --alsologtostderr -v=1
addons_test.go:968: (dbg) Done: out/minikube-linux-arm64 -p addons-083442 addons disable volcano --alsologtostderr -v=1: (10.454934691s)
--- PASS: TestAddons/serial/Volcano (42.09s)

TestAddons/serial/GCPAuth/Namespaces (0.19s)

=== RUN   TestAddons/serial/GCPAuth/Namespaces
addons_test.go:656: (dbg) Run:  kubectl --context addons-083442 create ns new-namespace
addons_test.go:670: (dbg) Run:  kubectl --context addons-083442 get secret gcp-auth -n new-namespace
--- PASS: TestAddons/serial/GCPAuth/Namespaces (0.19s)

TestAddons/parallel/Registry (16.88s)

=== RUN   TestAddons/parallel/Registry
=== PAUSE TestAddons/parallel/Registry

=== CONT  TestAddons/parallel/Registry
addons_test.go:332: registry stabilized in 3.488715ms
addons_test.go:334: (dbg) TestAddons/parallel/Registry: waiting 6m0s for pods matching "actual-registry=true" in namespace "kube-system" ...
helpers_test.go:344: "registry-6fb4cdfc84-l2swm" [4a862dfc-1859-4b63-a82b-7cd192ec574f] Running
addons_test.go:334: (dbg) TestAddons/parallel/Registry: actual-registry=true healthy within 6.016159646s
addons_test.go:337: (dbg) TestAddons/parallel/Registry: waiting 10m0s for pods matching "registry-proxy=true" in namespace "kube-system" ...
helpers_test.go:344: "registry-proxy-xw6bc" [de2eade4-6989-4aa0-a9be-364dbd36959d] Running
addons_test.go:337: (dbg) TestAddons/parallel/Registry: registry-proxy=true healthy within 6.00353492s
addons_test.go:342: (dbg) Run:  kubectl --context addons-083442 delete po -l run=registry-test --now
addons_test.go:347: (dbg) Run:  kubectl --context addons-083442 run --rm registry-test --restart=Never --image=gcr.io/k8s-minikube/busybox -it -- sh -c "wget --spider -S http://registry.kube-system.svc.cluster.local"
addons_test.go:347: (dbg) Done: kubectl --context addons-083442 run --rm registry-test --restart=Never --image=gcr.io/k8s-minikube/busybox -it -- sh -c "wget --spider -S http://registry.kube-system.svc.cluster.local": (3.971456551s)
addons_test.go:361: (dbg) Run:  out/minikube-linux-arm64 -p addons-083442 ip
2024/08/15 23:10:51 [DEBUG] GET http://192.168.49.2:5000
addons_test.go:390: (dbg) Run:  out/minikube-linux-arm64 -p addons-083442 addons disable registry --alsologtostderr -v=1
--- PASS: TestAddons/parallel/Registry (16.88s)

TestAddons/parallel/Ingress (19.13s)

=== RUN   TestAddons/parallel/Ingress
=== PAUSE TestAddons/parallel/Ingress

=== CONT  TestAddons/parallel/Ingress
addons_test.go:209: (dbg) Run:  kubectl --context addons-083442 wait --for=condition=ready --namespace=ingress-nginx pod --selector=app.kubernetes.io/component=controller --timeout=90s
addons_test.go:234: (dbg) Run:  kubectl --context addons-083442 replace --force -f testdata/nginx-ingress-v1.yaml
addons_test.go:247: (dbg) Run:  kubectl --context addons-083442 replace --force -f testdata/nginx-pod-svc.yaml
addons_test.go:252: (dbg) TestAddons/parallel/Ingress: waiting 8m0s for pods matching "run=nginx" in namespace "default" ...
helpers_test.go:344: "nginx" [ab07a204-dc70-4307-829c-597221ef7d14] Pending / Ready:ContainersNotReady (containers with unready status: [nginx]) / ContainersReady:ContainersNotReady (containers with unready status: [nginx])
helpers_test.go:344: "nginx" [ab07a204-dc70-4307-829c-597221ef7d14] Running
addons_test.go:252: (dbg) TestAddons/parallel/Ingress: run=nginx healthy within 9.003865972s
addons_test.go:264: (dbg) Run:  out/minikube-linux-arm64 -p addons-083442 ssh "curl -s http://127.0.0.1/ -H 'Host: nginx.example.com'"
addons_test.go:288: (dbg) Run:  kubectl --context addons-083442 replace --force -f testdata/ingress-dns-example-v1.yaml
addons_test.go:293: (dbg) Run:  out/minikube-linux-arm64 -p addons-083442 ip
addons_test.go:299: (dbg) Run:  nslookup hello-john.test 192.168.49.2
addons_test.go:308: (dbg) Run:  out/minikube-linux-arm64 -p addons-083442 addons disable ingress-dns --alsologtostderr -v=1
addons_test.go:313: (dbg) Run:  out/minikube-linux-arm64 -p addons-083442 addons disable ingress --alsologtostderr -v=1
addons_test.go:313: (dbg) Done: out/minikube-linux-arm64 -p addons-083442 addons disable ingress --alsologtostderr -v=1: (7.706995786s)
--- PASS: TestAddons/parallel/Ingress (19.13s)
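
The checks above can be replayed by hand against the same profile while it still exists. A minimal sketch, assuming the ingress and ingress-dns addons are enabled and the nginx Ingress/Service from the testdata manifests (not shown in this log) are applied:

  # node IP used by ingress-dns for name resolution
  MINIKUBE_IP=$(out/minikube-linux-arm64 -p addons-083442 ip)
  # hit the ingress controller from inside the node, overriding the Host header
  out/minikube-linux-arm64 -p addons-083442 ssh "curl -s http://127.0.0.1/ -H 'Host: nginx.example.com'"
  # ingress-dns resolves hostnames declared in Ingress rules via the node IP
  nslookup hello-john.test "$MINIKUBE_IP"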

                                                
                                    
TestAddons/parallel/InspektorGadget (10.89s)

=== RUN   TestAddons/parallel/InspektorGadget
=== PAUSE TestAddons/parallel/InspektorGadget

=== CONT  TestAddons/parallel/InspektorGadget
addons_test.go:848: (dbg) TestAddons/parallel/InspektorGadget: waiting 8m0s for pods matching "k8s-app=gadget" in namespace "gadget" ...
helpers_test.go:344: "gadget-cxflj" [d1a59678-9996-4996-a190-6dee01fdaf80] Running / Ready:ContainersNotReady (containers with unready status: [gadget]) / ContainersReady:ContainersNotReady (containers with unready status: [gadget])
addons_test.go:848: (dbg) TestAddons/parallel/InspektorGadget: k8s-app=gadget healthy within 5.063087478s
addons_test.go:851: (dbg) Run:  out/minikube-linux-arm64 addons disable inspektor-gadget -p addons-083442
addons_test.go:851: (dbg) Done: out/minikube-linux-arm64 addons disable inspektor-gadget -p addons-083442: (5.823827128s)
--- PASS: TestAddons/parallel/InspektorGadget (10.89s)

TestAddons/parallel/MetricsServer (6.72s)

=== RUN   TestAddons/parallel/MetricsServer
=== PAUSE TestAddons/parallel/MetricsServer

=== CONT  TestAddons/parallel/MetricsServer
addons_test.go:409: metrics-server stabilized in 5.438388ms
addons_test.go:411: (dbg) TestAddons/parallel/MetricsServer: waiting 6m0s for pods matching "k8s-app=metrics-server" in namespace "kube-system" ...
helpers_test.go:344: "metrics-server-8988944d9-lxsbz" [5276d5b3-3d5f-4806-a86b-b7d8cf15b9f5] Running
addons_test.go:411: (dbg) TestAddons/parallel/MetricsServer: k8s-app=metrics-server healthy within 6.004045048s
addons_test.go:417: (dbg) Run:  kubectl --context addons-083442 top pods -n kube-system
addons_test.go:434: (dbg) Run:  out/minikube-linux-arm64 -p addons-083442 addons disable metrics-server --alsologtostderr -v=1
--- PASS: TestAddons/parallel/MetricsServer (6.72s)

TestAddons/parallel/CSI (38.98s)

=== RUN   TestAddons/parallel/CSI
=== PAUSE TestAddons/parallel/CSI

=== CONT  TestAddons/parallel/CSI
addons_test.go:567: csi-hostpath-driver pods stabilized in 8.038615ms
addons_test.go:570: (dbg) Run:  kubectl --context addons-083442 create -f testdata/csi-hostpath-driver/pvc.yaml
addons_test.go:575: (dbg) TestAddons/parallel/CSI: waiting 6m0s for pvc "hpvc" in namespace "default" ...
helpers_test.go:394: (dbg) Run:  kubectl --context addons-083442 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-083442 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-083442 get pvc hpvc -o jsonpath={.status.phase} -n default
addons_test.go:580: (dbg) Run:  kubectl --context addons-083442 create -f testdata/csi-hostpath-driver/pv-pod.yaml
addons_test.go:585: (dbg) TestAddons/parallel/CSI: waiting 6m0s for pods matching "app=task-pv-pod" in namespace "default" ...
helpers_test.go:344: "task-pv-pod" [c2efe553-85aa-4c68-b8cc-541932ff7f86] Pending
helpers_test.go:344: "task-pv-pod" [c2efe553-85aa-4c68-b8cc-541932ff7f86] Running
addons_test.go:585: (dbg) TestAddons/parallel/CSI: app=task-pv-pod healthy within 6.004264416s
addons_test.go:590: (dbg) Run:  kubectl --context addons-083442 create -f testdata/csi-hostpath-driver/snapshot.yaml
addons_test.go:595: (dbg) TestAddons/parallel/CSI: waiting 6m0s for volume snapshot "new-snapshot-demo" in namespace "default" ...
helpers_test.go:419: (dbg) Run:  kubectl --context addons-083442 get volumesnapshot new-snapshot-demo -o jsonpath={.status.readyToUse} -n default
helpers_test.go:419: (dbg) Run:  kubectl --context addons-083442 get volumesnapshot new-snapshot-demo -o jsonpath={.status.readyToUse} -n default
addons_test.go:600: (dbg) Run:  kubectl --context addons-083442 delete pod task-pv-pod
addons_test.go:606: (dbg) Run:  kubectl --context addons-083442 delete pvc hpvc
addons_test.go:612: (dbg) Run:  kubectl --context addons-083442 create -f testdata/csi-hostpath-driver/pvc-restore.yaml
addons_test.go:617: (dbg) TestAddons/parallel/CSI: waiting 6m0s for pvc "hpvc-restore" in namespace "default" ...
helpers_test.go:394: (dbg) Run:  kubectl --context addons-083442 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-083442 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-083442 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-083442 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-083442 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-083442 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-083442 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-083442 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-083442 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-083442 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-083442 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-083442 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-083442 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
addons_test.go:622: (dbg) Run:  kubectl --context addons-083442 create -f testdata/csi-hostpath-driver/pv-pod-restore.yaml
addons_test.go:627: (dbg) TestAddons/parallel/CSI: waiting 6m0s for pods matching "app=task-pv-pod-restore" in namespace "default" ...
helpers_test.go:344: "task-pv-pod-restore" [d42a7d1c-8f37-4a80-b42c-942eda3e000b] Pending
helpers_test.go:344: "task-pv-pod-restore" [d42a7d1c-8f37-4a80-b42c-942eda3e000b] Pending / Ready:ContainersNotReady (containers with unready status: [task-pv-container]) / ContainersReady:ContainersNotReady (containers with unready status: [task-pv-container])
helpers_test.go:344: "task-pv-pod-restore" [d42a7d1c-8f37-4a80-b42c-942eda3e000b] Running
addons_test.go:627: (dbg) TestAddons/parallel/CSI: app=task-pv-pod-restore healthy within 7.003917546s
addons_test.go:632: (dbg) Run:  kubectl --context addons-083442 delete pod task-pv-pod-restore
addons_test.go:636: (dbg) Run:  kubectl --context addons-083442 delete pvc hpvc-restore
addons_test.go:640: (dbg) Run:  kubectl --context addons-083442 delete volumesnapshot new-snapshot-demo
addons_test.go:644: (dbg) Run:  out/minikube-linux-arm64 -p addons-083442 addons disable csi-hostpath-driver --alsologtostderr -v=1
addons_test.go:644: (dbg) Done: out/minikube-linux-arm64 -p addons-083442 addons disable csi-hostpath-driver --alsologtostderr -v=1: (6.687565828s)
addons_test.go:648: (dbg) Run:  out/minikube-linux-arm64 -p addons-083442 addons disable volumesnapshots --alsologtostderr -v=1
--- PASS: TestAddons/parallel/CSI (38.98s)
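
The restore leg of the sequence above works because the second PVC (hpvc-restore) points back at the VolumeSnapshot through spec.dataSource. The test's testdata/csi-hostpath-driver manifests are not included in this log; the following is only a rough sketch of what such a restore PVC could look like (storage class name and size are assumptions):

kubectl --context addons-083442 apply -f - <<'EOF'
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: hpvc-restore
spec:
  storageClassName: csi-hostpath-sc   # assumed: class installed by the csi-hostpath-driver addon
  dataSource:
    apiGroup: snapshot.storage.k8s.io
    kind: VolumeSnapshot
    name: new-snapshot-demo           # snapshot created earlier in the test
  accessModes: ["ReadWriteOnce"]
  resources:
    requests:
      storage: 1Gi                    # assumed size
EOF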

                                                
                                    
TestAddons/parallel/Headlamp (16.85s)

=== RUN   TestAddons/parallel/Headlamp
=== PAUSE TestAddons/parallel/Headlamp

=== CONT  TestAddons/parallel/Headlamp
addons_test.go:830: (dbg) Run:  out/minikube-linux-arm64 addons enable headlamp -p addons-083442 --alsologtostderr -v=1
addons_test.go:830: (dbg) Done: out/minikube-linux-arm64 addons enable headlamp -p addons-083442 --alsologtostderr -v=1: (1.153720962s)
addons_test.go:835: (dbg) TestAddons/parallel/Headlamp: waiting 8m0s for pods matching "app.kubernetes.io/name=headlamp" in namespace "headlamp" ...
helpers_test.go:344: "headlamp-57fb76fcdb-tbkkt" [c4b2033a-9aa4-453e-86f6-b9d9fcf2f381] Pending / Ready:ContainersNotReady (containers with unready status: [headlamp]) / ContainersReady:ContainersNotReady (containers with unready status: [headlamp])
helpers_test.go:344: "headlamp-57fb76fcdb-tbkkt" [c4b2033a-9aa4-453e-86f6-b9d9fcf2f381] Running
addons_test.go:835: (dbg) TestAddons/parallel/Headlamp: app.kubernetes.io/name=headlamp healthy within 10.003571922s
addons_test.go:839: (dbg) Run:  out/minikube-linux-arm64 -p addons-083442 addons disable headlamp --alsologtostderr -v=1
addons_test.go:839: (dbg) Done: out/minikube-linux-arm64 -p addons-083442 addons disable headlamp --alsologtostderr -v=1: (5.688041874s)
--- PASS: TestAddons/parallel/Headlamp (16.85s)

TestAddons/parallel/CloudSpanner (6.63s)

=== RUN   TestAddons/parallel/CloudSpanner
=== PAUSE TestAddons/parallel/CloudSpanner

=== CONT  TestAddons/parallel/CloudSpanner
addons_test.go:867: (dbg) TestAddons/parallel/CloudSpanner: waiting 6m0s for pods matching "app=cloud-spanner-emulator" in namespace "default" ...
helpers_test.go:344: "cloud-spanner-emulator-c4bc9b5f8-lqbv6" [625021f7-8466-4504-93cd-c8b9467e823a] Running
addons_test.go:867: (dbg) TestAddons/parallel/CloudSpanner: app=cloud-spanner-emulator healthy within 6.003265631s
addons_test.go:870: (dbg) Run:  out/minikube-linux-arm64 addons disable cloud-spanner -p addons-083442
--- PASS: TestAddons/parallel/CloudSpanner (6.63s)

TestAddons/parallel/LocalPath (52.85s)

=== RUN   TestAddons/parallel/LocalPath
=== PAUSE TestAddons/parallel/LocalPath

=== CONT  TestAddons/parallel/LocalPath
addons_test.go:982: (dbg) Run:  kubectl --context addons-083442 apply -f testdata/storage-provisioner-rancher/pvc.yaml
addons_test.go:988: (dbg) Run:  kubectl --context addons-083442 apply -f testdata/storage-provisioner-rancher/pod.yaml
addons_test.go:992: (dbg) TestAddons/parallel/LocalPath: waiting 5m0s for pvc "test-pvc" in namespace "default" ...
helpers_test.go:394: (dbg) Run:  kubectl --context addons-083442 get pvc test-pvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-083442 get pvc test-pvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-083442 get pvc test-pvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-083442 get pvc test-pvc -o jsonpath={.status.phase} -n default
helpers_test.go:394: (dbg) Run:  kubectl --context addons-083442 get pvc test-pvc -o jsonpath={.status.phase} -n default
addons_test.go:995: (dbg) TestAddons/parallel/LocalPath: waiting 3m0s for pods matching "run=test-local-path" in namespace "default" ...
helpers_test.go:344: "test-local-path" [2f6ff74b-a0d6-4de3-8e8a-7964c4a28e4b] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
helpers_test.go:344: "test-local-path" [2f6ff74b-a0d6-4de3-8e8a-7964c4a28e4b] Pending / Initialized:PodCompleted / Ready:PodCompleted / ContainersReady:PodCompleted
helpers_test.go:344: "test-local-path" [2f6ff74b-a0d6-4de3-8e8a-7964c4a28e4b] Succeeded / Initialized:PodCompleted / Ready:PodCompleted / ContainersReady:PodCompleted
addons_test.go:995: (dbg) TestAddons/parallel/LocalPath: run=test-local-path healthy within 4.004586297s
addons_test.go:1000: (dbg) Run:  kubectl --context addons-083442 get pvc test-pvc -o=json
addons_test.go:1009: (dbg) Run:  out/minikube-linux-arm64 -p addons-083442 ssh "cat /opt/local-path-provisioner/pvc-fb52b0b2-5164-4e9a-a0f6-d76fdf4aeb90_default_test-pvc/file1"
addons_test.go:1021: (dbg) Run:  kubectl --context addons-083442 delete pod test-local-path
addons_test.go:1025: (dbg) Run:  kubectl --context addons-083442 delete pvc test-pvc
addons_test.go:1029: (dbg) Run:  out/minikube-linux-arm64 -p addons-083442 addons disable storage-provisioner-rancher --alsologtostderr -v=1
addons_test.go:1029: (dbg) Done: out/minikube-linux-arm64 -p addons-083442 addons disable storage-provisioner-rancher --alsologtostderr -v=1: (43.369643517s)
--- PASS: TestAddons/parallel/LocalPath (52.85s)
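
What the local-path flow above amounts to: the storage-provisioner-rancher addon backs the PVC with a directory under /opt/local-path-provisioner on the node, so whatever the pod writes can be read back over ssh. A minimal sketch (the pvc/pod fixtures are not shown in this log; the exact pvc-<uid> directory name comes from the bound PV, as in the cat command above):

  kubectl --context addons-083442 apply -f testdata/storage-provisioner-rancher/pvc.yaml
  kubectl --context addons-083442 apply -f testdata/storage-provisioner-rancher/pod.yaml
  # once the pod has written file1, the data is visible on the node's filesystem
  out/minikube-linux-arm64 -p addons-083442 ssh "cat /opt/local-path-provisioner/pvc-<uid>_default_test-pvc/file1"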

                                                
                                    
TestAddons/parallel/NvidiaDevicePlugin (6.6s)

=== RUN   TestAddons/parallel/NvidiaDevicePlugin
=== PAUSE TestAddons/parallel/NvidiaDevicePlugin

=== CONT  TestAddons/parallel/NvidiaDevicePlugin
addons_test.go:1061: (dbg) TestAddons/parallel/NvidiaDevicePlugin: waiting 6m0s for pods matching "name=nvidia-device-plugin-ds" in namespace "kube-system" ...
helpers_test.go:344: "nvidia-device-plugin-daemonset-lkg89" [f685571b-eb16-4c68-bdd5-f2c1e9ed8fb4] Running
addons_test.go:1061: (dbg) TestAddons/parallel/NvidiaDevicePlugin: name=nvidia-device-plugin-ds healthy within 6.004046447s
addons_test.go:1064: (dbg) Run:  out/minikube-linux-arm64 addons disable nvidia-device-plugin -p addons-083442
--- PASS: TestAddons/parallel/NvidiaDevicePlugin (6.60s)

TestAddons/parallel/Yakd (11.7s)

=== RUN   TestAddons/parallel/Yakd
=== PAUSE TestAddons/parallel/Yakd

=== CONT  TestAddons/parallel/Yakd
addons_test.go:1072: (dbg) TestAddons/parallel/Yakd: waiting 2m0s for pods matching "app.kubernetes.io/name=yakd-dashboard" in namespace "yakd-dashboard" ...
helpers_test.go:344: "yakd-dashboard-67d98fc6b-6n2rc" [a6eb0b02-aa1b-47b0-aeb4-94c96fd6c47e] Running
addons_test.go:1072: (dbg) TestAddons/parallel/Yakd: app.kubernetes.io/name=yakd-dashboard healthy within 6.005443096s
addons_test.go:1076: (dbg) Run:  out/minikube-linux-arm64 -p addons-083442 addons disable yakd --alsologtostderr -v=1
addons_test.go:1076: (dbg) Done: out/minikube-linux-arm64 -p addons-083442 addons disable yakd --alsologtostderr -v=1: (5.696467403s)
--- PASS: TestAddons/parallel/Yakd (11.70s)

TestAddons/StoppedEnableDisable (11.03s)

=== RUN   TestAddons/StoppedEnableDisable
addons_test.go:174: (dbg) Run:  out/minikube-linux-arm64 stop -p addons-083442
addons_test.go:174: (dbg) Done: out/minikube-linux-arm64 stop -p addons-083442: (10.771827074s)
addons_test.go:178: (dbg) Run:  out/minikube-linux-arm64 addons enable dashboard -p addons-083442
addons_test.go:182: (dbg) Run:  out/minikube-linux-arm64 addons disable dashboard -p addons-083442
addons_test.go:187: (dbg) Run:  out/minikube-linux-arm64 addons disable gvisor -p addons-083442
--- PASS: TestAddons/StoppedEnableDisable (11.03s)

TestCertOptions (38s)

=== RUN   TestCertOptions
=== PAUSE TestCertOptions

=== CONT  TestCertOptions
cert_options_test.go:49: (dbg) Run:  out/minikube-linux-arm64 start -p cert-options-084483 --memory=2048 --apiserver-ips=127.0.0.1 --apiserver-ips=192.168.15.15 --apiserver-names=localhost --apiserver-names=www.google.com --apiserver-port=8555 --driver=docker  --container-runtime=docker
cert_options_test.go:49: (dbg) Done: out/minikube-linux-arm64 start -p cert-options-084483 --memory=2048 --apiserver-ips=127.0.0.1 --apiserver-ips=192.168.15.15 --apiserver-names=localhost --apiserver-names=www.google.com --apiserver-port=8555 --driver=docker  --container-runtime=docker: (35.263480442s)
cert_options_test.go:60: (dbg) Run:  out/minikube-linux-arm64 -p cert-options-084483 ssh "openssl x509 -text -noout -in /var/lib/minikube/certs/apiserver.crt"
cert_options_test.go:88: (dbg) Run:  kubectl --context cert-options-084483 config view
cert_options_test.go:100: (dbg) Run:  out/minikube-linux-arm64 ssh -p cert-options-084483 -- "sudo cat /etc/kubernetes/admin.conf"
helpers_test.go:175: Cleaning up "cert-options-084483" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p cert-options-084483
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p cert-options-084483: (2.097879337s)
--- PASS: TestCertOptions (38.00s)
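
What the assertions above boil down to: the extra --apiserver-names/--apiserver-ips must show up as SANs in the generated apiserver certificate, and --apiserver-port in the generated kubeconfig. A rough manual spot-check, run before the profile is deleted:

  out/minikube-linux-arm64 -p cert-options-084483 ssh \
    "openssl x509 -text -noout -in /var/lib/minikube/certs/apiserver.crt" \
    | grep -A1 'Subject Alternative Name'   # expect www.google.com and 192.168.15.15 among the SANs
  kubectl --context cert-options-084483 config view --minify | grep server   # expect port 8555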

                                                
                                    
TestCertExpiration (247.09s)

=== RUN   TestCertExpiration
=== PAUSE TestCertExpiration

=== CONT  TestCertExpiration
cert_options_test.go:123: (dbg) Run:  out/minikube-linux-arm64 start -p cert-expiration-428160 --memory=2048 --cert-expiration=3m --driver=docker  --container-runtime=docker
E0815 23:47:39.261275 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/addons-083442/client.crt: no such file or directory" logger="UnhandledError"
cert_options_test.go:123: (dbg) Done: out/minikube-linux-arm64 start -p cert-expiration-428160 --memory=2048 --cert-expiration=3m --driver=docker  --container-runtime=docker: (39.673761025s)
cert_options_test.go:131: (dbg) Run:  out/minikube-linux-arm64 start -p cert-expiration-428160 --memory=2048 --cert-expiration=8760h --driver=docker  --container-runtime=docker
cert_options_test.go:131: (dbg) Done: out/minikube-linux-arm64 start -p cert-expiration-428160 --memory=2048 --cert-expiration=8760h --driver=docker  --container-runtime=docker: (25.346700623s)
helpers_test.go:175: Cleaning up "cert-expiration-428160" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p cert-expiration-428160
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p cert-expiration-428160: (2.06891747s)
--- PASS: TestCertExpiration (247.09s)

TestDockerFlags (47.07s)

=== RUN   TestDockerFlags
=== PAUSE TestDockerFlags

=== CONT  TestDockerFlags
docker_test.go:51: (dbg) Run:  out/minikube-linux-arm64 start -p docker-flags-559933 --cache-images=false --memory=2048 --install-addons=false --wait=false --docker-env=FOO=BAR --docker-env=BAZ=BAT --docker-opt=debug --docker-opt=icc=true --alsologtostderr -v=5 --driver=docker  --container-runtime=docker
docker_test.go:51: (dbg) Done: out/minikube-linux-arm64 start -p docker-flags-559933 --cache-images=false --memory=2048 --install-addons=false --wait=false --docker-env=FOO=BAR --docker-env=BAZ=BAT --docker-opt=debug --docker-opt=icc=true --alsologtostderr -v=5 --driver=docker  --container-runtime=docker: (44.115268806s)
docker_test.go:56: (dbg) Run:  out/minikube-linux-arm64 -p docker-flags-559933 ssh "sudo systemctl show docker --property=Environment --no-pager"
docker_test.go:67: (dbg) Run:  out/minikube-linux-arm64 -p docker-flags-559933 ssh "sudo systemctl show docker --property=ExecStart --no-pager"
helpers_test.go:175: Cleaning up "docker-flags-559933" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p docker-flags-559933
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p docker-flags-559933: (2.152137898s)
--- PASS: TestDockerFlags (47.07s)
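
The two ssh probes above inspect the generated systemd unit: --docker-env values should surface in the service Environment, and --docker-opt values should be appended to dockerd's ExecStart. A rough manual equivalent, run while the profile still exists (expected strings hedged in the comments):

  # Environment should list FOO=BAR and BAZ=BAT
  out/minikube-linux-arm64 -p docker-flags-559933 ssh \
    "sudo systemctl show docker --property=Environment --no-pager"
  # ExecStart should carry the docker options (e.g. --debug, --icc=true)
  out/minikube-linux-arm64 -p docker-flags-559933 ssh \
    "sudo systemctl show docker --property=ExecStart --no-pager"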

                                                
                                    
TestForceSystemdFlag (35.01s)

=== RUN   TestForceSystemdFlag
=== PAUSE TestForceSystemdFlag

=== CONT  TestForceSystemdFlag
docker_test.go:91: (dbg) Run:  out/minikube-linux-arm64 start -p force-systemd-flag-007674 --memory=2048 --force-systemd --alsologtostderr -v=5 --driver=docker  --container-runtime=docker
docker_test.go:91: (dbg) Done: out/minikube-linux-arm64 start -p force-systemd-flag-007674 --memory=2048 --force-systemd --alsologtostderr -v=5 --driver=docker  --container-runtime=docker: (32.489908375s)
docker_test.go:110: (dbg) Run:  out/minikube-linux-arm64 -p force-systemd-flag-007674 ssh "docker info --format {{.CgroupDriver}}"
helpers_test.go:175: Cleaning up "force-systemd-flag-007674" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p force-systemd-flag-007674
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p force-systemd-flag-007674: (2.164771548s)
--- PASS: TestForceSystemdFlag (35.01s)

TestForceSystemdEnv (42.44s)

=== RUN   TestForceSystemdEnv
=== PAUSE TestForceSystemdEnv

=== CONT  TestForceSystemdEnv
docker_test.go:155: (dbg) Run:  out/minikube-linux-arm64 start -p force-systemd-env-133401 --memory=2048 --alsologtostderr -v=5 --driver=docker  --container-runtime=docker
docker_test.go:155: (dbg) Done: out/minikube-linux-arm64 start -p force-systemd-env-133401 --memory=2048 --alsologtostderr -v=5 --driver=docker  --container-runtime=docker: (39.578987703s)
docker_test.go:110: (dbg) Run:  out/minikube-linux-arm64 -p force-systemd-env-133401 ssh "docker info --format {{.CgroupDriver}}"
helpers_test.go:175: Cleaning up "force-systemd-env-133401" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p force-systemd-env-133401
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p force-systemd-env-133401: (2.305086193s)
--- PASS: TestForceSystemdEnv (42.44s)

TestErrorSpam/setup (28.73s)

=== RUN   TestErrorSpam/setup
error_spam_test.go:81: (dbg) Run:  out/minikube-linux-arm64 start -p nospam-167239 -n=1 --memory=2250 --wait=false --log_dir=/tmp/nospam-167239 --driver=docker  --container-runtime=docker
error_spam_test.go:81: (dbg) Done: out/minikube-linux-arm64 start -p nospam-167239 -n=1 --memory=2250 --wait=false --log_dir=/tmp/nospam-167239 --driver=docker  --container-runtime=docker: (28.727825884s)
--- PASS: TestErrorSpam/setup (28.73s)

TestErrorSpam/start (0.71s)

=== RUN   TestErrorSpam/start
error_spam_test.go:216: Cleaning up 1 logfile(s) ...
error_spam_test.go:159: (dbg) Run:  out/minikube-linux-arm64 -p nospam-167239 --log_dir /tmp/nospam-167239 start --dry-run
error_spam_test.go:159: (dbg) Run:  out/minikube-linux-arm64 -p nospam-167239 --log_dir /tmp/nospam-167239 start --dry-run
error_spam_test.go:182: (dbg) Run:  out/minikube-linux-arm64 -p nospam-167239 --log_dir /tmp/nospam-167239 start --dry-run
--- PASS: TestErrorSpam/start (0.71s)

TestErrorSpam/status (1.22s)

=== RUN   TestErrorSpam/status
error_spam_test.go:216: Cleaning up 0 logfile(s) ...
error_spam_test.go:159: (dbg) Run:  out/minikube-linux-arm64 -p nospam-167239 --log_dir /tmp/nospam-167239 status
error_spam_test.go:159: (dbg) Run:  out/minikube-linux-arm64 -p nospam-167239 --log_dir /tmp/nospam-167239 status
error_spam_test.go:182: (dbg) Run:  out/minikube-linux-arm64 -p nospam-167239 --log_dir /tmp/nospam-167239 status
--- PASS: TestErrorSpam/status (1.22s)

TestErrorSpam/pause (1.41s)

=== RUN   TestErrorSpam/pause
error_spam_test.go:216: Cleaning up 0 logfile(s) ...
error_spam_test.go:159: (dbg) Run:  out/minikube-linux-arm64 -p nospam-167239 --log_dir /tmp/nospam-167239 pause
error_spam_test.go:159: (dbg) Run:  out/minikube-linux-arm64 -p nospam-167239 --log_dir /tmp/nospam-167239 pause
error_spam_test.go:182: (dbg) Run:  out/minikube-linux-arm64 -p nospam-167239 --log_dir /tmp/nospam-167239 pause
--- PASS: TestErrorSpam/pause (1.41s)

TestErrorSpam/unpause (1.46s)

=== RUN   TestErrorSpam/unpause
error_spam_test.go:216: Cleaning up 0 logfile(s) ...
error_spam_test.go:159: (dbg) Run:  out/minikube-linux-arm64 -p nospam-167239 --log_dir /tmp/nospam-167239 unpause
error_spam_test.go:159: (dbg) Run:  out/minikube-linux-arm64 -p nospam-167239 --log_dir /tmp/nospam-167239 unpause
error_spam_test.go:182: (dbg) Run:  out/minikube-linux-arm64 -p nospam-167239 --log_dir /tmp/nospam-167239 unpause
--- PASS: TestErrorSpam/unpause (1.46s)

TestErrorSpam/stop (10.94s)

=== RUN   TestErrorSpam/stop
error_spam_test.go:216: Cleaning up 0 logfile(s) ...
error_spam_test.go:159: (dbg) Run:  out/minikube-linux-arm64 -p nospam-167239 --log_dir /tmp/nospam-167239 stop
error_spam_test.go:159: (dbg) Done: out/minikube-linux-arm64 -p nospam-167239 --log_dir /tmp/nospam-167239 stop: (10.753479019s)
error_spam_test.go:159: (dbg) Run:  out/minikube-linux-arm64 -p nospam-167239 --log_dir /tmp/nospam-167239 stop
error_spam_test.go:182: (dbg) Run:  out/minikube-linux-arm64 -p nospam-167239 --log_dir /tmp/nospam-167239 stop
--- PASS: TestErrorSpam/stop (10.94s)

TestFunctional/serial/CopySyncFile (0s)

=== RUN   TestFunctional/serial/CopySyncFile
functional_test.go:1855: local sync path: /home/jenkins/minikube-integration/19452-2026001/.minikube/files/etc/test/nested/copy/2031396/hosts
--- PASS: TestFunctional/serial/CopySyncFile (0.00s)

TestFunctional/serial/StartWithProxy (69.87s)

=== RUN   TestFunctional/serial/StartWithProxy
functional_test.go:2234: (dbg) Run:  out/minikube-linux-arm64 start -p functional-300199 --memory=4000 --apiserver-port=8441 --wait=all --driver=docker  --container-runtime=docker
functional_test.go:2234: (dbg) Done: out/minikube-linux-arm64 start -p functional-300199 --memory=4000 --apiserver-port=8441 --wait=all --driver=docker  --container-runtime=docker: (1m9.871731628s)
--- PASS: TestFunctional/serial/StartWithProxy (69.87s)

TestFunctional/serial/AuditLog (0s)

=== RUN   TestFunctional/serial/AuditLog
--- PASS: TestFunctional/serial/AuditLog (0.00s)

TestFunctional/serial/SoftStart (34.44s)

=== RUN   TestFunctional/serial/SoftStart
functional_test.go:659: (dbg) Run:  out/minikube-linux-arm64 start -p functional-300199 --alsologtostderr -v=8
E0815 23:14:36.184813 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/addons-083442/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:14:36.197334 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/addons-083442/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:14:36.208843 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/addons-083442/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:14:36.230324 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/addons-083442/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:14:36.271762 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/addons-083442/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:14:36.354105 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/addons-083442/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:14:36.515660 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/addons-083442/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:14:36.837350 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/addons-083442/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:14:37.479470 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/addons-083442/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:14:38.760903 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/addons-083442/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:14:41.322194 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/addons-083442/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:14:46.443942 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/addons-083442/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:14:56.685997 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/addons-083442/client.crt: no such file or directory" logger="UnhandledError"
functional_test.go:659: (dbg) Done: out/minikube-linux-arm64 start -p functional-300199 --alsologtostderr -v=8: (34.437461892s)
functional_test.go:663: soft start took 34.442032756s for "functional-300199" cluster.
--- PASS: TestFunctional/serial/SoftStart (34.44s)

TestFunctional/serial/KubeContext (0.07s)

=== RUN   TestFunctional/serial/KubeContext
functional_test.go:681: (dbg) Run:  kubectl config current-context
--- PASS: TestFunctional/serial/KubeContext (0.07s)

TestFunctional/serial/KubectlGetPods (0.12s)

=== RUN   TestFunctional/serial/KubectlGetPods
functional_test.go:696: (dbg) Run:  kubectl --context functional-300199 get po -A
--- PASS: TestFunctional/serial/KubectlGetPods (0.12s)

TestFunctional/serial/CacheCmd/cache/add_remote (3.38s)

=== RUN   TestFunctional/serial/CacheCmd/cache/add_remote
functional_test.go:1049: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 cache add registry.k8s.io/pause:3.1
functional_test.go:1049: (dbg) Done: out/minikube-linux-arm64 -p functional-300199 cache add registry.k8s.io/pause:3.1: (1.299141997s)
functional_test.go:1049: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 cache add registry.k8s.io/pause:3.3
functional_test.go:1049: (dbg) Done: out/minikube-linux-arm64 -p functional-300199 cache add registry.k8s.io/pause:3.3: (1.184763122s)
functional_test.go:1049: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 cache add registry.k8s.io/pause:latest
--- PASS: TestFunctional/serial/CacheCmd/cache/add_remote (3.38s)

TestFunctional/serial/CacheCmd/cache/add_local (0.98s)

=== RUN   TestFunctional/serial/CacheCmd/cache/add_local
functional_test.go:1077: (dbg) Run:  docker build -t minikube-local-cache-test:functional-300199 /tmp/TestFunctionalserialCacheCmdcacheadd_local416197844/001
functional_test.go:1089: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 cache add minikube-local-cache-test:functional-300199
functional_test.go:1094: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 cache delete minikube-local-cache-test:functional-300199
functional_test.go:1083: (dbg) Run:  docker rmi minikube-local-cache-test:functional-300199
--- PASS: TestFunctional/serial/CacheCmd/cache/add_local (0.98s)

TestFunctional/serial/CacheCmd/cache/CacheDelete (0.06s)

=== RUN   TestFunctional/serial/CacheCmd/cache/CacheDelete
functional_test.go:1102: (dbg) Run:  out/minikube-linux-arm64 cache delete registry.k8s.io/pause:3.3
--- PASS: TestFunctional/serial/CacheCmd/cache/CacheDelete (0.06s)

TestFunctional/serial/CacheCmd/cache/list (0.07s)

=== RUN   TestFunctional/serial/CacheCmd/cache/list
functional_test.go:1110: (dbg) Run:  out/minikube-linux-arm64 cache list
--- PASS: TestFunctional/serial/CacheCmd/cache/list (0.07s)

TestFunctional/serial/CacheCmd/cache/verify_cache_inside_node (0.3s)

=== RUN   TestFunctional/serial/CacheCmd/cache/verify_cache_inside_node
functional_test.go:1124: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 ssh sudo crictl images
--- PASS: TestFunctional/serial/CacheCmd/cache/verify_cache_inside_node (0.30s)

TestFunctional/serial/CacheCmd/cache/cache_reload (1.56s)

=== RUN   TestFunctional/serial/CacheCmd/cache/cache_reload
functional_test.go:1147: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 ssh sudo docker rmi registry.k8s.io/pause:latest
functional_test.go:1153: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 ssh sudo crictl inspecti registry.k8s.io/pause:latest
functional_test.go:1153: (dbg) Non-zero exit: out/minikube-linux-arm64 -p functional-300199 ssh sudo crictl inspecti registry.k8s.io/pause:latest: exit status 1 (279.280739ms)

-- stdout --
	FATA[0000] no such image "registry.k8s.io/pause:latest" present 

-- /stdout --
** stderr ** 
	ssh: Process exited with status 1

** /stderr **
functional_test.go:1158: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 cache reload
functional_test.go:1163: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 ssh sudo crictl inspecti registry.k8s.io/pause:latest
--- PASS: TestFunctional/serial/CacheCmd/cache/cache_reload (1.56s)
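
The reload cycle above is: delete the cached image inside the node, confirm crictl no longer sees it (the non-zero exit is the expected outcome), then push minikube's local cache back into the node. By hand it is roughly:

  out/minikube-linux-arm64 -p functional-300199 ssh sudo docker rmi registry.k8s.io/pause:latest
  out/minikube-linux-arm64 -p functional-300199 ssh sudo crictl inspecti registry.k8s.io/pause:latest   # fails: image gone
  out/minikube-linux-arm64 -p functional-300199 cache reload
  out/minikube-linux-arm64 -p functional-300199 ssh sudo crictl inspecti registry.k8s.io/pause:latest   # succeeds again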

                                                
                                    
TestFunctional/serial/CacheCmd/cache/delete (0.11s)

=== RUN   TestFunctional/serial/CacheCmd/cache/delete
functional_test.go:1172: (dbg) Run:  out/minikube-linux-arm64 cache delete registry.k8s.io/pause:3.1
functional_test.go:1172: (dbg) Run:  out/minikube-linux-arm64 cache delete registry.k8s.io/pause:latest
--- PASS: TestFunctional/serial/CacheCmd/cache/delete (0.11s)

TestFunctional/serial/MinikubeKubectlCmd (0.13s)

=== RUN   TestFunctional/serial/MinikubeKubectlCmd
functional_test.go:716: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 kubectl -- --context functional-300199 get pods
--- PASS: TestFunctional/serial/MinikubeKubectlCmd (0.13s)

TestFunctional/serial/MinikubeKubectlCmdDirectly (0.13s)

=== RUN   TestFunctional/serial/MinikubeKubectlCmdDirectly
functional_test.go:741: (dbg) Run:  out/kubectl --context functional-300199 get pods
--- PASS: TestFunctional/serial/MinikubeKubectlCmdDirectly (0.13s)

TestFunctional/serial/ExtraConfig (44.48s)

=== RUN   TestFunctional/serial/ExtraConfig
functional_test.go:757: (dbg) Run:  out/minikube-linux-arm64 start -p functional-300199 --extra-config=apiserver.enable-admission-plugins=NamespaceAutoProvision --wait=all
E0815 23:15:17.168154 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/addons-083442/client.crt: no such file or directory" logger="UnhandledError"
functional_test.go:757: (dbg) Done: out/minikube-linux-arm64 start -p functional-300199 --extra-config=apiserver.enable-admission-plugins=NamespaceAutoProvision --wait=all: (44.481871133s)
functional_test.go:761: restart took 44.481973169s for "functional-300199" cluster.
--- PASS: TestFunctional/serial/ExtraConfig (44.48s)
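
--extra-config=apiserver.enable-admission-plugins=NamespaceAutoProvision is forwarded to the kube-apiserver static pod as the matching command-line flag, which is what the restart re-applies. A rough way to confirm it on the running control plane (assumes the default static-pod name kube-apiserver-<node>, where the node name equals the profile name):

  kubectl --context functional-300199 -n kube-system get pod kube-apiserver-functional-300199 -o yaml \
    | grep enable-admission-plugins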

                                                
                                    
TestFunctional/serial/ComponentHealth (0.11s)

=== RUN   TestFunctional/serial/ComponentHealth
functional_test.go:810: (dbg) Run:  kubectl --context functional-300199 get po -l tier=control-plane -n kube-system -o=json
functional_test.go:825: etcd phase: Running
functional_test.go:835: etcd status: Ready
functional_test.go:825: kube-apiserver phase: Running
functional_test.go:835: kube-apiserver status: Ready
functional_test.go:825: kube-controller-manager phase: Running
functional_test.go:835: kube-controller-manager status: Ready
functional_test.go:825: kube-scheduler phase: Running
functional_test.go:835: kube-scheduler status: Ready
--- PASS: TestFunctional/serial/ComponentHealth (0.11s)

TestFunctional/serial/LogsCmd (1.12s)

=== RUN   TestFunctional/serial/LogsCmd
functional_test.go:1236: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 logs
functional_test.go:1236: (dbg) Done: out/minikube-linux-arm64 -p functional-300199 logs: (1.117408872s)
--- PASS: TestFunctional/serial/LogsCmd (1.12s)

TestFunctional/serial/LogsFileCmd (1.18s)

=== RUN   TestFunctional/serial/LogsFileCmd
functional_test.go:1250: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 logs --file /tmp/TestFunctionalserialLogsFileCmd1489686489/001/logs.txt
functional_test.go:1250: (dbg) Done: out/minikube-linux-arm64 -p functional-300199 logs --file /tmp/TestFunctionalserialLogsFileCmd1489686489/001/logs.txt: (1.180026715s)
--- PASS: TestFunctional/serial/LogsFileCmd (1.18s)

TestFunctional/serial/InvalidService (4.31s)

=== RUN   TestFunctional/serial/InvalidService
functional_test.go:2321: (dbg) Run:  kubectl --context functional-300199 apply -f testdata/invalidsvc.yaml
functional_test.go:2335: (dbg) Run:  out/minikube-linux-arm64 service invalid-svc -p functional-300199
functional_test.go:2335: (dbg) Non-zero exit: out/minikube-linux-arm64 service invalid-svc -p functional-300199: exit status 115 (647.665743ms)

-- stdout --
	|-----------|-------------|-------------|---------------------------|
	| NAMESPACE |    NAME     | TARGET PORT |            URL            |
	|-----------|-------------|-------------|---------------------------|
	| default   | invalid-svc |          80 | http://192.168.49.2:31967 |
	|-----------|-------------|-------------|---------------------------|
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	X Exiting due to SVC_UNREACHABLE: service not available: no running pod for service invalid-svc found
	* 
	╭─────────────────────────────────────────────────────────────────────────────────────────────╮
	│                                                                                             │
	│    * If the above advice does not help, please let us know:                                 │
	│      https://github.com/kubernetes/minikube/issues/new/choose                               │
	│                                                                                             │
	│    * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue.    │
	│    * Please also attach the following file to the GitHub issue:                             │
	│    * - /tmp/minikube_service_96b204199e3191fa1740d4430b018a3c8028d52d_0.log                 │
	│                                                                                             │
	╰─────────────────────────────────────────────────────────────────────────────────────────────╯

                                                
                                                
** /stderr **
functional_test.go:2327: (dbg) Run:  kubectl --context functional-300199 delete -f testdata/invalidsvc.yaml
E0815 23:15:58.130354 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/addons-083442/client.crt: no such file or directory" logger="UnhandledError"
--- PASS: TestFunctional/serial/InvalidService (4.31s)

TestFunctional/parallel/ConfigCmd (0.51s)

=== RUN   TestFunctional/parallel/ConfigCmd
=== PAUSE TestFunctional/parallel/ConfigCmd

=== CONT  TestFunctional/parallel/ConfigCmd
functional_test.go:1199: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 config unset cpus
functional_test.go:1199: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 config get cpus
functional_test.go:1199: (dbg) Non-zero exit: out/minikube-linux-arm64 -p functional-300199 config get cpus: exit status 14 (119.713521ms)

** stderr ** 
	Error: specified key could not be found in config

** /stderr **
functional_test.go:1199: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 config set cpus 2
functional_test.go:1199: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 config get cpus
functional_test.go:1199: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 config unset cpus
functional_test.go:1199: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 config get cpus
functional_test.go:1199: (dbg) Non-zero exit: out/minikube-linux-arm64 -p functional-300199 config get cpus: exit status 14 (71.632969ms)

** stderr ** 
	Error: specified key could not be found in config

** /stderr **
--- PASS: TestFunctional/parallel/ConfigCmd (0.51s)
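
The two exit status 14 results above are expected: config get fails when the key is unset, both before the set and after the unset. The round trip, roughly:

  out/minikube-linux-arm64 -p functional-300199 config set cpus 2
  out/minikube-linux-arm64 -p functional-300199 config get cpus      # prints 2
  out/minikube-linux-arm64 -p functional-300199 config unset cpus
  out/minikube-linux-arm64 -p functional-300199 config get cpus      # exit status 14: key not found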

                                                
                                    
TestFunctional/parallel/DashboardCmd (14.16s)

=== RUN   TestFunctional/parallel/DashboardCmd
=== PAUSE TestFunctional/parallel/DashboardCmd

=== CONT  TestFunctional/parallel/DashboardCmd
functional_test.go:905: (dbg) daemon: [out/minikube-linux-arm64 dashboard --url --port 36195 -p functional-300199 --alsologtostderr -v=1]
functional_test.go:910: (dbg) stopping [out/minikube-linux-arm64 dashboard --url --port 36195 -p functional-300199 --alsologtostderr -v=1] ...
helpers_test.go:508: unable to kill pid 2070949: os: process already finished
--- PASS: TestFunctional/parallel/DashboardCmd (14.16s)

TestFunctional/parallel/DryRun (0.5s)

=== RUN   TestFunctional/parallel/DryRun
=== PAUSE TestFunctional/parallel/DryRun

=== CONT  TestFunctional/parallel/DryRun
functional_test.go:974: (dbg) Run:  out/minikube-linux-arm64 start -p functional-300199 --dry-run --memory 250MB --alsologtostderr --driver=docker  --container-runtime=docker
functional_test.go:974: (dbg) Non-zero exit: out/minikube-linux-arm64 start -p functional-300199 --dry-run --memory 250MB --alsologtostderr --driver=docker  --container-runtime=docker: exit status 23 (195.294626ms)

-- stdout --
	* [functional-300199] minikube v1.33.1 on Ubuntu 20.04 (arm64)
	  - MINIKUBE_LOCATION=19452
	  - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	  - KUBECONFIG=/home/jenkins/minikube-integration/19452-2026001/kubeconfig
	  - MINIKUBE_HOME=/home/jenkins/minikube-integration/19452-2026001/.minikube
	  - MINIKUBE_BIN=out/minikube-linux-arm64
	  - MINIKUBE_FORCE_SYSTEMD=
	* Using the docker driver based on existing profile
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	I0815 23:16:33.964281 2070345 out.go:345] Setting OutFile to fd 1 ...
	I0815 23:16:33.964436 2070345 out.go:392] TERM=,COLORTERM=, which probably does not support color
	I0815 23:16:33.964464 2070345 out.go:358] Setting ErrFile to fd 2...
	I0815 23:16:33.964469 2070345 out.go:392] TERM=,COLORTERM=, which probably does not support color
	I0815 23:16:33.964746 2070345 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19452-2026001/.minikube/bin
	I0815 23:16:33.965182 2070345 out.go:352] Setting JSON to false
	I0815 23:16:33.966275 2070345 start.go:129] hostinfo: {"hostname":"ip-172-31-29-130","uptime":28738,"bootTime":1723735056,"procs":233,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1067-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"36adf542-ef4f-4e2d-a0c8-6868d1383ff9"}
	I0815 23:16:33.966352 2070345 start.go:139] virtualization:  
	I0815 23:16:33.968874 2070345 out.go:177] * [functional-300199] minikube v1.33.1 on Ubuntu 20.04 (arm64)
	I0815 23:16:33.971412 2070345 out.go:177]   - MINIKUBE_LOCATION=19452
	I0815 23:16:33.971554 2070345 notify.go:220] Checking for updates...
	I0815 23:16:33.975321 2070345 out.go:177]   - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I0815 23:16:33.977075 2070345 out.go:177]   - KUBECONFIG=/home/jenkins/minikube-integration/19452-2026001/kubeconfig
	I0815 23:16:33.978916 2070345 out.go:177]   - MINIKUBE_HOME=/home/jenkins/minikube-integration/19452-2026001/.minikube
	I0815 23:16:33.980460 2070345 out.go:177]   - MINIKUBE_BIN=out/minikube-linux-arm64
	I0815 23:16:33.982527 2070345 out.go:177]   - MINIKUBE_FORCE_SYSTEMD=
	I0815 23:16:33.984838 2070345 config.go:182] Loaded profile config "functional-300199": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.0
	I0815 23:16:33.985360 2070345 driver.go:392] Setting default libvirt URI to qemu:///system
	I0815 23:16:34.014759 2070345 docker.go:123] docker version: linux-27.1.2:Docker Engine - Community
	I0815 23:16:34.014886 2070345 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0815 23:16:34.095489 2070345 info.go:266] docker info: {ID:U5VK:ZNT5:35M3:FHLW:Q7TL:ELFX:BNAG:AV4T:UD2H:SK5L:SEJV:SJJL Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:32 OomKillDisable:true NGoroutines:52 SystemTime:2024-08-15 23:16:34.084436852 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1067-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214900736 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-29-130 Labels:[] ExperimentalBuild:false ServerVersion:27.1.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:8fc6bcff51318944179630522a095cc9dbf9f353 Expected:8fc6bcff51318944179630522a095cc9dbf9f353} RuncCommit:{ID:v1.1.13-0-g58aa920 Expected:v1.1.13-0-g58aa920} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.16.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.29.1]] Warnings:<nil>}}
	I0815 23:16:34.095615 2070345 docker.go:307] overlay module found
	I0815 23:16:34.098323 2070345 out.go:177] * Using the docker driver based on existing profile
	I0815 23:16:34.100454 2070345 start.go:297] selected driver: docker
	I0815 23:16:34.100473 2070345 start.go:901] validating driver "docker" against &{Name:functional-300199 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8441 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.0 ClusterName:functional-300199 Namespace:default APIServerHAVIP: APIServerNa
me:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.31.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker Mou
ntIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I0815 23:16:34.100579 2070345 start.go:912] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
	I0815 23:16:34.103475 2070345 out.go:201] 
	W0815 23:16:34.105292 2070345 out.go:270] X Exiting due to RSRC_INSUFFICIENT_REQ_MEMORY: Requested memory allocation 250MiB is less than the usable minimum of 1800MB
	X Exiting due to RSRC_INSUFFICIENT_REQ_MEMORY: Requested memory allocation 250MiB is less than the usable minimum of 1800MB
	I0815 23:16:34.107050 2070345 out.go:201] 

                                                
                                                
** /stderr **
functional_test.go:991: (dbg) Run:  out/minikube-linux-arm64 start -p functional-300199 --dry-run --alsologtostderr -v=1 --driver=docker  --container-runtime=docker
--- PASS: TestFunctional/parallel/DryRun (0.50s)

                                                
                                    
TestFunctional/parallel/InternationalLanguage (0.21s)

                                                
                                                
=== RUN   TestFunctional/parallel/InternationalLanguage
=== PAUSE TestFunctional/parallel/InternationalLanguage

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/InternationalLanguage
functional_test.go:1020: (dbg) Run:  out/minikube-linux-arm64 start -p functional-300199 --dry-run --memory 250MB --alsologtostderr --driver=docker  --container-runtime=docker
functional_test.go:1020: (dbg) Non-zero exit: out/minikube-linux-arm64 start -p functional-300199 --dry-run --memory 250MB --alsologtostderr --driver=docker  --container-runtime=docker: exit status 23 (211.843527ms)

                                                
                                                
-- stdout --
	* [functional-300199] minikube v1.33.1 sur Ubuntu 20.04 (arm64)
	  - MINIKUBE_LOCATION=19452
	  - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	  - KUBECONFIG=/home/jenkins/minikube-integration/19452-2026001/kubeconfig
	  - MINIKUBE_HOME=/home/jenkins/minikube-integration/19452-2026001/.minikube
	  - MINIKUBE_BIN=out/minikube-linux-arm64
	  - MINIKUBE_FORCE_SYSTEMD=
	* Utilisation du pilote docker basé sur le profil existant
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	I0815 23:16:33.764315 2070302 out.go:345] Setting OutFile to fd 1 ...
	I0815 23:16:33.764450 2070302 out.go:392] TERM=,COLORTERM=, which probably does not support color
	I0815 23:16:33.764459 2070302 out.go:358] Setting ErrFile to fd 2...
	I0815 23:16:33.764464 2070302 out.go:392] TERM=,COLORTERM=, which probably does not support color
	I0815 23:16:33.765336 2070302 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19452-2026001/.minikube/bin
	I0815 23:16:33.765795 2070302 out.go:352] Setting JSON to false
	I0815 23:16:33.766867 2070302 start.go:129] hostinfo: {"hostname":"ip-172-31-29-130","uptime":28738,"bootTime":1723735056,"procs":233,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1067-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"36adf542-ef4f-4e2d-a0c8-6868d1383ff9"}
	I0815 23:16:33.766940 2070302 start.go:139] virtualization:  
	I0815 23:16:33.769546 2070302 out.go:177] * [functional-300199] minikube v1.33.1 sur Ubuntu 20.04 (arm64)
	I0815 23:16:33.771669 2070302 out.go:177]   - MINIKUBE_LOCATION=19452
	I0815 23:16:33.771764 2070302 notify.go:220] Checking for updates...
	I0815 23:16:33.776243 2070302 out.go:177]   - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I0815 23:16:33.778110 2070302 out.go:177]   - KUBECONFIG=/home/jenkins/minikube-integration/19452-2026001/kubeconfig
	I0815 23:16:33.779913 2070302 out.go:177]   - MINIKUBE_HOME=/home/jenkins/minikube-integration/19452-2026001/.minikube
	I0815 23:16:33.781865 2070302 out.go:177]   - MINIKUBE_BIN=out/minikube-linux-arm64
	I0815 23:16:33.783646 2070302 out.go:177]   - MINIKUBE_FORCE_SYSTEMD=
	I0815 23:16:33.786983 2070302 config.go:182] Loaded profile config "functional-300199": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.0
	I0815 23:16:33.787593 2070302 driver.go:392] Setting default libvirt URI to qemu:///system
	I0815 23:16:33.822891 2070302 docker.go:123] docker version: linux-27.1.2:Docker Engine - Community
	I0815 23:16:33.822994 2070302 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0815 23:16:33.900429 2070302 info.go:266] docker info: {ID:U5VK:ZNT5:35M3:FHLW:Q7TL:ELFX:BNAG:AV4T:UD2H:SK5L:SEJV:SJJL Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:32 OomKillDisable:true NGoroutines:52 SystemTime:2024-08-15 23:16:33.89002821 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1067-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarc
h64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214900736 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-29-130 Labels:[] ExperimentalBuild:false ServerVersion:27.1.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:8fc6bcff51318944179630522a095cc9dbf9f353 Expected:8fc6bcff51318944179630522a095cc9dbf9f353} RuncCommit:{ID:v1.1.13-0-g58aa920 Expected:v1.1.13-0-g58aa920} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerError
s:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.16.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.29.1]] Warnings:<nil>}}
	I0815 23:16:33.900556 2070302 docker.go:307] overlay module found
	I0815 23:16:33.903316 2070302 out.go:177] * Utilisation du pilote docker basé sur le profil existant
	I0815 23:16:33.905573 2070302 start.go:297] selected driver: docker
	I0815 23:16:33.905591 2070302 start.go:901] validating driver "docker" against &{Name:functional-300199 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8441 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.0 ClusterName:functional-300199 Namespace:default APIServerHAVIP: APIServerNa
me:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.31.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker Mou
ntIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I0815 23:16:33.905754 2070302 start.go:912] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
	I0815 23:16:33.908319 2070302 out.go:201] 
	W0815 23:16:33.910547 2070302 out.go:270] X Fermeture en raison de RSRC_INSUFFICIENT_REQ_MEMORY : L'allocation de mémoire demandée 250 Mio est inférieure au minimum utilisable de 1800 Mo
	X Fermeture en raison de RSRC_INSUFFICIENT_REQ_MEMORY : L'allocation de mémoire demandée 250 Mio est inférieure au minimum utilisable de 1800 Mo
	I0815 23:16:33.912888 2070302 out.go:201] 

                                                
                                                
** /stderr **
--- PASS: TestFunctional/parallel/InternationalLanguage (0.21s)

                                                
                                    
TestFunctional/parallel/StatusCmd (1.08s)

                                                
                                                
=== RUN   TestFunctional/parallel/StatusCmd
=== PAUSE TestFunctional/parallel/StatusCmd

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/StatusCmd
functional_test.go:854: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 status
functional_test.go:860: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 status -f host:{{.Host}},kublet:{{.Kubelet}},apiserver:{{.APIServer}},kubeconfig:{{.Kubeconfig}}
functional_test.go:872: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 status -o json
--- PASS: TestFunctional/parallel/StatusCmd (1.08s)

                                                
                                    
TestFunctional/parallel/ServiceCmdConnect (11.62s)

                                                
                                                
=== RUN   TestFunctional/parallel/ServiceCmdConnect
=== PAUSE TestFunctional/parallel/ServiceCmdConnect

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/ServiceCmdConnect
functional_test.go:1627: (dbg) Run:  kubectl --context functional-300199 create deployment hello-node-connect --image=registry.k8s.io/echoserver-arm:1.8
functional_test.go:1635: (dbg) Run:  kubectl --context functional-300199 expose deployment hello-node-connect --type=NodePort --port=8080
functional_test.go:1640: (dbg) TestFunctional/parallel/ServiceCmdConnect: waiting 10m0s for pods matching "app=hello-node-connect" in namespace "default" ...
helpers_test.go:344: "hello-node-connect-65d86f57f4-9hrht" [9781ff93-edcf-4a30-9710-fa2f1fc8e218] Pending / Ready:ContainersNotReady (containers with unready status: [echoserver-arm]) / ContainersReady:ContainersNotReady (containers with unready status: [echoserver-arm])
helpers_test.go:344: "hello-node-connect-65d86f57f4-9hrht" [9781ff93-edcf-4a30-9710-fa2f1fc8e218] Running
functional_test.go:1640: (dbg) TestFunctional/parallel/ServiceCmdConnect: app=hello-node-connect healthy within 11.004168984s
functional_test.go:1649: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 service hello-node-connect --url
functional_test.go:1655: found endpoint for hello-node-connect: http://192.168.49.2:30745
functional_test.go:1675: http://192.168.49.2:30745: success! body:

                                                
                                                

                                                
                                                
Hostname: hello-node-connect-65d86f57f4-9hrht

                                                
                                                
Pod Information:
	-no pod information available-

                                                
                                                
Server values:
	server_version=nginx: 1.13.3 - lua: 10008

                                                
                                                
Request Information:
	client_address=10.244.0.1
	method=GET
	real path=/
	query=
	request_version=1.1
	request_uri=http://192.168.49.2:8080/

                                                
                                                
Request Headers:
	accept-encoding=gzip
	host=192.168.49.2:30745
	user-agent=Go-http-client/1.1

                                                
                                                
Request Body:
	-no body in request-

                                                
                                                
--- PASS: TestFunctional/parallel/ServiceCmdConnect (11.62s)

                                                
                                    
TestFunctional/parallel/AddonsCmd (0.21s)

                                                
                                                
=== RUN   TestFunctional/parallel/AddonsCmd
=== PAUSE TestFunctional/parallel/AddonsCmd

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/AddonsCmd
functional_test.go:1690: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 addons list
functional_test.go:1702: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 addons list -o json
--- PASS: TestFunctional/parallel/AddonsCmd (0.21s)

                                                
                                    
TestFunctional/parallel/PersistentVolumeClaim (26.65s)

                                                
                                                
=== RUN   TestFunctional/parallel/PersistentVolumeClaim
=== PAUSE TestFunctional/parallel/PersistentVolumeClaim

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/PersistentVolumeClaim
functional_test_pvc_test.go:44: (dbg) TestFunctional/parallel/PersistentVolumeClaim: waiting 4m0s for pods matching "integration-test=storage-provisioner" in namespace "kube-system" ...
helpers_test.go:344: "storage-provisioner" [f9507a2b-c425-4390-b10c-29db6f57b1c7] Running
functional_test_pvc_test.go:44: (dbg) TestFunctional/parallel/PersistentVolumeClaim: integration-test=storage-provisioner healthy within 6.004054914s
functional_test_pvc_test.go:49: (dbg) Run:  kubectl --context functional-300199 get storageclass -o=json
functional_test_pvc_test.go:69: (dbg) Run:  kubectl --context functional-300199 apply -f testdata/storage-provisioner/pvc.yaml
functional_test_pvc_test.go:76: (dbg) Run:  kubectl --context functional-300199 get pvc myclaim -o=json
functional_test_pvc_test.go:125: (dbg) Run:  kubectl --context functional-300199 apply -f testdata/storage-provisioner/pod.yaml
functional_test_pvc_test.go:130: (dbg) TestFunctional/parallel/PersistentVolumeClaim: waiting 3m0s for pods matching "test=storage-provisioner" in namespace "default" ...
helpers_test.go:344: "sp-pod" [808b8797-8e49-488b-affe-4bf9ea571904] Pending
helpers_test.go:344: "sp-pod" [808b8797-8e49-488b-affe-4bf9ea571904] Pending / Ready:ContainersNotReady (containers with unready status: [myfrontend]) / ContainersReady:ContainersNotReady (containers with unready status: [myfrontend])
helpers_test.go:344: "sp-pod" [808b8797-8e49-488b-affe-4bf9ea571904] Running
functional_test_pvc_test.go:130: (dbg) TestFunctional/parallel/PersistentVolumeClaim: test=storage-provisioner healthy within 12.003107368s
functional_test_pvc_test.go:100: (dbg) Run:  kubectl --context functional-300199 exec sp-pod -- touch /tmp/mount/foo
functional_test_pvc_test.go:106: (dbg) Run:  kubectl --context functional-300199 delete -f testdata/storage-provisioner/pod.yaml
functional_test_pvc_test.go:125: (dbg) Run:  kubectl --context functional-300199 apply -f testdata/storage-provisioner/pod.yaml
functional_test_pvc_test.go:130: (dbg) TestFunctional/parallel/PersistentVolumeClaim: waiting 3m0s for pods matching "test=storage-provisioner" in namespace "default" ...
helpers_test.go:344: "sp-pod" [39af6422-9d00-485b-ad0b-b6413fa4573f] Pending
helpers_test.go:344: "sp-pod" [39af6422-9d00-485b-ad0b-b6413fa4573f] Pending / Ready:ContainersNotReady (containers with unready status: [myfrontend]) / ContainersReady:ContainersNotReady (containers with unready status: [myfrontend])
helpers_test.go:344: "sp-pod" [39af6422-9d00-485b-ad0b-b6413fa4573f] Running
functional_test_pvc_test.go:130: (dbg) TestFunctional/parallel/PersistentVolumeClaim: test=storage-provisioner healthy within 7.004075614s
functional_test_pvc_test.go:114: (dbg) Run:  kubectl --context functional-300199 exec sp-pod -- ls /tmp/mount
--- PASS: TestFunctional/parallel/PersistentVolumeClaim (26.65s)

                                                
                                    
TestFunctional/parallel/SSHCmd (0.7s)

                                                
                                                
=== RUN   TestFunctional/parallel/SSHCmd
=== PAUSE TestFunctional/parallel/SSHCmd

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/SSHCmd
functional_test.go:1725: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 ssh "echo hello"
functional_test.go:1742: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 ssh "cat /etc/hostname"
--- PASS: TestFunctional/parallel/SSHCmd (0.70s)

                                                
                                    
TestFunctional/parallel/CpCmd (2.33s)

                                                
                                                
=== RUN   TestFunctional/parallel/CpCmd
=== PAUSE TestFunctional/parallel/CpCmd

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/CpCmd
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 cp testdata/cp-test.txt /home/docker/cp-test.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 ssh -n functional-300199 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 cp functional-300199:/home/docker/cp-test.txt /tmp/TestFunctionalparallelCpCmd2705700416/001/cp-test.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 ssh -n functional-300199 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 cp testdata/cp-test.txt /tmp/does/not/exist/cp-test.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 ssh -n functional-300199 "sudo cat /tmp/does/not/exist/cp-test.txt"
--- PASS: TestFunctional/parallel/CpCmd (2.33s)

                                                
                                    
TestFunctional/parallel/FileSync (0.33s)

                                                
                                                
=== RUN   TestFunctional/parallel/FileSync
=== PAUSE TestFunctional/parallel/FileSync

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/FileSync
functional_test.go:1929: Checking for existence of /etc/test/nested/copy/2031396/hosts within VM
functional_test.go:1931: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 ssh "sudo cat /etc/test/nested/copy/2031396/hosts"
functional_test.go:1936: file sync test content: Test file for checking file sync process
--- PASS: TestFunctional/parallel/FileSync (0.33s)

                                                
                                    
TestFunctional/parallel/CertSync (2.13s)

                                                
                                                
=== RUN   TestFunctional/parallel/CertSync
=== PAUSE TestFunctional/parallel/CertSync

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/CertSync
functional_test.go:1972: Checking for existence of /etc/ssl/certs/2031396.pem within VM
functional_test.go:1973: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 ssh "sudo cat /etc/ssl/certs/2031396.pem"
functional_test.go:1972: Checking for existence of /usr/share/ca-certificates/2031396.pem within VM
functional_test.go:1973: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 ssh "sudo cat /usr/share/ca-certificates/2031396.pem"
functional_test.go:1972: Checking for existence of /etc/ssl/certs/51391683.0 within VM
functional_test.go:1973: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 ssh "sudo cat /etc/ssl/certs/51391683.0"
functional_test.go:1999: Checking for existence of /etc/ssl/certs/20313962.pem within VM
functional_test.go:2000: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 ssh "sudo cat /etc/ssl/certs/20313962.pem"
functional_test.go:1999: Checking for existence of /usr/share/ca-certificates/20313962.pem within VM
functional_test.go:2000: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 ssh "sudo cat /usr/share/ca-certificates/20313962.pem"
functional_test.go:1999: Checking for existence of /etc/ssl/certs/3ec20f2e.0 within VM
functional_test.go:2000: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 ssh "sudo cat /etc/ssl/certs/3ec20f2e.0"
--- PASS: TestFunctional/parallel/CertSync (2.13s)

                                                
                                    
TestFunctional/parallel/NodeLabels (0.1s)

                                                
                                                
=== RUN   TestFunctional/parallel/NodeLabels
=== PAUSE TestFunctional/parallel/NodeLabels

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/NodeLabels
functional_test.go:219: (dbg) Run:  kubectl --context functional-300199 get nodes --output=go-template "--template='{{range $k, $v := (index .items 0).metadata.labels}}{{$k}} {{end}}'"
--- PASS: TestFunctional/parallel/NodeLabels (0.10s)

                                                
                                    
TestFunctional/parallel/NonActiveRuntimeDisabled (0.38s)

                                                
                                                
=== RUN   TestFunctional/parallel/NonActiveRuntimeDisabled
=== PAUSE TestFunctional/parallel/NonActiveRuntimeDisabled

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/NonActiveRuntimeDisabled
functional_test.go:2027: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 ssh "sudo systemctl is-active crio"
functional_test.go:2027: (dbg) Non-zero exit: out/minikube-linux-arm64 -p functional-300199 ssh "sudo systemctl is-active crio": exit status 1 (376.979642ms)

                                                
                                                
-- stdout --
	inactive

                                                
                                                
-- /stdout --
** stderr ** 
	ssh: Process exited with status 3

                                                
                                                
** /stderr **
--- PASS: TestFunctional/parallel/NonActiveRuntimeDisabled (0.38s)

                                                
                                    
TestFunctional/parallel/License (0.22s)

                                                
                                                
=== RUN   TestFunctional/parallel/License
=== PAUSE TestFunctional/parallel/License

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/License
functional_test.go:2288: (dbg) Run:  out/minikube-linux-arm64 license
--- PASS: TestFunctional/parallel/License (0.22s)

                                                
                                    
TestFunctional/parallel/TunnelCmd/serial/RunSecondTunnel (0.82s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/RunSecondTunnel
functional_test_tunnel_test.go:154: (dbg) daemon: [out/minikube-linux-arm64 -p functional-300199 tunnel --alsologtostderr]
functional_test_tunnel_test.go:154: (dbg) daemon: [out/minikube-linux-arm64 -p functional-300199 tunnel --alsologtostderr]
functional_test_tunnel_test.go:194: (dbg) stopping [out/minikube-linux-arm64 -p functional-300199 tunnel --alsologtostderr] ...
helpers_test.go:490: unable to find parent, assuming dead: process does not exist
functional_test_tunnel_test.go:194: (dbg) stopping [out/minikube-linux-arm64 -p functional-300199 tunnel --alsologtostderr] ...
helpers_test.go:508: unable to kill pid 2067611: os: process already finished
helpers_test.go:508: unable to kill pid 2067427: os: process already finished
--- PASS: TestFunctional/parallel/TunnelCmd/serial/RunSecondTunnel (0.82s)

                                                
                                    
TestFunctional/parallel/TunnelCmd/serial/StartTunnel (0s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/StartTunnel
functional_test_tunnel_test.go:129: (dbg) daemon: [out/minikube-linux-arm64 -p functional-300199 tunnel --alsologtostderr]
--- PASS: TestFunctional/parallel/TunnelCmd/serial/StartTunnel (0.00s)

                                                
                                    
TestFunctional/parallel/TunnelCmd/serial/WaitService/Setup (10.45s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/WaitService/Setup
functional_test_tunnel_test.go:212: (dbg) Run:  kubectl --context functional-300199 apply -f testdata/testsvc.yaml
functional_test_tunnel_test.go:216: (dbg) TestFunctional/parallel/TunnelCmd/serial/WaitService/Setup: waiting 4m0s for pods matching "run=nginx-svc" in namespace "default" ...
helpers_test.go:344: "nginx-svc" [b2ff2a9f-bc33-4baa-8098-6907dc8e0d05] Pending / Ready:ContainersNotReady (containers with unready status: [nginx]) / ContainersReady:ContainersNotReady (containers with unready status: [nginx])
helpers_test.go:344: "nginx-svc" [b2ff2a9f-bc33-4baa-8098-6907dc8e0d05] Running
functional_test_tunnel_test.go:216: (dbg) TestFunctional/parallel/TunnelCmd/serial/WaitService/Setup: run=nginx-svc healthy within 10.003598784s
--- PASS: TestFunctional/parallel/TunnelCmd/serial/WaitService/Setup (10.45s)

                                                
                                    
TestFunctional/parallel/TunnelCmd/serial/WaitService/IngressIP (0.1s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/WaitService/IngressIP
functional_test_tunnel_test.go:234: (dbg) Run:  kubectl --context functional-300199 get svc nginx-svc -o jsonpath={.status.loadBalancer.ingress[0].ip}
--- PASS: TestFunctional/parallel/TunnelCmd/serial/WaitService/IngressIP (0.10s)

                                                
                                    
TestFunctional/parallel/TunnelCmd/serial/AccessDirect (0s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/AccessDirect
functional_test_tunnel_test.go:299: tunnel at http://10.102.38.38 is working!
--- PASS: TestFunctional/parallel/TunnelCmd/serial/AccessDirect (0.00s)

                                                
                                    
TestFunctional/parallel/TunnelCmd/serial/DeleteTunnel (0.11s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/DeleteTunnel
functional_test_tunnel_test.go:434: (dbg) stopping [out/minikube-linux-arm64 -p functional-300199 tunnel --alsologtostderr] ...
--- PASS: TestFunctional/parallel/TunnelCmd/serial/DeleteTunnel (0.11s)

                                                
                                    
TestFunctional/parallel/ServiceCmd/DeployApp (7.21s)

                                                
                                                
=== RUN   TestFunctional/parallel/ServiceCmd/DeployApp
functional_test.go:1437: (dbg) Run:  kubectl --context functional-300199 create deployment hello-node --image=registry.k8s.io/echoserver-arm:1.8
functional_test.go:1445: (dbg) Run:  kubectl --context functional-300199 expose deployment hello-node --type=NodePort --port=8080
functional_test.go:1450: (dbg) TestFunctional/parallel/ServiceCmd/DeployApp: waiting 10m0s for pods matching "app=hello-node" in namespace "default" ...
helpers_test.go:344: "hello-node-64b4f8f9ff-2g4cb" [f9c3502a-93f6-4192-a85e-194ae1e93fa5] Pending / Ready:ContainersNotReady (containers with unready status: [echoserver-arm]) / ContainersReady:ContainersNotReady (containers with unready status: [echoserver-arm])
helpers_test.go:344: "hello-node-64b4f8f9ff-2g4cb" [f9c3502a-93f6-4192-a85e-194ae1e93fa5] Running
functional_test.go:1450: (dbg) TestFunctional/parallel/ServiceCmd/DeployApp: app=hello-node healthy within 7.004234181s
--- PASS: TestFunctional/parallel/ServiceCmd/DeployApp (7.21s)

                                                
                                    
TestFunctional/parallel/ProfileCmd/profile_not_create (0.38s)

                                                
                                                
=== RUN   TestFunctional/parallel/ProfileCmd/profile_not_create
functional_test.go:1270: (dbg) Run:  out/minikube-linux-arm64 profile lis
functional_test.go:1275: (dbg) Run:  out/minikube-linux-arm64 profile list --output json
--- PASS: TestFunctional/parallel/ProfileCmd/profile_not_create (0.38s)

                                                
                                    
TestFunctional/parallel/ProfileCmd/profile_list (0.38s)

                                                
                                                
=== RUN   TestFunctional/parallel/ProfileCmd/profile_list
functional_test.go:1310: (dbg) Run:  out/minikube-linux-arm64 profile list
functional_test.go:1315: Took "323.656899ms" to run "out/minikube-linux-arm64 profile list"
functional_test.go:1324: (dbg) Run:  out/minikube-linux-arm64 profile list -l
functional_test.go:1329: Took "54.667107ms" to run "out/minikube-linux-arm64 profile list -l"
--- PASS: TestFunctional/parallel/ProfileCmd/profile_list (0.38s)

                                                
                                    
TestFunctional/parallel/ProfileCmd/profile_json_output (0.38s)

                                                
                                                
=== RUN   TestFunctional/parallel/ProfileCmd/profile_json_output
functional_test.go:1361: (dbg) Run:  out/minikube-linux-arm64 profile list -o json
functional_test.go:1366: Took "323.095092ms" to run "out/minikube-linux-arm64 profile list -o json"
functional_test.go:1374: (dbg) Run:  out/minikube-linux-arm64 profile list -o json --light
functional_test.go:1379: Took "61.404591ms" to run "out/minikube-linux-arm64 profile list -o json --light"
--- PASS: TestFunctional/parallel/ProfileCmd/profile_json_output (0.38s)

                                                
                                    
TestFunctional/parallel/MountCmd/any-port (7.5s)

                                                
                                                
=== RUN   TestFunctional/parallel/MountCmd/any-port
functional_test_mount_test.go:73: (dbg) daemon: [out/minikube-linux-arm64 mount -p functional-300199 /tmp/TestFunctionalparallelMountCmdany-port2166336278/001:/mount-9p --alsologtostderr -v=1]
functional_test_mount_test.go:107: wrote "test-1723763788610391430" to /tmp/TestFunctionalparallelMountCmdany-port2166336278/001/created-by-test
functional_test_mount_test.go:107: wrote "test-1723763788610391430" to /tmp/TestFunctionalparallelMountCmdany-port2166336278/001/created-by-test-removed-by-pod
functional_test_mount_test.go:107: wrote "test-1723763788610391430" to /tmp/TestFunctionalparallelMountCmdany-port2166336278/001/test-1723763788610391430
functional_test_mount_test.go:115: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 ssh "findmnt -T /mount-9p | grep 9p"
functional_test_mount_test.go:115: (dbg) Non-zero exit: out/minikube-linux-arm64 -p functional-300199 ssh "findmnt -T /mount-9p | grep 9p": exit status 1 (411.220263ms)

                                                
                                                
** stderr ** 
	ssh: Process exited with status 1

                                                
                                                
** /stderr **
functional_test_mount_test.go:115: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 ssh "findmnt -T /mount-9p | grep 9p"
functional_test_mount_test.go:129: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 ssh -- ls -la /mount-9p
functional_test_mount_test.go:133: guest mount directory contents
total 2
-rw-r--r-- 1 docker docker 24 Aug 15 23:16 created-by-test
-rw-r--r-- 1 docker docker 24 Aug 15 23:16 created-by-test-removed-by-pod
-rw-r--r-- 1 docker docker 24 Aug 15 23:16 test-1723763788610391430
functional_test_mount_test.go:137: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 ssh cat /mount-9p/test-1723763788610391430
functional_test_mount_test.go:148: (dbg) Run:  kubectl --context functional-300199 replace --force -f testdata/busybox-mount-test.yaml
functional_test_mount_test.go:153: (dbg) TestFunctional/parallel/MountCmd/any-port: waiting 4m0s for pods matching "integration-test=busybox-mount" in namespace "default" ...
helpers_test.go:344: "busybox-mount" [47df729c-bfee-4b56-b57a-850b223ebcb5] Pending
helpers_test.go:344: "busybox-mount" [47df729c-bfee-4b56-b57a-850b223ebcb5] Pending / Ready:ContainersNotReady (containers with unready status: [mount-munger]) / ContainersReady:ContainersNotReady (containers with unready status: [mount-munger])
helpers_test.go:344: "busybox-mount" [47df729c-bfee-4b56-b57a-850b223ebcb5] Pending / Initialized:PodCompleted / Ready:PodCompleted / ContainersReady:PodCompleted
helpers_test.go:344: "busybox-mount" [47df729c-bfee-4b56-b57a-850b223ebcb5] Succeeded / Initialized:PodCompleted / Ready:PodCompleted / ContainersReady:PodCompleted
functional_test_mount_test.go:153: (dbg) TestFunctional/parallel/MountCmd/any-port: integration-test=busybox-mount healthy within 4.004093451s
functional_test_mount_test.go:169: (dbg) Run:  kubectl --context functional-300199 logs busybox-mount
functional_test_mount_test.go:181: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 ssh stat /mount-9p/created-by-test
functional_test_mount_test.go:181: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 ssh stat /mount-9p/created-by-pod
functional_test_mount_test.go:90: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 ssh "sudo umount -f /mount-9p"
functional_test_mount_test.go:94: (dbg) stopping [out/minikube-linux-arm64 mount -p functional-300199 /tmp/TestFunctionalparallelMountCmdany-port2166336278/001:/mount-9p --alsologtostderr -v=1] ...
--- PASS: TestFunctional/parallel/MountCmd/any-port (7.50s)

                                                
                                    
TestFunctional/parallel/ServiceCmd/List (0.63s)

                                                
                                                
=== RUN   TestFunctional/parallel/ServiceCmd/List
functional_test.go:1459: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 service list
--- PASS: TestFunctional/parallel/ServiceCmd/List (0.63s)

                                                
                                    
TestFunctional/parallel/ServiceCmd/JSONOutput (0.54s)

                                                
                                                
=== RUN   TestFunctional/parallel/ServiceCmd/JSONOutput
functional_test.go:1489: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 service list -o json
functional_test.go:1494: Took "538.401591ms" to run "out/minikube-linux-arm64 -p functional-300199 service list -o json"
--- PASS: TestFunctional/parallel/ServiceCmd/JSONOutput (0.54s)

                                                
                                    
TestFunctional/parallel/ServiceCmd/HTTPS (0.39s)

                                                
                                                
=== RUN   TestFunctional/parallel/ServiceCmd/HTTPS
functional_test.go:1509: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 service --namespace=default --https --url hello-node
functional_test.go:1522: found endpoint: https://192.168.49.2:31348
--- PASS: TestFunctional/parallel/ServiceCmd/HTTPS (0.39s)

                                                
                                    
TestFunctional/parallel/ServiceCmd/Format (0.4s)

                                                
                                                
=== RUN   TestFunctional/parallel/ServiceCmd/Format
functional_test.go:1540: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 service hello-node --url --format={{.IP}}
--- PASS: TestFunctional/parallel/ServiceCmd/Format (0.40s)

                                                
                                    
TestFunctional/parallel/ServiceCmd/URL (0.43s)

                                                
                                                
=== RUN   TestFunctional/parallel/ServiceCmd/URL
functional_test.go:1559: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 service hello-node --url
functional_test.go:1565: found endpoint for hello-node: http://192.168.49.2:31348
--- PASS: TestFunctional/parallel/ServiceCmd/URL (0.43s)

                                                
                                    
TestFunctional/parallel/MountCmd/specific-port (2.33s)

                                                
                                                
=== RUN   TestFunctional/parallel/MountCmd/specific-port
functional_test_mount_test.go:213: (dbg) daemon: [out/minikube-linux-arm64 mount -p functional-300199 /tmp/TestFunctionalparallelMountCmdspecific-port2754772372/001:/mount-9p --alsologtostderr -v=1 --port 46464]
functional_test_mount_test.go:243: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 ssh "findmnt -T /mount-9p | grep 9p"
functional_test_mount_test.go:243: (dbg) Non-zero exit: out/minikube-linux-arm64 -p functional-300199 ssh "findmnt -T /mount-9p | grep 9p": exit status 1 (616.52133ms)

                                                
                                                
** stderr ** 
	ssh: Process exited with status 1

                                                
                                                
** /stderr **
functional_test_mount_test.go:243: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 ssh "findmnt -T /mount-9p | grep 9p"
functional_test_mount_test.go:257: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 ssh -- ls -la /mount-9p
functional_test_mount_test.go:261: guest mount directory contents
total 0
functional_test_mount_test.go:263: (dbg) stopping [out/minikube-linux-arm64 mount -p functional-300199 /tmp/TestFunctionalparallelMountCmdspecific-port2754772372/001:/mount-9p --alsologtostderr -v=1 --port 46464] ...
functional_test_mount_test.go:264: reading mount text
functional_test_mount_test.go:278: done reading mount text
functional_test_mount_test.go:230: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 ssh "sudo umount -f /mount-9p"
functional_test_mount_test.go:230: (dbg) Non-zero exit: out/minikube-linux-arm64 -p functional-300199 ssh "sudo umount -f /mount-9p": exit status 1 (363.905278ms)

                                                
                                                
-- stdout --
	umount: /mount-9p: not mounted.

                                                
                                                
-- /stdout --
** stderr ** 
	ssh: Process exited with status 32

                                                
                                                
** /stderr **
functional_test_mount_test.go:232: "out/minikube-linux-arm64 -p functional-300199 ssh \"sudo umount -f /mount-9p\"": exit status 1
functional_test_mount_test.go:234: (dbg) stopping [out/minikube-linux-arm64 mount -p functional-300199 /tmp/TestFunctionalparallelMountCmdspecific-port2754772372/001:/mount-9p --alsologtostderr -v=1 --port 46464] ...
--- PASS: TestFunctional/parallel/MountCmd/specific-port (2.33s)

                                                
                                    
TestFunctional/parallel/MountCmd/VerifyCleanup (2.5s)

                                                
                                                
=== RUN   TestFunctional/parallel/MountCmd/VerifyCleanup
functional_test_mount_test.go:298: (dbg) daemon: [out/minikube-linux-arm64 mount -p functional-300199 /tmp/TestFunctionalparallelMountCmdVerifyCleanup1667287549/001:/mount1 --alsologtostderr -v=1]
functional_test_mount_test.go:298: (dbg) daemon: [out/minikube-linux-arm64 mount -p functional-300199 /tmp/TestFunctionalparallelMountCmdVerifyCleanup1667287549/001:/mount2 --alsologtostderr -v=1]
functional_test_mount_test.go:298: (dbg) daemon: [out/minikube-linux-arm64 mount -p functional-300199 /tmp/TestFunctionalparallelMountCmdVerifyCleanup1667287549/001:/mount3 --alsologtostderr -v=1]
functional_test_mount_test.go:325: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 ssh "findmnt -T" /mount1
functional_test_mount_test.go:325: (dbg) Non-zero exit: out/minikube-linux-arm64 -p functional-300199 ssh "findmnt -T" /mount1: exit status 1 (1.077015339s)

                                                
                                                
** stderr ** 
	ssh: Process exited with status 1

                                                
                                                
** /stderr **
functional_test_mount_test.go:325: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 ssh "findmnt -T" /mount1
functional_test_mount_test.go:325: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 ssh "findmnt -T" /mount2
functional_test_mount_test.go:325: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 ssh "findmnt -T" /mount3
functional_test_mount_test.go:370: (dbg) Run:  out/minikube-linux-arm64 mount -p functional-300199 --kill=true
functional_test_mount_test.go:313: (dbg) stopping [out/minikube-linux-arm64 mount -p functional-300199 /tmp/TestFunctionalparallelMountCmdVerifyCleanup1667287549/001:/mount1 --alsologtostderr -v=1] ...
helpers_test.go:490: unable to find parent, assuming dead: process does not exist
functional_test_mount_test.go:313: (dbg) stopping [out/minikube-linux-arm64 mount -p functional-300199 /tmp/TestFunctionalparallelMountCmdVerifyCleanup1667287549/001:/mount2 --alsologtostderr -v=1] ...
helpers_test.go:490: unable to find parent, assuming dead: process does not exist
functional_test_mount_test.go:313: (dbg) stopping [out/minikube-linux-arm64 mount -p functional-300199 /tmp/TestFunctionalparallelMountCmdVerifyCleanup1667287549/001:/mount3 --alsologtostderr -v=1] ...
helpers_test.go:490: unable to find parent, assuming dead: process does not exist
--- PASS: TestFunctional/parallel/MountCmd/VerifyCleanup (2.50s)

                                                
                                    
TestFunctional/parallel/Version/short (0.08s)

                                                
                                                
=== RUN   TestFunctional/parallel/Version/short
=== PAUSE TestFunctional/parallel/Version/short

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/Version/short
functional_test.go:2256: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 version --short
--- PASS: TestFunctional/parallel/Version/short (0.08s)

                                                
                                    
TestFunctional/parallel/Version/components (1.16s)

                                                
                                                
=== RUN   TestFunctional/parallel/Version/components
=== PAUSE TestFunctional/parallel/Version/components

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/Version/components
functional_test.go:2270: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 version -o=json --components
functional_test.go:2270: (dbg) Done: out/minikube-linux-arm64 -p functional-300199 version -o=json --components: (1.161542827s)
--- PASS: TestFunctional/parallel/Version/components (1.16s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageListShort (0.23s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageListShort
=== PAUSE TestFunctional/parallel/ImageCommands/ImageListShort

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/ImageCommands/ImageListShort
functional_test.go:261: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 image ls --format short --alsologtostderr
functional_test.go:266: (dbg) Stdout: out/minikube-linux-arm64 -p functional-300199 image ls --format short --alsologtostderr:
registry.k8s.io/pause:latest
registry.k8s.io/pause:3.3
registry.k8s.io/pause:3.10
registry.k8s.io/pause:3.1
registry.k8s.io/kube-scheduler:v1.31.0
registry.k8s.io/kube-proxy:v1.31.0
registry.k8s.io/kube-controller-manager:v1.31.0
registry.k8s.io/kube-apiserver:v1.31.0
registry.k8s.io/etcd:3.5.15-0
registry.k8s.io/echoserver-arm:1.8
registry.k8s.io/coredns/coredns:v1.11.1
gcr.io/k8s-minikube/storage-provisioner:v5
gcr.io/k8s-minikube/busybox:1.28.4-glibc
docker.io/library/nginx:latest
docker.io/library/nginx:alpine
docker.io/library/minikube-local-cache-test:functional-300199
docker.io/kubernetesui/metrics-scraper:<none>
docker.io/kubernetesui/dashboard:<none>
docker.io/kicbase/echo-server:functional-300199
functional_test.go:269: (dbg) Stderr: out/minikube-linux-arm64 -p functional-300199 image ls --format short --alsologtostderr:
I0815 23:16:50.073602 2073177 out.go:345] Setting OutFile to fd 1 ...
I0815 23:16:50.073858 2073177 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0815 23:16:50.073911 2073177 out.go:358] Setting ErrFile to fd 2...
I0815 23:16:50.073931 2073177 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0815 23:16:50.074238 2073177 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19452-2026001/.minikube/bin
I0815 23:16:50.075109 2073177 config.go:182] Loaded profile config "functional-300199": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.0
I0815 23:16:50.075332 2073177 config.go:182] Loaded profile config "functional-300199": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.0
I0815 23:16:50.075947 2073177 cli_runner.go:164] Run: docker container inspect functional-300199 --format={{.State.Status}}
I0815 23:16:50.097003 2073177 ssh_runner.go:195] Run: systemctl --version
I0815 23:16:50.097060 2073177 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-300199
I0815 23:16:50.118684 2073177 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:34734 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/functional-300199/id_rsa Username:docker}
I0815 23:16:50.214840 2073177 ssh_runner.go:195] Run: docker images --no-trunc --format "{{json .}}"
--- PASS: TestFunctional/parallel/ImageCommands/ImageListShort (0.23s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageListTable (0.23s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageListTable
=== PAUSE TestFunctional/parallel/ImageCommands/ImageListTable

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/ImageCommands/ImageListTable
functional_test.go:261: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 image ls --format table --alsologtostderr
functional_test.go:266: (dbg) Stdout: out/minikube-linux-arm64 -p functional-300199 image ls --format table --alsologtostderr:
|---------------------------------------------|-------------------|---------------|--------|
|                    Image                    |        Tag        |   Image ID    |  Size  |
|---------------------------------------------|-------------------|---------------|--------|
| registry.k8s.io/etcd                        | 3.5.15-0          | 27e3830e14027 | 139MB  |
| docker.io/kicbase/echo-server               | functional-300199 | ce2d2cda2d858 | 4.78MB |
| gcr.io/k8s-minikube/busybox                 | 1.28.4-glibc      | 1611cd07b61d5 | 3.55MB |
| registry.k8s.io/pause                       | latest            | 8cb2091f603e7 | 240kB  |
| registry.k8s.io/kube-controller-manager     | v1.31.0           | fcb0683e6bdbd | 85.9MB |
| registry.k8s.io/kube-proxy                  | v1.31.0           | 71d55d66fd4ee | 94.7MB |
| docker.io/library/nginx                     | alpine            | d7cd33d7d4ed1 | 44.8MB |
| gcr.io/k8s-minikube/storage-provisioner     | v5                | ba04bb24b9575 | 29MB   |
| registry.k8s.io/kube-apiserver              | v1.31.0           | cd0f0ae0ec9e0 | 91.5MB |
| registry.k8s.io/coredns/coredns             | v1.11.1           | 2437cf7621777 | 57.4MB |
| docker.io/kubernetesui/dashboard            | <none>            | 20b332c9a70d8 | 244MB  |
| registry.k8s.io/pause                       | 3.3               | 3d18732f8686c | 484kB  |
| registry.k8s.io/pause                       | 3.1               | 8057e0500773a | 525kB  |
| registry.k8s.io/echoserver-arm              | 1.8               | 72565bf5bbedf | 85MB   |
| docker.io/library/minikube-local-cache-test | functional-300199 | 06cca73177100 | 30B    |
| docker.io/library/nginx                     | latest            | a9dfdba8b7190 | 193MB  |
| registry.k8s.io/kube-scheduler              | v1.31.0           | fbbbd428abb4d | 66MB   |
| registry.k8s.io/pause                       | 3.10              | afb61768ce381 | 514kB  |
| docker.io/kubernetesui/metrics-scraper      | <none>            | a422e0e982356 | 42.3MB |
|---------------------------------------------|-------------------|---------------|--------|
functional_test.go:269: (dbg) Stderr: out/minikube-linux-arm64 -p functional-300199 image ls --format table --alsologtostderr:
I0815 23:16:51.248614 2073544 out.go:345] Setting OutFile to fd 1 ...
I0815 23:16:51.248801 2073544 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0815 23:16:51.248808 2073544 out.go:358] Setting ErrFile to fd 2...
I0815 23:16:51.248813 2073544 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0815 23:16:51.249074 2073544 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19452-2026001/.minikube/bin
I0815 23:16:51.249749 2073544 config.go:182] Loaded profile config "functional-300199": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.0
I0815 23:16:51.249872 2073544 config.go:182] Loaded profile config "functional-300199": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.0
I0815 23:16:51.250446 2073544 cli_runner.go:164] Run: docker container inspect functional-300199 --format={{.State.Status}}
I0815 23:16:51.276844 2073544 ssh_runner.go:195] Run: systemctl --version
I0815 23:16:51.276900 2073544 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-300199
I0815 23:16:51.295080 2073544 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:34734 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/functional-300199/id_rsa Username:docker}
I0815 23:16:51.390190 2073544 ssh_runner.go:195] Run: docker images --no-trunc --format "{{json .}}"
--- PASS: TestFunctional/parallel/ImageCommands/ImageListTable (0.23s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageListJson (0.25s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageListJson
=== PAUSE TestFunctional/parallel/ImageCommands/ImageListJson

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/ImageCommands/ImageListJson
functional_test.go:261: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 image ls --format json --alsologtostderr
functional_test.go:266: (dbg) Stdout: out/minikube-linux-arm64 -p functional-300199 image ls --format json --alsologtostderr:
[{"id":"ce2d2cda2d858fdaea84129deb86d18e5dbf1c548f230b79fdca74cc91729d17","repoDigests":[],"repoTags":["docker.io/kicbase/echo-server:functional-300199"],"size":"4780000"},{"id":"ba04bb24b95753201135cbc420b233c1b0b9fa2e1fd21d28319c348c33fbcde6","repoDigests":[],"repoTags":["gcr.io/k8s-minikube/storage-provisioner:v5"],"size":"29000000"},{"id":"cd0f0ae0ec9e0cdc092079156c122bf034ba3f24d31c1b1dd1b52a42ecf9b388","repoDigests":[],"repoTags":["registry.k8s.io/kube-apiserver:v1.31.0"],"size":"91500000"},{"id":"fbbbd428abb4dae52ab3018797d00d5840a739f0cc5697b662791831a60b0adb","repoDigests":[],"repoTags":["registry.k8s.io/kube-scheduler:v1.31.0"],"size":"66000000"},{"id":"71d55d66fd4eec8986225089a135fadd96bc6624d987096808772ce1e1924d89","repoDigests":[],"repoTags":["registry.k8s.io/kube-proxy:v1.31.0"],"size":"94700000"},{"id":"afb61768ce381961ca0beff95337601f29dc70ff3ed14e5e4b3e5699057e6aa8","repoDigests":[],"repoTags":["registry.k8s.io/pause:3.10"],"size":"514000"},{"id":"2437cf762177702dec2dfe99a09c37427a15
af6d9a57c456b65352667c223d93","repoDigests":[],"repoTags":["registry.k8s.io/coredns/coredns:v1.11.1"],"size":"57400000"},{"id":"20b332c9a70d8516d849d1ac23eff5800cbb2f263d379f0ec11ee908db6b25a8","repoDigests":[],"repoTags":["docker.io/kubernetesui/dashboard:\u003cnone\u003e"],"size":"244000000"},{"id":"3d18732f8686cc3c878055d99a05fa80289502fa496b36b6a0fe0f77206a7300","repoDigests":[],"repoTags":["registry.k8s.io/pause:3.3"],"size":"484000"},{"id":"72565bf5bbedfb62e9d21afa2b1221b2c7a5e05b746dae33430bc550d3f87beb","repoDigests":[],"repoTags":["registry.k8s.io/echoserver-arm:1.8"],"size":"85000000"},{"id":"06cca73177100ad201c9ef41d88dfcdf02075c1488d0db56ad950949d451e908","repoDigests":[],"repoTags":["docker.io/library/minikube-local-cache-test:functional-300199"],"size":"30"},{"id":"27e3830e1402783674d8b594038967deea9d51f0d91b34c93c8f39d2f68af7da","repoDigests":[],"repoTags":["registry.k8s.io/etcd:3.5.15-0"],"size":"139000000"},{"id":"1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c","repoDigests"
:[],"repoTags":["gcr.io/k8s-minikube/busybox:1.28.4-glibc"],"size":"3550000"},{"id":"a9dfdba8b719078c5705fdecd6f8315765cc79e473111aa9451551ddc340b2bc","repoDigests":[],"repoTags":["docker.io/library/nginx:latest"],"size":"193000000"},{"id":"fcb0683e6bdbd083710cf2d6fd7eb699c77fe4994c38a5c82d059e2e3cb4c2fd","repoDigests":[],"repoTags":["registry.k8s.io/kube-controller-manager:v1.31.0"],"size":"85900000"},{"id":"d7cd33d7d4ed1cdef69594adc36fcc03a0aa45ba930d39a9286024d9b2322660","repoDigests":[],"repoTags":["docker.io/library/nginx:alpine"],"size":"44800000"},{"id":"a422e0e982356f6c1cf0e5bb7b733363caae3992a07c99951fbcc73e58ed656a","repoDigests":[],"repoTags":["docker.io/kubernetesui/metrics-scraper:\u003cnone\u003e"],"size":"42300000"},{"id":"8057e0500773a37cde2cff041eb13ebd68c748419a2fbfd1dfb5bf38696cc8e5","repoDigests":[],"repoTags":["registry.k8s.io/pause:3.1"],"size":"525000"},{"id":"8cb2091f603e75187e2f6226c5901d12e00b1d1f778c6471ae4578e8a1c4724a","repoDigests":[],"repoTags":["registry.k8s.io/pause:latest"],"
size":"240000"}]
functional_test.go:269: (dbg) Stderr: out/minikube-linux-arm64 -p functional-300199 image ls --format json --alsologtostderr:
I0815 23:16:51.000570 2073474 out.go:345] Setting OutFile to fd 1 ...
I0815 23:16:51.000677 2073474 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0815 23:16:51.000683 2073474 out.go:358] Setting ErrFile to fd 2...
I0815 23:16:51.000688 2073474 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0815 23:16:51.001051 2073474 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19452-2026001/.minikube/bin
I0815 23:16:51.006775 2073474 config.go:182] Loaded profile config "functional-300199": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.0
I0815 23:16:51.007042 2073474 config.go:182] Loaded profile config "functional-300199": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.0
I0815 23:16:51.007699 2073474 cli_runner.go:164] Run: docker container inspect functional-300199 --format={{.State.Status}}
I0815 23:16:51.033911 2073474 ssh_runner.go:195] Run: systemctl --version
I0815 23:16:51.033970 2073474 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-300199
I0815 23:16:51.057827 2073474 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:34734 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/functional-300199/id_rsa Username:docker}
I0815 23:16:51.162895 2073474 ssh_runner.go:195] Run: docker images --no-trunc --format "{{json .}}"
--- PASS: TestFunctional/parallel/ImageCommands/ImageListJson (0.25s)
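Note: the JSON format is the easiest output to post-process. A small sketch, assuming jq is installed on the host (jq is not part of the test itself):

out/minikube-linux-arm64 -p functional-300199 image ls --format json \
  | jq -r '.[] | "\(.repoTags[0])\t\(.size)"'    # print tag and size per image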

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageListYaml (0.24s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageListYaml
=== PAUSE TestFunctional/parallel/ImageCommands/ImageListYaml

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/ImageCommands/ImageListYaml
functional_test.go:261: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 image ls --format yaml --alsologtostderr
functional_test.go:266: (dbg) Stdout: out/minikube-linux-arm64 -p functional-300199 image ls --format yaml --alsologtostderr:
- id: 06cca73177100ad201c9ef41d88dfcdf02075c1488d0db56ad950949d451e908
repoDigests: []
repoTags:
- docker.io/library/minikube-local-cache-test:functional-300199
size: "30"
- id: cd0f0ae0ec9e0cdc092079156c122bf034ba3f24d31c1b1dd1b52a42ecf9b388
repoDigests: []
repoTags:
- registry.k8s.io/kube-apiserver:v1.31.0
size: "91500000"
- id: 72565bf5bbedfb62e9d21afa2b1221b2c7a5e05b746dae33430bc550d3f87beb
repoDigests: []
repoTags:
- registry.k8s.io/echoserver-arm:1.8
size: "85000000"
- id: afb61768ce381961ca0beff95337601f29dc70ff3ed14e5e4b3e5699057e6aa8
repoDigests: []
repoTags:
- registry.k8s.io/pause:3.10
size: "514000"
- id: a9dfdba8b719078c5705fdecd6f8315765cc79e473111aa9451551ddc340b2bc
repoDigests: []
repoTags:
- docker.io/library/nginx:latest
size: "193000000"
- id: fcb0683e6bdbd083710cf2d6fd7eb699c77fe4994c38a5c82d059e2e3cb4c2fd
repoDigests: []
repoTags:
- registry.k8s.io/kube-controller-manager:v1.31.0
size: "85900000"
- id: fbbbd428abb4dae52ab3018797d00d5840a739f0cc5697b662791831a60b0adb
repoDigests: []
repoTags:
- registry.k8s.io/kube-scheduler:v1.31.0
size: "66000000"
- id: 27e3830e1402783674d8b594038967deea9d51f0d91b34c93c8f39d2f68af7da
repoDigests: []
repoTags:
- registry.k8s.io/etcd:3.5.15-0
size: "139000000"
- id: d7cd33d7d4ed1cdef69594adc36fcc03a0aa45ba930d39a9286024d9b2322660
repoDigests: []
repoTags:
- docker.io/library/nginx:alpine
size: "44800000"
- id: 20b332c9a70d8516d849d1ac23eff5800cbb2f263d379f0ec11ee908db6b25a8
repoDigests: []
repoTags:
- docker.io/kubernetesui/dashboard:<none>
size: "244000000"
- id: ba04bb24b95753201135cbc420b233c1b0b9fa2e1fd21d28319c348c33fbcde6
repoDigests: []
repoTags:
- gcr.io/k8s-minikube/storage-provisioner:v5
size: "29000000"
- id: 3d18732f8686cc3c878055d99a05fa80289502fa496b36b6a0fe0f77206a7300
repoDigests: []
repoTags:
- registry.k8s.io/pause:3.3
size: "484000"
- id: 8cb2091f603e75187e2f6226c5901d12e00b1d1f778c6471ae4578e8a1c4724a
repoDigests: []
repoTags:
- registry.k8s.io/pause:latest
size: "240000"
- id: 8057e0500773a37cde2cff041eb13ebd68c748419a2fbfd1dfb5bf38696cc8e5
repoDigests: []
repoTags:
- registry.k8s.io/pause:3.1
size: "525000"
- id: 71d55d66fd4eec8986225089a135fadd96bc6624d987096808772ce1e1924d89
repoDigests: []
repoTags:
- registry.k8s.io/kube-proxy:v1.31.0
size: "94700000"
- id: 2437cf762177702dec2dfe99a09c37427a15af6d9a57c456b65352667c223d93
repoDigests: []
repoTags:
- registry.k8s.io/coredns/coredns:v1.11.1
size: "57400000"
- id: ce2d2cda2d858fdaea84129deb86d18e5dbf1c548f230b79fdca74cc91729d17
repoDigests: []
repoTags:
- docker.io/kicbase/echo-server:functional-300199
size: "4780000"
- id: a422e0e982356f6c1cf0e5bb7b733363caae3992a07c99951fbcc73e58ed656a
repoDigests: []
repoTags:
- docker.io/kubernetesui/metrics-scraper:<none>
size: "42300000"
- id: 1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c
repoDigests: []
repoTags:
- gcr.io/k8s-minikube/busybox:1.28.4-glibc
size: "3550000"

                                                
                                                
functional_test.go:269: (dbg) Stderr: out/minikube-linux-arm64 -p functional-300199 image ls --format yaml --alsologtostderr:
I0815 23:16:50.761420 2073416 out.go:345] Setting OutFile to fd 1 ...
I0815 23:16:50.761727 2073416 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0815 23:16:50.761742 2073416 out.go:358] Setting ErrFile to fd 2...
I0815 23:16:50.761748 2073416 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0815 23:16:50.762044 2073416 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19452-2026001/.minikube/bin
I0815 23:16:50.762753 2073416 config.go:182] Loaded profile config "functional-300199": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.0
I0815 23:16:50.762925 2073416 config.go:182] Loaded profile config "functional-300199": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.0
I0815 23:16:50.763462 2073416 cli_runner.go:164] Run: docker container inspect functional-300199 --format={{.State.Status}}
I0815 23:16:50.780703 2073416 ssh_runner.go:195] Run: systemctl --version
I0815 23:16:50.780763 2073416 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-300199
I0815 23:16:50.807519 2073416 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:34734 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/functional-300199/id_rsa Username:docker}
I0815 23:16:50.911801 2073416 ssh_runner.go:195] Run: docker images --no-trunc --format "{{json .}}"
--- PASS: TestFunctional/parallel/ImageCommands/ImageListYaml (0.24s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageBuild (2.49s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageBuild
=== PAUSE TestFunctional/parallel/ImageCommands/ImageBuild

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/ImageCommands/ImageBuild
functional_test.go:308: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 ssh pgrep buildkitd
functional_test.go:308: (dbg) Non-zero exit: out/minikube-linux-arm64 -p functional-300199 ssh pgrep buildkitd: exit status 1 (348.950734ms)

                                                
                                                
** stderr ** 
	ssh: Process exited with status 1

                                                
                                                
** /stderr **
functional_test.go:315: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 image build -t localhost/my-image:functional-300199 testdata/build --alsologtostderr
functional_test.go:315: (dbg) Done: out/minikube-linux-arm64 -p functional-300199 image build -t localhost/my-image:functional-300199 testdata/build --alsologtostderr: (1.937847545s)
functional_test.go:323: (dbg) Stderr: out/minikube-linux-arm64 -p functional-300199 image build -t localhost/my-image:functional-300199 testdata/build --alsologtostderr:
I0815 23:16:50.866335 2073438 out.go:345] Setting OutFile to fd 1 ...
I0815 23:16:50.867260 2073438 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0815 23:16:50.867276 2073438 out.go:358] Setting ErrFile to fd 2...
I0815 23:16:50.867282 2073438 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0815 23:16:50.867622 2073438 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19452-2026001/.minikube/bin
I0815 23:16:50.868323 2073438 config.go:182] Loaded profile config "functional-300199": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.0
I0815 23:16:50.869827 2073438 config.go:182] Loaded profile config "functional-300199": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.0
I0815 23:16:50.870673 2073438 cli_runner.go:164] Run: docker container inspect functional-300199 --format={{.State.Status}}
I0815 23:16:50.889557 2073438 ssh_runner.go:195] Run: systemctl --version
I0815 23:16:50.889641 2073438 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-300199
I0815 23:16:50.908292 2073438 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:34734 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/functional-300199/id_rsa Username:docker}
I0815 23:16:50.998327 2073438 build_images.go:161] Building image from path: /tmp/build.2462242950.tar
I0815 23:16:50.998419 2073438 ssh_runner.go:195] Run: sudo mkdir -p /var/lib/minikube/build
I0815 23:16:51.010732 2073438 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/build/build.2462242950.tar
I0815 23:16:51.015690 2073438 ssh_runner.go:352] existence check for /var/lib/minikube/build/build.2462242950.tar: stat -c "%s %y" /var/lib/minikube/build/build.2462242950.tar: Process exited with status 1
stdout:

                                                
                                                
stderr:
stat: cannot statx '/var/lib/minikube/build/build.2462242950.tar': No such file or directory
I0815 23:16:51.015727 2073438 ssh_runner.go:362] scp /tmp/build.2462242950.tar --> /var/lib/minikube/build/build.2462242950.tar (3072 bytes)
I0815 23:16:51.044100 2073438 ssh_runner.go:195] Run: sudo mkdir -p /var/lib/minikube/build/build.2462242950
I0815 23:16:51.054367 2073438 ssh_runner.go:195] Run: sudo tar -C /var/lib/minikube/build/build.2462242950 -xf /var/lib/minikube/build/build.2462242950.tar
I0815 23:16:51.068552 2073438 docker.go:360] Building image: /var/lib/minikube/build/build.2462242950
I0815 23:16:51.068625 2073438 ssh_runner.go:195] Run: docker build -t localhost/my-image:functional-300199 /var/lib/minikube/build/build.2462242950
#0 building with "default" instance using docker driver

                                                
                                                
#1 [internal] load build definition from Dockerfile
#1 transferring dockerfile: 97B done
#1 DONE 0.0s

                                                
                                                
#2 [internal] load metadata for gcr.io/k8s-minikube/busybox:latest
#2 DONE 0.7s

                                                
                                                
#3 [internal] load .dockerignore
#3 transferring context: 2B done
#3 DONE 0.0s

                                                
                                                
#4 [internal] load build context
#4 transferring context: 62B done
#4 DONE 0.0s

                                                
                                                
#5 [1/3] FROM gcr.io/k8s-minikube/busybox:latest@sha256:ca5ae90100d50772da31f3b5016209e25ad61972404e2ccd83d44f10dee7e79b
#5 resolve gcr.io/k8s-minikube/busybox:latest@sha256:ca5ae90100d50772da31f3b5016209e25ad61972404e2ccd83d44f10dee7e79b 0.0s done
#5 sha256:ca5ae90100d50772da31f3b5016209e25ad61972404e2ccd83d44f10dee7e79b 770B / 770B done
#5 sha256:a77fe109c026308f149d36484d795b42efe0fd29b332be9071f63e1634c36ac9 527B / 527B done
#5 sha256:71a676dd070f4b701c3272e566d84951362f1326ea07d5bbad119d1c4f6b3d02 1.47kB / 1.47kB done
#5 sha256:a01966dde7f8d5ba10b6d87e776c7c8fb5a5f6bfa678874bd28b33b1fc6dba34 0B / 828.50kB 0.1s
#5 sha256:a01966dde7f8d5ba10b6d87e776c7c8fb5a5f6bfa678874bd28b33b1fc6dba34 828.50kB / 828.50kB 0.1s done
#5 extracting sha256:a01966dde7f8d5ba10b6d87e776c7c8fb5a5f6bfa678874bd28b33b1fc6dba34 0.0s done
#5 DONE 0.3s

                                                
                                                
#6 [2/3] RUN true
#6 DONE 0.2s

                                                
                                                
#7 [3/3] ADD content.txt /
#7 DONE 0.0s

                                                
                                                
#8 exporting to image
#8 exporting layers 0.0s done
#8 writing image sha256:d728eaeafe6f6c9ab28474d6114e8a64c21f15c1c07c105c915557e2ad1cd621 done
#8 naming to localhost/my-image:functional-300199 done
#8 DONE 0.1s
I0815 23:16:52.703891 2073438 ssh_runner.go:235] Completed: docker build -t localhost/my-image:functional-300199 /var/lib/minikube/build/build.2462242950: (1.635243543s)
I0815 23:16:52.703957 2073438 ssh_runner.go:195] Run: sudo rm -rf /var/lib/minikube/build/build.2462242950
I0815 23:16:52.714071 2073438 ssh_runner.go:195] Run: sudo rm -f /var/lib/minikube/build/build.2462242950.tar
I0815 23:16:52.723233 2073438 build_images.go:217] Built localhost/my-image:functional-300199 from /tmp/build.2462242950.tar
I0815 23:16:52.723264 2073438 build_images.go:133] succeeded building to: functional-300199
I0815 23:16:52.723270 2073438 build_images.go:134] failed building to: 
functional_test.go:451: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 image ls
--- PASS: TestFunctional/parallel/ImageCommands/ImageBuild (2.49s)
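Note: the build log above shows a three-step Dockerfile (FROM gcr.io/k8s-minikube/busybox:latest, RUN true, ADD content.txt /). A self-contained sketch that builds an equivalent image by hand; the scratch directory and the content.txt payload are hypothetical stand-ins for the test's testdata/build context:

mkdir -p /tmp/build-demo && cd /tmp/build-demo      # hypothetical scratch directory
printf 'demo\n' > content.txt                       # stand-in for the small build context seen above
cat > Dockerfile <<'EOF'
FROM gcr.io/k8s-minikube/busybox:latest
RUN true
ADD content.txt /
EOF
out/minikube-linux-arm64 -p functional-300199 image build -t localhost/my-image:functional-300199 /tmp/build-demo
out/minikube-linux-arm64 -p functional-300199 image ls | grep my-image     # confirm the image landed in the node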

                                                
                                    
TestFunctional/parallel/ImageCommands/Setup (0.78s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/Setup
functional_test.go:342: (dbg) Run:  docker pull kicbase/echo-server:1.0
functional_test.go:347: (dbg) Run:  docker tag kicbase/echo-server:1.0 kicbase/echo-server:functional-300199
--- PASS: TestFunctional/parallel/ImageCommands/Setup (0.78s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageLoadDaemon (1.41s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageLoadDaemon
functional_test.go:355: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 image load --daemon kicbase/echo-server:functional-300199 --alsologtostderr
functional_test.go:355: (dbg) Done: out/minikube-linux-arm64 -p functional-300199 image load --daemon kicbase/echo-server:functional-300199 --alsologtostderr: (1.180571069s)
functional_test.go:451: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 image ls
--- PASS: TestFunctional/parallel/ImageCommands/ImageLoadDaemon (1.41s)
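Note: combined with the Setup step above, the load-into-daemon flow can be reproduced by hand roughly as follows, using the same image names the test uses:

docker pull kicbase/echo-server:1.0
docker tag kicbase/echo-server:1.0 kicbase/echo-server:functional-300199
out/minikube-linux-arm64 -p functional-300199 image load --daemon kicbase/echo-server:functional-300199
out/minikube-linux-arm64 -p functional-300199 image ls | grep echo-server   # verify the image reached the node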

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageReloadDaemon (0.8s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageReloadDaemon
functional_test.go:365: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 image load --daemon kicbase/echo-server:functional-300199 --alsologtostderr
functional_test.go:451: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 image ls
--- PASS: TestFunctional/parallel/ImageCommands/ImageReloadDaemon (0.80s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageTagAndLoadDaemon (1.03s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageTagAndLoadDaemon
functional_test.go:235: (dbg) Run:  docker pull kicbase/echo-server:latest
functional_test.go:240: (dbg) Run:  docker tag kicbase/echo-server:latest kicbase/echo-server:functional-300199
functional_test.go:245: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 image load --daemon kicbase/echo-server:functional-300199 --alsologtostderr
functional_test.go:451: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 image ls
--- PASS: TestFunctional/parallel/ImageCommands/ImageTagAndLoadDaemon (1.03s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageSaveToFile (0.32s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageSaveToFile
functional_test.go:380: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 image save kicbase/echo-server:functional-300199 /home/jenkins/workspace/Docker_Linux_docker_arm64/echo-server-save.tar --alsologtostderr
--- PASS: TestFunctional/parallel/ImageCommands/ImageSaveToFile (0.32s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageRemove (0.43s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageRemove
functional_test.go:392: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 image rm kicbase/echo-server:functional-300199 --alsologtostderr
functional_test.go:451: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 image ls
--- PASS: TestFunctional/parallel/ImageCommands/ImageRemove (0.43s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageLoadFromFile (0.64s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageLoadFromFile
functional_test.go:409: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 image load /home/jenkins/workspace/Docker_Linux_docker_arm64/echo-server-save.tar --alsologtostderr
functional_test.go:451: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 image ls
--- PASS: TestFunctional/parallel/ImageCommands/ImageLoadFromFile (0.64s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageSaveDaemon (0.37s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageSaveDaemon
functional_test.go:419: (dbg) Run:  docker rmi kicbase/echo-server:functional-300199
functional_test.go:424: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 image save --daemon kicbase/echo-server:functional-300199 --alsologtostderr
functional_test.go:432: (dbg) Run:  docker image inspect kicbase/echo-server:functional-300199
--- PASS: TestFunctional/parallel/ImageCommands/ImageSaveDaemon (0.37s)
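Note: the save/remove/load tests above form a round trip (node -> tarball -> node, then node -> host daemon). A sketch of the same flow, with /tmp/echo-server-save.tar as a hypothetical path in place of the workspace path used by the test:

out/minikube-linux-arm64 -p functional-300199 image save kicbase/echo-server:functional-300199 /tmp/echo-server-save.tar
out/minikube-linux-arm64 -p functional-300199 image rm kicbase/echo-server:functional-300199
out/minikube-linux-arm64 -p functional-300199 image load /tmp/echo-server-save.tar
out/minikube-linux-arm64 -p functional-300199 image save --daemon kicbase/echo-server:functional-300199
docker image inspect kicbase/echo-server:functional-300199    # the image is now present in the host daemon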

                                                
                                    
TestFunctional/parallel/UpdateContextCmd/no_changes (0.2s)

                                                
                                                
=== RUN   TestFunctional/parallel/UpdateContextCmd/no_changes
=== PAUSE TestFunctional/parallel/UpdateContextCmd/no_changes

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/UpdateContextCmd/no_changes
functional_test.go:2119: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 update-context --alsologtostderr -v=2
--- PASS: TestFunctional/parallel/UpdateContextCmd/no_changes (0.20s)

                                                
                                    
TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster (0.15s)

                                                
                                                
=== RUN   TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster
=== PAUSE TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster
functional_test.go:2119: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 update-context --alsologtostderr -v=2
--- PASS: TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster (0.15s)

                                                
                                    
TestFunctional/parallel/UpdateContextCmd/no_clusters (0.14s)

                                                
                                                
=== RUN   TestFunctional/parallel/UpdateContextCmd/no_clusters
=== PAUSE TestFunctional/parallel/UpdateContextCmd/no_clusters

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/UpdateContextCmd/no_clusters
functional_test.go:2119: (dbg) Run:  out/minikube-linux-arm64 -p functional-300199 update-context --alsologtostderr -v=2
--- PASS: TestFunctional/parallel/UpdateContextCmd/no_clusters (0.14s)

                                                
                                    
TestFunctional/parallel/DockerEnv/bash (1.03s)

                                                
                                                
=== RUN   TestFunctional/parallel/DockerEnv/bash
functional_test.go:499: (dbg) Run:  /bin/bash -c "eval $(out/minikube-linux-arm64 -p functional-300199 docker-env) && out/minikube-linux-arm64 status -p functional-300199"
functional_test.go:522: (dbg) Run:  /bin/bash -c "eval $(out/minikube-linux-arm64 -p functional-300199 docker-env) && docker images"
2024/08/15 23:16:48 [DEBUG] GET http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/
--- PASS: TestFunctional/parallel/DockerEnv/bash (1.03s)
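Note: docker-env points the host docker CLI at the Docker daemon inside the functional-300199 node for the current shell. What the test does, in copy-pasteable form:

eval "$(out/minikube-linux-arm64 -p functional-300199 docker-env)"
out/minikube-linux-arm64 status -p functional-300199
docker images    # now lists the images stored in the node's Docker daemon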

                                                
                                    
TestFunctional/delete_echo-server_images (0.04s)

                                                
                                                
=== RUN   TestFunctional/delete_echo-server_images
functional_test.go:190: (dbg) Run:  docker rmi -f kicbase/echo-server:1.0
functional_test.go:190: (dbg) Run:  docker rmi -f kicbase/echo-server:functional-300199
--- PASS: TestFunctional/delete_echo-server_images (0.04s)

                                                
                                    
TestFunctional/delete_my-image_image (0.02s)

                                                
                                                
=== RUN   TestFunctional/delete_my-image_image
functional_test.go:198: (dbg) Run:  docker rmi -f localhost/my-image:functional-300199
--- PASS: TestFunctional/delete_my-image_image (0.02s)

                                                
                                    
TestFunctional/delete_minikube_cached_images (0.03s)

                                                
                                                
=== RUN   TestFunctional/delete_minikube_cached_images
functional_test.go:206: (dbg) Run:  docker rmi -f minikube-local-cache-test:functional-300199
--- PASS: TestFunctional/delete_minikube_cached_images (0.03s)

                                                
                                    
TestMultiControlPlane/serial/StartCluster (127.93s)

                                                
                                                
=== RUN   TestMultiControlPlane/serial/StartCluster
ha_test.go:101: (dbg) Run:  out/minikube-linux-arm64 start -p ha-730369 --wait=true --memory=2200 --ha -v=7 --alsologtostderr --driver=docker  --container-runtime=docker
E0815 23:17:20.053743 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/addons-083442/client.crt: no such file or directory" logger="UnhandledError"
ha_test.go:101: (dbg) Done: out/minikube-linux-arm64 start -p ha-730369 --wait=true --memory=2200 --ha -v=7 --alsologtostderr --driver=docker  --container-runtime=docker: (2m7.097108553s)
ha_test.go:107: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 status -v=7 --alsologtostderr
--- PASS: TestMultiControlPlane/serial/StartCluster (127.93s)
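Note: the --ha flag starts the cluster with multiple control-plane nodes. The invocation used above, trimmed to its essentials (same docker driver and runtime):

out/minikube-linux-arm64 start -p ha-730369 --ha --wait=true --memory=2200 --driver=docker --container-runtime=docker
out/minikube-linux-arm64 -p ha-730369 status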

                                                
                                    
TestMultiControlPlane/serial/DeployApp (6.94s)

                                                
                                                
=== RUN   TestMultiControlPlane/serial/DeployApp
ha_test.go:128: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-730369 -- apply -f ./testdata/ha/ha-pod-dns-test.yaml
ha_test.go:133: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-730369 -- rollout status deployment/busybox
ha_test.go:133: (dbg) Done: out/minikube-linux-arm64 kubectl -p ha-730369 -- rollout status deployment/busybox: (3.747971028s)
ha_test.go:140: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-730369 -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:163: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-730369 -- get pods -o jsonpath='{.items[*].metadata.name}'
ha_test.go:171: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-730369 -- exec busybox-7dff88458-67frl -- nslookup kubernetes.io
ha_test.go:171: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-730369 -- exec busybox-7dff88458-ckjjn -- nslookup kubernetes.io
ha_test.go:171: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-730369 -- exec busybox-7dff88458-lwcjs -- nslookup kubernetes.io
ha_test.go:181: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-730369 -- exec busybox-7dff88458-67frl -- nslookup kubernetes.default
ha_test.go:181: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-730369 -- exec busybox-7dff88458-ckjjn -- nslookup kubernetes.default
ha_test.go:181: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-730369 -- exec busybox-7dff88458-lwcjs -- nslookup kubernetes.default
ha_test.go:189: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-730369 -- exec busybox-7dff88458-67frl -- nslookup kubernetes.default.svc.cluster.local
ha_test.go:189: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-730369 -- exec busybox-7dff88458-ckjjn -- nslookup kubernetes.default.svc.cluster.local
ha_test.go:189: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-730369 -- exec busybox-7dff88458-lwcjs -- nslookup kubernetes.default.svc.cluster.local
--- PASS: TestMultiControlPlane/serial/DeployApp (6.94s)
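Note: a condensed version of the DNS checks above, picking a single busybox pod instead of iterating over all three; the jsonpath index is the only change from the test's own queries:

out/minikube-linux-arm64 kubectl -p ha-730369 -- apply -f ./testdata/ha/ha-pod-dns-test.yaml
out/minikube-linux-arm64 kubectl -p ha-730369 -- rollout status deployment/busybox
POD=$(out/minikube-linux-arm64 kubectl -p ha-730369 -- get pods -o jsonpath='{.items[0].metadata.name}')
out/minikube-linux-arm64 kubectl -p ha-730369 -- exec "$POD" -- nslookup kubernetes.default.svc.cluster.local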

                                                
                                    
TestMultiControlPlane/serial/PingHostFromPods (1.66s)

                                                
                                                
=== RUN   TestMultiControlPlane/serial/PingHostFromPods
ha_test.go:199: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-730369 -- get pods -o jsonpath='{.items[*].metadata.name}'
ha_test.go:207: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-730369 -- exec busybox-7dff88458-67frl -- sh -c "nslookup host.minikube.internal | awk 'NR==5' | cut -d' ' -f3"
ha_test.go:218: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-730369 -- exec busybox-7dff88458-67frl -- sh -c "ping -c 1 192.168.49.1"
ha_test.go:207: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-730369 -- exec busybox-7dff88458-ckjjn -- sh -c "nslookup host.minikube.internal | awk 'NR==5' | cut -d' ' -f3"
ha_test.go:218: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-730369 -- exec busybox-7dff88458-ckjjn -- sh -c "ping -c 1 192.168.49.1"
ha_test.go:207: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-730369 -- exec busybox-7dff88458-lwcjs -- sh -c "nslookup host.minikube.internal | awk 'NR==5' | cut -d' ' -f3"
ha_test.go:218: (dbg) Run:  out/minikube-linux-arm64 kubectl -p ha-730369 -- exec busybox-7dff88458-lwcjs -- sh -c "ping -c 1 192.168.49.1"
--- PASS: TestMultiControlPlane/serial/PingHostFromPods (1.66s)
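Note: 192.168.49.1 is typically the host-side gateway of minikube's default docker network, which is why the pods ping it to prove pod-to-host connectivity. Reusing the $POD variable from the sketch above:

out/minikube-linux-arm64 kubectl -p ha-730369 -- exec "$POD" -- sh -c "nslookup host.minikube.internal | awk 'NR==5' | cut -d' ' -f3"
out/minikube-linux-arm64 kubectl -p ha-730369 -- exec "$POD" -- sh -c "ping -c 1 192.168.49.1"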

                                                
                                    
TestMultiControlPlane/serial/AddWorkerNode (25.7s)

                                                
                                                
=== RUN   TestMultiControlPlane/serial/AddWorkerNode
ha_test.go:228: (dbg) Run:  out/minikube-linux-arm64 node add -p ha-730369 -v=7 --alsologtostderr
E0815 23:19:36.183080 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/addons-083442/client.crt: no such file or directory" logger="UnhandledError"
ha_test.go:228: (dbg) Done: out/minikube-linux-arm64 node add -p ha-730369 -v=7 --alsologtostderr: (24.658840522s)
ha_test.go:234: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 status -v=7 --alsologtostderr
ha_test.go:234: (dbg) Done: out/minikube-linux-arm64 -p ha-730369 status -v=7 --alsologtostderr: (1.037803553s)
--- PASS: TestMultiControlPlane/serial/AddWorkerNode (25.70s)

                                                
                                    
TestMultiControlPlane/serial/NodeLabels (0.11s)

                                                
                                                
=== RUN   TestMultiControlPlane/serial/NodeLabels
ha_test.go:255: (dbg) Run:  kubectl --context ha-730369 get nodes -o "jsonpath=[{range .items[*]}{.metadata.labels},{end}]"
--- PASS: TestMultiControlPlane/serial/NodeLabels (0.11s)

                                                
                                    
TestMultiControlPlane/serial/HAppyAfterClusterStart (0.8s)

                                                
                                                
=== RUN   TestMultiControlPlane/serial/HAppyAfterClusterStart
ha_test.go:281: (dbg) Run:  out/minikube-linux-arm64 profile list --output json
--- PASS: TestMultiControlPlane/serial/HAppyAfterClusterStart (0.80s)

                                                
                                    
TestMultiControlPlane/serial/CopyFile (19.04s)

                                                
                                                
=== RUN   TestMultiControlPlane/serial/CopyFile
ha_test.go:326: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 status --output json -v=7 --alsologtostderr
ha_test.go:326: (dbg) Done: out/minikube-linux-arm64 -p ha-730369 status --output json -v=7 --alsologtostderr: (1.01124754s)
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 cp testdata/cp-test.txt ha-730369:/home/docker/cp-test.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 ssh -n ha-730369 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 cp ha-730369:/home/docker/cp-test.txt /tmp/TestMultiControlPlaneserialCopyFile2675064690/001/cp-test_ha-730369.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 ssh -n ha-730369 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 cp ha-730369:/home/docker/cp-test.txt ha-730369-m02:/home/docker/cp-test_ha-730369_ha-730369-m02.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 ssh -n ha-730369 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 ssh -n ha-730369-m02 "sudo cat /home/docker/cp-test_ha-730369_ha-730369-m02.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 cp ha-730369:/home/docker/cp-test.txt ha-730369-m03:/home/docker/cp-test_ha-730369_ha-730369-m03.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 ssh -n ha-730369 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 ssh -n ha-730369-m03 "sudo cat /home/docker/cp-test_ha-730369_ha-730369-m03.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 cp ha-730369:/home/docker/cp-test.txt ha-730369-m04:/home/docker/cp-test_ha-730369_ha-730369-m04.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 ssh -n ha-730369 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 ssh -n ha-730369-m04 "sudo cat /home/docker/cp-test_ha-730369_ha-730369-m04.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 cp testdata/cp-test.txt ha-730369-m02:/home/docker/cp-test.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 ssh -n ha-730369-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 cp ha-730369-m02:/home/docker/cp-test.txt /tmp/TestMultiControlPlaneserialCopyFile2675064690/001/cp-test_ha-730369-m02.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 ssh -n ha-730369-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 cp ha-730369-m02:/home/docker/cp-test.txt ha-730369:/home/docker/cp-test_ha-730369-m02_ha-730369.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 ssh -n ha-730369-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 ssh -n ha-730369 "sudo cat /home/docker/cp-test_ha-730369-m02_ha-730369.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 cp ha-730369-m02:/home/docker/cp-test.txt ha-730369-m03:/home/docker/cp-test_ha-730369-m02_ha-730369-m03.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 ssh -n ha-730369-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 ssh -n ha-730369-m03 "sudo cat /home/docker/cp-test_ha-730369-m02_ha-730369-m03.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 cp ha-730369-m02:/home/docker/cp-test.txt ha-730369-m04:/home/docker/cp-test_ha-730369-m02_ha-730369-m04.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 ssh -n ha-730369-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 ssh -n ha-730369-m04 "sudo cat /home/docker/cp-test_ha-730369-m02_ha-730369-m04.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 cp testdata/cp-test.txt ha-730369-m03:/home/docker/cp-test.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 ssh -n ha-730369-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 cp ha-730369-m03:/home/docker/cp-test.txt /tmp/TestMultiControlPlaneserialCopyFile2675064690/001/cp-test_ha-730369-m03.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 ssh -n ha-730369-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 cp ha-730369-m03:/home/docker/cp-test.txt ha-730369:/home/docker/cp-test_ha-730369-m03_ha-730369.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 ssh -n ha-730369-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 ssh -n ha-730369 "sudo cat /home/docker/cp-test_ha-730369-m03_ha-730369.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 cp ha-730369-m03:/home/docker/cp-test.txt ha-730369-m02:/home/docker/cp-test_ha-730369-m03_ha-730369-m02.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 ssh -n ha-730369-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 ssh -n ha-730369-m02 "sudo cat /home/docker/cp-test_ha-730369-m03_ha-730369-m02.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 cp ha-730369-m03:/home/docker/cp-test.txt ha-730369-m04:/home/docker/cp-test_ha-730369-m03_ha-730369-m04.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 ssh -n ha-730369-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 ssh -n ha-730369-m04 "sudo cat /home/docker/cp-test_ha-730369-m03_ha-730369-m04.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 cp testdata/cp-test.txt ha-730369-m04:/home/docker/cp-test.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 ssh -n ha-730369-m04 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 cp ha-730369-m04:/home/docker/cp-test.txt /tmp/TestMultiControlPlaneserialCopyFile2675064690/001/cp-test_ha-730369-m04.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 ssh -n ha-730369-m04 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 cp ha-730369-m04:/home/docker/cp-test.txt ha-730369:/home/docker/cp-test_ha-730369-m04_ha-730369.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 ssh -n ha-730369-m04 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 ssh -n ha-730369 "sudo cat /home/docker/cp-test_ha-730369-m04_ha-730369.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 cp ha-730369-m04:/home/docker/cp-test.txt ha-730369-m02:/home/docker/cp-test_ha-730369-m04_ha-730369-m02.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 ssh -n ha-730369-m04 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 ssh -n ha-730369-m02 "sudo cat /home/docker/cp-test_ha-730369-m04_ha-730369-m02.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 cp ha-730369-m04:/home/docker/cp-test.txt ha-730369-m03:/home/docker/cp-test_ha-730369-m04_ha-730369-m03.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 ssh -n ha-730369-m04 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 ssh -n ha-730369-m03 "sudo cat /home/docker/cp-test_ha-730369-m04_ha-730369-m03.txt"
--- PASS: TestMultiControlPlane/serial/CopyFile (19.04s)
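Note: minikube cp works host-to-node, node-to-host and node-to-node within a profile, which is what the matrix of commands above exercises. A minimal sketch of each direction (the local destination path is hypothetical):

out/minikube-linux-arm64 -p ha-730369 cp testdata/cp-test.txt ha-730369-m02:/home/docker/cp-test.txt
out/minikube-linux-arm64 -p ha-730369 ssh -n ha-730369-m02 "sudo cat /home/docker/cp-test.txt"
out/minikube-linux-arm64 -p ha-730369 cp ha-730369-m02:/home/docker/cp-test.txt /tmp/cp-test-copy.txt
out/minikube-linux-arm64 -p ha-730369 cp ha-730369-m02:/home/docker/cp-test.txt ha-730369-m03:/home/docker/cp-test-copy.txt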

                                                
                                    
TestMultiControlPlane/serial/StopSecondaryNode (11.78s)

                                                
                                                
=== RUN   TestMultiControlPlane/serial/StopSecondaryNode
ha_test.go:363: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 node stop m02 -v=7 --alsologtostderr
E0815 23:20:03.896452 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/addons-083442/client.crt: no such file or directory" logger="UnhandledError"
ha_test.go:363: (dbg) Done: out/minikube-linux-arm64 -p ha-730369 node stop m02 -v=7 --alsologtostderr: (10.970002566s)
ha_test.go:369: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 status -v=7 --alsologtostderr
ha_test.go:369: (dbg) Non-zero exit: out/minikube-linux-arm64 -p ha-730369 status -v=7 --alsologtostderr: exit status 7 (810.337823ms)

                                                
                                                
-- stdout --
	ha-730369
	type: Control Plane
	host: Running
	kubelet: Running
	apiserver: Running
	kubeconfig: Configured
	
	ha-730369-m02
	type: Control Plane
	host: Stopped
	kubelet: Stopped
	apiserver: Stopped
	kubeconfig: Stopped
	
	ha-730369-m03
	type: Control Plane
	host: Running
	kubelet: Running
	apiserver: Running
	kubeconfig: Configured
	
	ha-730369-m04
	type: Worker
	host: Running
	kubelet: Running
	

                                                
                                                
-- /stdout --
** stderr ** 
	I0815 23:20:08.725273 2095840 out.go:345] Setting OutFile to fd 1 ...
	I0815 23:20:08.725411 2095840 out.go:392] TERM=,COLORTERM=, which probably does not support color
	I0815 23:20:08.725428 2095840 out.go:358] Setting ErrFile to fd 2...
	I0815 23:20:08.725435 2095840 out.go:392] TERM=,COLORTERM=, which probably does not support color
	I0815 23:20:08.725820 2095840 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19452-2026001/.minikube/bin
	I0815 23:20:08.726091 2095840 out.go:352] Setting JSON to false
	I0815 23:20:08.726138 2095840 mustload.go:65] Loading cluster: ha-730369
	I0815 23:20:08.726839 2095840 config.go:182] Loaded profile config "ha-730369": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.0
	I0815 23:20:08.726853 2095840 status.go:255] checking status of ha-730369 ...
	I0815 23:20:08.727694 2095840 cli_runner.go:164] Run: docker container inspect ha-730369 --format={{.State.Status}}
	I0815 23:20:08.728200 2095840 notify.go:220] Checking for updates...
	I0815 23:20:08.763042 2095840 status.go:330] ha-730369 host status = "Running" (err=<nil>)
	I0815 23:20:08.763123 2095840 host.go:66] Checking if "ha-730369" exists ...
	I0815 23:20:08.763501 2095840 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-730369
	I0815 23:20:08.793935 2095840 host.go:66] Checking if "ha-730369" exists ...
	I0815 23:20:08.794245 2095840 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I0815 23:20:08.794440 2095840 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-730369
	I0815 23:20:08.817395 2095840 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:34739 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/ha-730369/id_rsa Username:docker}
	I0815 23:20:08.917855 2095840 ssh_runner.go:195] Run: systemctl --version
	I0815 23:20:08.922789 2095840 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I0815 23:20:08.938248 2095840 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0815 23:20:09.024770 2095840 info.go:266] docker info: {ID:U5VK:ZNT5:35M3:FHLW:Q7TL:ELFX:BNAG:AV4T:UD2H:SK5L:SEJV:SJJL Containers:4 ContainersRunning:3 ContainersPaused:0 ContainersStopped:1 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:48 OomKillDisable:true NGoroutines:71 SystemTime:2024-08-15 23:20:09.014456227 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1067-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aar
ch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214900736 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-29-130 Labels:[] ExperimentalBuild:false ServerVersion:27.1.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:8fc6bcff51318944179630522a095cc9dbf9f353 Expected:8fc6bcff51318944179630522a095cc9dbf9f353} RuncCommit:{ID:v1.1.13-0-g58aa920 Expected:v1.1.13-0-g58aa920} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErro
rs:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.16.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.29.1]] Warnings:<nil>}}
	I0815 23:20:09.025374 2095840 kubeconfig.go:125] found "ha-730369" server: "https://192.168.49.254:8443"
	I0815 23:20:09.025401 2095840 api_server.go:166] Checking apiserver status ...
	I0815 23:20:09.025444 2095840 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I0815 23:20:09.040799 2095840 ssh_runner.go:195] Run: sudo egrep ^[0-9]+:freezer: /proc/2230/cgroup
	I0815 23:20:09.059808 2095840 api_server.go:182] apiserver freezer: "3:freezer:/docker/8c824948ded1c0cf3627ef6452490270623364bdc3a0e27963a00f573fb097f5/kubepods/burstable/pod73bd4475f2c0f0dfe319d7cc3a4f964b/63313ead6701d516a8f9ef7abbad3a10b9fef9b4a5ecd9589d5dabc117da1f5e"
	I0815 23:20:09.059879 2095840 ssh_runner.go:195] Run: sudo cat /sys/fs/cgroup/freezer/docker/8c824948ded1c0cf3627ef6452490270623364bdc3a0e27963a00f573fb097f5/kubepods/burstable/pod73bd4475f2c0f0dfe319d7cc3a4f964b/63313ead6701d516a8f9ef7abbad3a10b9fef9b4a5ecd9589d5dabc117da1f5e/freezer.state
	I0815 23:20:09.070799 2095840 api_server.go:204] freezer state: "THAWED"
	I0815 23:20:09.070831 2095840 api_server.go:253] Checking apiserver healthz at https://192.168.49.254:8443/healthz ...
	I0815 23:20:09.078830 2095840 api_server.go:279] https://192.168.49.254:8443/healthz returned 200:
	ok
	I0815 23:20:09.078858 2095840 status.go:422] ha-730369 apiserver status = Running (err=<nil>)
	I0815 23:20:09.078870 2095840 status.go:257] ha-730369 status: &{Name:ha-730369 Host:Running Kubelet:Running APIServer:Running Kubeconfig:Configured Worker:false TimeToStop: DockerEnv: PodManEnv:}
	I0815 23:20:09.078910 2095840 status.go:255] checking status of ha-730369-m02 ...
	I0815 23:20:09.079247 2095840 cli_runner.go:164] Run: docker container inspect ha-730369-m02 --format={{.State.Status}}
	I0815 23:20:09.097969 2095840 status.go:330] ha-730369-m02 host status = "Stopped" (err=<nil>)
	I0815 23:20:09.097995 2095840 status.go:343] host is not running, skipping remaining checks
	I0815 23:20:09.098003 2095840 status.go:257] ha-730369-m02 status: &{Name:ha-730369-m02 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:false TimeToStop: DockerEnv: PodManEnv:}
	I0815 23:20:09.098055 2095840 status.go:255] checking status of ha-730369-m03 ...
	I0815 23:20:09.098355 2095840 cli_runner.go:164] Run: docker container inspect ha-730369-m03 --format={{.State.Status}}
	I0815 23:20:09.115114 2095840 status.go:330] ha-730369-m03 host status = "Running" (err=<nil>)
	I0815 23:20:09.115141 2095840 host.go:66] Checking if "ha-730369-m03" exists ...
	I0815 23:20:09.115452 2095840 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-730369-m03
	I0815 23:20:09.132194 2095840 host.go:66] Checking if "ha-730369-m03" exists ...
	I0815 23:20:09.132524 2095840 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I0815 23:20:09.132570 2095840 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-730369-m03
	I0815 23:20:09.154530 2095840 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:34749 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/ha-730369-m03/id_rsa Username:docker}
	I0815 23:20:09.253482 2095840 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I0815 23:20:09.267946 2095840 kubeconfig.go:125] found "ha-730369" server: "https://192.168.49.254:8443"
	I0815 23:20:09.268029 2095840 api_server.go:166] Checking apiserver status ...
	I0815 23:20:09.268121 2095840 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I0815 23:20:09.280065 2095840 ssh_runner.go:195] Run: sudo egrep ^[0-9]+:freezer: /proc/2246/cgroup
	I0815 23:20:09.289564 2095840 api_server.go:182] apiserver freezer: "3:freezer:/docker/f216575cee59c0d53d17e8fb009ef66bd22ab59f9f83974da633be5da746767c/kubepods/burstable/pod30755778fee3e210666fa47f73ff1701/1cbe3ed39e2956827a24e12b2aa7c5e9d2b1656d9dc830820282171c738c0370"
	I0815 23:20:09.289677 2095840 ssh_runner.go:195] Run: sudo cat /sys/fs/cgroup/freezer/docker/f216575cee59c0d53d17e8fb009ef66bd22ab59f9f83974da633be5da746767c/kubepods/burstable/pod30755778fee3e210666fa47f73ff1701/1cbe3ed39e2956827a24e12b2aa7c5e9d2b1656d9dc830820282171c738c0370/freezer.state
	I0815 23:20:09.301260 2095840 api_server.go:204] freezer state: "THAWED"
	I0815 23:20:09.301340 2095840 api_server.go:253] Checking apiserver healthz at https://192.168.49.254:8443/healthz ...
	I0815 23:20:09.309551 2095840 api_server.go:279] https://192.168.49.254:8443/healthz returned 200:
	ok
	I0815 23:20:09.309583 2095840 status.go:422] ha-730369-m03 apiserver status = Running (err=<nil>)
	I0815 23:20:09.309594 2095840 status.go:257] ha-730369-m03 status: &{Name:ha-730369-m03 Host:Running Kubelet:Running APIServer:Running Kubeconfig:Configured Worker:false TimeToStop: DockerEnv: PodManEnv:}
	I0815 23:20:09.309654 2095840 status.go:255] checking status of ha-730369-m04 ...
	I0815 23:20:09.309972 2095840 cli_runner.go:164] Run: docker container inspect ha-730369-m04 --format={{.State.Status}}
	I0815 23:20:09.328234 2095840 status.go:330] ha-730369-m04 host status = "Running" (err=<nil>)
	I0815 23:20:09.328295 2095840 host.go:66] Checking if "ha-730369-m04" exists ...
	I0815 23:20:09.328616 2095840 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-730369-m04
	I0815 23:20:09.345529 2095840 host.go:66] Checking if "ha-730369-m04" exists ...
	I0815 23:20:09.346086 2095840 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I0815 23:20:09.346147 2095840 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-730369-m04
	I0815 23:20:09.363609 2095840 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:34754 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/ha-730369-m04/id_rsa Username:docker}
	I0815 23:20:09.461096 2095840 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I0815 23:20:09.474307 2095840 status.go:257] ha-730369-m04 status: &{Name:ha-730369-m04 Host:Running Kubelet:Running APIServer:Irrelevant Kubeconfig:Irrelevant Worker:true TimeToStop: DockerEnv: PodManEnv:}

                                                
                                                
** /stderr **
--- PASS: TestMultiControlPlane/serial/StopSecondaryNode (11.78s)
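Note: the apiserver probe logged above (pgrep → freezer cgroup → freezer.state → /healthz) can be reproduced by hand while the ha-730369 profile is up. A rough sketch, with the PID and cgroup path as placeholders taken from the pgrep/egrep output, and assuming the VIP's /healthz endpoint answers anonymous requests:
  $ out/minikube-linux-arm64 -p ha-730369 ssh "sudo pgrep -xnf kube-apiserver.*minikube.*"              # newest apiserver PID
  $ out/minikube-linux-arm64 -p ha-730369 ssh "sudo egrep ^[0-9]+:freezer: /proc/<pid>/cgroup"          # its freezer cgroup
  $ out/minikube-linux-arm64 -p ha-730369 ssh "sudo cat /sys/fs/cgroup/freezer/<cgroup>/freezer.state"  # expect THAWED
  $ curl -ks https://192.168.49.254:8443/healthz                                                        # expect "ok"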

                                                
                                    
TestMultiControlPlane/serial/DegradedAfterControlPlaneNodeStop (0.59s)

                                                
                                                
=== RUN   TestMultiControlPlane/serial/DegradedAfterControlPlaneNodeStop
ha_test.go:390: (dbg) Run:  out/minikube-linux-arm64 profile list --output json
--- PASS: TestMultiControlPlane/serial/DegradedAfterControlPlaneNodeStop (0.59s)

                                                
                                    
TestMultiControlPlane/serial/RestartSecondaryNode (39.97s)

                                                
                                                
=== RUN   TestMultiControlPlane/serial/RestartSecondaryNode
ha_test.go:420: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 node start m02 -v=7 --alsologtostderr
ha_test.go:420: (dbg) Done: out/minikube-linux-arm64 -p ha-730369 node start m02 -v=7 --alsologtostderr: (38.794538991s)
ha_test.go:428: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 status -v=7 --alsologtostderr
ha_test.go:428: (dbg) Done: out/minikube-linux-arm64 -p ha-730369 status -v=7 --alsologtostderr: (1.061172633s)
ha_test.go:448: (dbg) Run:  kubectl get nodes
--- PASS: TestMultiControlPlane/serial/RestartSecondaryNode (39.97s)

                                                
                                    
TestMultiControlPlane/serial/HAppyAfterSecondaryNodeRestart (8.74s)

                                                
                                                
=== RUN   TestMultiControlPlane/serial/HAppyAfterSecondaryNodeRestart
ha_test.go:281: (dbg) Run:  out/minikube-linux-arm64 profile list --output json
ha_test.go:281: (dbg) Done: out/minikube-linux-arm64 profile list --output json: (8.738661876s)
--- PASS: TestMultiControlPlane/serial/HAppyAfterSecondaryNodeRestart (8.74s)

                                                
                                    
TestMultiControlPlane/serial/RestartClusterKeepsNodes (146.17s)

                                                
                                                
=== RUN   TestMultiControlPlane/serial/RestartClusterKeepsNodes
ha_test.go:456: (dbg) Run:  out/minikube-linux-arm64 node list -p ha-730369 -v=7 --alsologtostderr
ha_test.go:462: (dbg) Run:  out/minikube-linux-arm64 stop -p ha-730369 -v=7 --alsologtostderr
E0815 23:21:00.531046 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/functional-300199/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:21:00.537443 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/functional-300199/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:21:00.548796 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/functional-300199/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:21:00.570102 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/functional-300199/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:21:00.611480 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/functional-300199/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:21:00.692840 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/functional-300199/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:21:00.854267 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/functional-300199/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:21:01.175776 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/functional-300199/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:21:01.817240 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/functional-300199/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:21:03.099290 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/functional-300199/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:21:05.661756 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/functional-300199/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:21:10.783100 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/functional-300199/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:21:21.024370 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/functional-300199/client.crt: no such file or directory" logger="UnhandledError"
ha_test.go:462: (dbg) Done: out/minikube-linux-arm64 stop -p ha-730369 -v=7 --alsologtostderr: (34.680776347s)
ha_test.go:467: (dbg) Run:  out/minikube-linux-arm64 start -p ha-730369 --wait=true -v=7 --alsologtostderr
E0815 23:21:41.505832 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/functional-300199/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:22:22.467912 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/functional-300199/client.crt: no such file or directory" logger="UnhandledError"
ha_test.go:467: (dbg) Done: out/minikube-linux-arm64 start -p ha-730369 --wait=true -v=7 --alsologtostderr: (1m51.331220112s)
ha_test.go:472: (dbg) Run:  out/minikube-linux-arm64 node list -p ha-730369
--- PASS: TestMultiControlPlane/serial/RestartClusterKeepsNodes (146.17s)

                                                
                                    
TestMultiControlPlane/serial/DeleteSecondaryNode (11.21s)

                                                
                                                
=== RUN   TestMultiControlPlane/serial/DeleteSecondaryNode
ha_test.go:487: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 node delete m03 -v=7 --alsologtostderr
ha_test.go:487: (dbg) Done: out/minikube-linux-arm64 -p ha-730369 node delete m03 -v=7 --alsologtostderr: (10.227870789s)
ha_test.go:493: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 status -v=7 --alsologtostderr
ha_test.go:511: (dbg) Run:  kubectl get nodes
ha_test.go:519: (dbg) Run:  kubectl get nodes -o "go-template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}} {{.status}}{{"\n"}}{{end}}{{end}}{{end}}'"
--- PASS: TestMultiControlPlane/serial/DeleteSecondaryNode (11.21s)
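Note: the Ready check at ha_test.go:519 is just a kubectl go-template over node conditions; run standalone (same template as above) it prints one status per remaining node, e.g. three True lines after m03 is deleted:
  $ kubectl get nodes -o go-template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}}{{.status}}{{"\n"}}{{end}}{{end}}{{end}}'
  True
  True
  True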

                                                
                                    
TestMultiControlPlane/serial/DegradedAfterSecondaryNodeDelete (0.55s)

                                                
                                                
=== RUN   TestMultiControlPlane/serial/DegradedAfterSecondaryNodeDelete
ha_test.go:390: (dbg) Run:  out/minikube-linux-arm64 profile list --output json
--- PASS: TestMultiControlPlane/serial/DegradedAfterSecondaryNodeDelete (0.55s)

                                                
                                    
TestMultiControlPlane/serial/StopCluster (32.92s)

                                                
                                                
=== RUN   TestMultiControlPlane/serial/StopCluster
ha_test.go:531: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 stop -v=7 --alsologtostderr
E0815 23:23:44.389309 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/functional-300199/client.crt: no such file or directory" logger="UnhandledError"
ha_test.go:531: (dbg) Done: out/minikube-linux-arm64 -p ha-730369 stop -v=7 --alsologtostderr: (32.799767319s)
ha_test.go:537: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 status -v=7 --alsologtostderr
ha_test.go:537: (dbg) Non-zero exit: out/minikube-linux-arm64 -p ha-730369 status -v=7 --alsologtostderr: exit status 7 (114.998911ms)

                                                
                                                
-- stdout --
	ha-730369
	type: Control Plane
	host: Stopped
	kubelet: Stopped
	apiserver: Stopped
	kubeconfig: Stopped
	
	ha-730369-m02
	type: Control Plane
	host: Stopped
	kubelet: Stopped
	apiserver: Stopped
	kubeconfig: Stopped
	
	ha-730369-m04
	type: Worker
	host: Stopped
	kubelet: Stopped
	

                                                
                                                
-- /stdout --
** stderr ** 
	I0815 23:24:09.555529 2122018 out.go:345] Setting OutFile to fd 1 ...
	I0815 23:24:09.555704 2122018 out.go:392] TERM=,COLORTERM=, which probably does not support color
	I0815 23:24:09.555717 2122018 out.go:358] Setting ErrFile to fd 2...
	I0815 23:24:09.555724 2122018 out.go:392] TERM=,COLORTERM=, which probably does not support color
	I0815 23:24:09.555998 2122018 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19452-2026001/.minikube/bin
	I0815 23:24:09.556228 2122018 out.go:352] Setting JSON to false
	I0815 23:24:09.556282 2122018 mustload.go:65] Loading cluster: ha-730369
	I0815 23:24:09.556369 2122018 notify.go:220] Checking for updates...
	I0815 23:24:09.556743 2122018 config.go:182] Loaded profile config "ha-730369": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.0
	I0815 23:24:09.556767 2122018 status.go:255] checking status of ha-730369 ...
	I0815 23:24:09.557338 2122018 cli_runner.go:164] Run: docker container inspect ha-730369 --format={{.State.Status}}
	I0815 23:24:09.575785 2122018 status.go:330] ha-730369 host status = "Stopped" (err=<nil>)
	I0815 23:24:09.575807 2122018 status.go:343] host is not running, skipping remaining checks
	I0815 23:24:09.575815 2122018 status.go:257] ha-730369 status: &{Name:ha-730369 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:false TimeToStop: DockerEnv: PodManEnv:}
	I0815 23:24:09.575855 2122018 status.go:255] checking status of ha-730369-m02 ...
	I0815 23:24:09.576187 2122018 cli_runner.go:164] Run: docker container inspect ha-730369-m02 --format={{.State.Status}}
	I0815 23:24:09.595137 2122018 status.go:330] ha-730369-m02 host status = "Stopped" (err=<nil>)
	I0815 23:24:09.595160 2122018 status.go:343] host is not running, skipping remaining checks
	I0815 23:24:09.595168 2122018 status.go:257] ha-730369-m02 status: &{Name:ha-730369-m02 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:false TimeToStop: DockerEnv: PodManEnv:}
	I0815 23:24:09.595186 2122018 status.go:255] checking status of ha-730369-m04 ...
	I0815 23:24:09.595519 2122018 cli_runner.go:164] Run: docker container inspect ha-730369-m04 --format={{.State.Status}}
	I0815 23:24:09.618625 2122018 status.go:330] ha-730369-m04 host status = "Stopped" (err=<nil>)
	I0815 23:24:09.618648 2122018 status.go:343] host is not running, skipping remaining checks
	I0815 23:24:09.618656 2122018 status.go:257] ha-730369-m04 status: &{Name:ha-730369-m04 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:true TimeToStop: DockerEnv: PodManEnv:}

                                                
                                                
** /stderr **
--- PASS: TestMultiControlPlane/serial/StopCluster (32.92s)
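Note: status intentionally fails once every node is down; rerunning the two commands above and checking the shell's exit code shows the same thing (minikube's status help describes the exit code as a small bit field over host/cluster/kubernetes health, which is why a fully stopped cluster reports 7 rather than 1):
  $ out/minikube-linux-arm64 -p ha-730369 stop -v=7 --alsologtostderr
  $ out/minikube-linux-arm64 -p ha-730369 status
  $ echo $?    # 7 while host, kubelet and apiserver are all reported Stopped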

                                                
                                    
TestMultiControlPlane/serial/RestartCluster (141.41s)

                                                
                                                
=== RUN   TestMultiControlPlane/serial/RestartCluster
ha_test.go:560: (dbg) Run:  out/minikube-linux-arm64 start -p ha-730369 --wait=true -v=7 --alsologtostderr --driver=docker  --container-runtime=docker
E0815 23:24:36.182270 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/addons-083442/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:26:00.530615 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/functional-300199/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:26:28.230842 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/functional-300199/client.crt: no such file or directory" logger="UnhandledError"
ha_test.go:560: (dbg) Done: out/minikube-linux-arm64 start -p ha-730369 --wait=true -v=7 --alsologtostderr --driver=docker  --container-runtime=docker: (2m20.501822054s)
ha_test.go:566: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 status -v=7 --alsologtostderr
ha_test.go:584: (dbg) Run:  kubectl get nodes
ha_test.go:592: (dbg) Run:  kubectl get nodes -o "go-template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}} {{.status}}{{"\n"}}{{end}}{{end}}{{end}}'"
--- PASS: TestMultiControlPlane/serial/RestartCluster (141.41s)

                                                
                                    
TestMultiControlPlane/serial/DegradedAfterClusterRestart (0.61s)

                                                
                                                
=== RUN   TestMultiControlPlane/serial/DegradedAfterClusterRestart
ha_test.go:390: (dbg) Run:  out/minikube-linux-arm64 profile list --output json
--- PASS: TestMultiControlPlane/serial/DegradedAfterClusterRestart (0.61s)

                                                
                                    
TestMultiControlPlane/serial/AddSecondaryNode (47.65s)

                                                
                                                
=== RUN   TestMultiControlPlane/serial/AddSecondaryNode
ha_test.go:605: (dbg) Run:  out/minikube-linux-arm64 node add -p ha-730369 --control-plane -v=7 --alsologtostderr
ha_test.go:605: (dbg) Done: out/minikube-linux-arm64 node add -p ha-730369 --control-plane -v=7 --alsologtostderr: (46.602178613s)
ha_test.go:611: (dbg) Run:  out/minikube-linux-arm64 -p ha-730369 status -v=7 --alsologtostderr
ha_test.go:611: (dbg) Done: out/minikube-linux-arm64 -p ha-730369 status -v=7 --alsologtostderr: (1.047120452s)
--- PASS: TestMultiControlPlane/serial/AddSecondaryNode (47.65s)

                                                
                                    
TestMultiControlPlane/serial/HAppyAfterSecondaryNodeAdd (0.81s)

                                                
                                                
=== RUN   TestMultiControlPlane/serial/HAppyAfterSecondaryNodeAdd
ha_test.go:281: (dbg) Run:  out/minikube-linux-arm64 profile list --output json
--- PASS: TestMultiControlPlane/serial/HAppyAfterSecondaryNodeAdd (0.81s)

                                                
                                    
TestImageBuild/serial/Setup (32.31s)

                                                
                                                
=== RUN   TestImageBuild/serial/Setup
image_test.go:69: (dbg) Run:  out/minikube-linux-arm64 start -p image-043996 --driver=docker  --container-runtime=docker
image_test.go:69: (dbg) Done: out/minikube-linux-arm64 start -p image-043996 --driver=docker  --container-runtime=docker: (32.307082652s)
--- PASS: TestImageBuild/serial/Setup (32.31s)

                                                
                                    
TestImageBuild/serial/NormalBuild (1.8s)

                                                
                                                
=== RUN   TestImageBuild/serial/NormalBuild
image_test.go:78: (dbg) Run:  out/minikube-linux-arm64 image build -t aaa:latest ./testdata/image-build/test-normal -p image-043996
image_test.go:78: (dbg) Done: out/minikube-linux-arm64 image build -t aaa:latest ./testdata/image-build/test-normal -p image-043996: (1.798124153s)
--- PASS: TestImageBuild/serial/NormalBuild (1.80s)

                                                
                                    
TestImageBuild/serial/BuildWithBuildArg (1.1s)

                                                
                                                
=== RUN   TestImageBuild/serial/BuildWithBuildArg
image_test.go:99: (dbg) Run:  out/minikube-linux-arm64 image build -t aaa:latest --build-opt=build-arg=ENV_A=test_env_str --build-opt=no-cache ./testdata/image-build/test-arg -p image-043996
image_test.go:99: (dbg) Done: out/minikube-linux-arm64 image build -t aaa:latest --build-opt=build-arg=ENV_A=test_env_str --build-opt=no-cache ./testdata/image-build/test-arg -p image-043996: (1.099801205s)
--- PASS: TestImageBuild/serial/BuildWithBuildArg (1.10s)

                                                
                                    
TestImageBuild/serial/BuildWithDockerIgnore (0.91s)

                                                
                                                
=== RUN   TestImageBuild/serial/BuildWithDockerIgnore
image_test.go:133: (dbg) Run:  out/minikube-linux-arm64 image build -t aaa:latest ./testdata/image-build/test-normal --build-opt=no-cache -p image-043996
--- PASS: TestImageBuild/serial/BuildWithDockerIgnore (0.91s)

                                                
                                    
TestImageBuild/serial/BuildWithSpecifiedDockerfile (0.75s)

                                                
                                                
=== RUN   TestImageBuild/serial/BuildWithSpecifiedDockerfile
image_test.go:88: (dbg) Run:  out/minikube-linux-arm64 image build -t aaa:latest -f inner/Dockerfile ./testdata/image-build/test-f -p image-043996
--- PASS: TestImageBuild/serial/BuildWithSpecifiedDockerfile (0.75s)

                                                
                                    
TestJSONOutput/start/Command (73.81s)

                                                
                                                
=== RUN   TestJSONOutput/start/Command
json_output_test.go:63: (dbg) Run:  out/minikube-linux-arm64 start -p json-output-553608 --output=json --user=testUser --memory=2200 --wait=true --driver=docker  --container-runtime=docker
json_output_test.go:63: (dbg) Done: out/minikube-linux-arm64 start -p json-output-553608 --output=json --user=testUser --memory=2200 --wait=true --driver=docker  --container-runtime=docker: (1m13.804256519s)
--- PASS: TestJSONOutput/start/Command (73.81s)

                                                
                                    
TestJSONOutput/start/Audit (0s)

                                                
                                                
=== RUN   TestJSONOutput/start/Audit
--- PASS: TestJSONOutput/start/Audit (0.00s)

                                                
                                    
TestJSONOutput/start/parallel/DistinctCurrentSteps (0s)

                                                
                                                
=== RUN   TestJSONOutput/start/parallel/DistinctCurrentSteps
=== PAUSE TestJSONOutput/start/parallel/DistinctCurrentSteps

                                                
                                                

                                                
                                                
=== CONT  TestJSONOutput/start/parallel/DistinctCurrentSteps
--- PASS: TestJSONOutput/start/parallel/DistinctCurrentSteps (0.00s)

                                                
                                    
TestJSONOutput/start/parallel/IncreasingCurrentSteps (0s)

                                                
                                                
=== RUN   TestJSONOutput/start/parallel/IncreasingCurrentSteps
=== PAUSE TestJSONOutput/start/parallel/IncreasingCurrentSteps

                                                
                                                

                                                
                                                
=== CONT  TestJSONOutput/start/parallel/IncreasingCurrentSteps
--- PASS: TestJSONOutput/start/parallel/IncreasingCurrentSteps (0.00s)

                                                
                                    
TestJSONOutput/pause/Command (0.6s)

                                                
                                                
=== RUN   TestJSONOutput/pause/Command
json_output_test.go:63: (dbg) Run:  out/minikube-linux-arm64 pause -p json-output-553608 --output=json --user=testUser
--- PASS: TestJSONOutput/pause/Command (0.60s)

                                                
                                    
TestJSONOutput/pause/Audit (0s)

                                                
                                                
=== RUN   TestJSONOutput/pause/Audit
--- PASS: TestJSONOutput/pause/Audit (0.00s)

                                                
                                    
TestJSONOutput/pause/parallel/DistinctCurrentSteps (0s)

                                                
                                                
=== RUN   TestJSONOutput/pause/parallel/DistinctCurrentSteps
=== PAUSE TestJSONOutput/pause/parallel/DistinctCurrentSteps

                                                
                                                

                                                
                                                
=== CONT  TestJSONOutput/pause/parallel/DistinctCurrentSteps
--- PASS: TestJSONOutput/pause/parallel/DistinctCurrentSteps (0.00s)

                                                
                                    
TestJSONOutput/pause/parallel/IncreasingCurrentSteps (0s)

                                                
                                                
=== RUN   TestJSONOutput/pause/parallel/IncreasingCurrentSteps
=== PAUSE TestJSONOutput/pause/parallel/IncreasingCurrentSteps

                                                
                                                

                                                
                                                
=== CONT  TestJSONOutput/pause/parallel/IncreasingCurrentSteps
--- PASS: TestJSONOutput/pause/parallel/IncreasingCurrentSteps (0.00s)

                                                
                                    
TestJSONOutput/unpause/Command (0.55s)

                                                
                                                
=== RUN   TestJSONOutput/unpause/Command
json_output_test.go:63: (dbg) Run:  out/minikube-linux-arm64 unpause -p json-output-553608 --output=json --user=testUser
--- PASS: TestJSONOutput/unpause/Command (0.55s)

                                                
                                    
TestJSONOutput/unpause/Audit (0s)

                                                
                                                
=== RUN   TestJSONOutput/unpause/Audit
--- PASS: TestJSONOutput/unpause/Audit (0.00s)

                                                
                                    
TestJSONOutput/unpause/parallel/DistinctCurrentSteps (0s)

                                                
                                                
=== RUN   TestJSONOutput/unpause/parallel/DistinctCurrentSteps
=== PAUSE TestJSONOutput/unpause/parallel/DistinctCurrentSteps

                                                
                                                

                                                
                                                
=== CONT  TestJSONOutput/unpause/parallel/DistinctCurrentSteps
--- PASS: TestJSONOutput/unpause/parallel/DistinctCurrentSteps (0.00s)

                                                
                                    
TestJSONOutput/unpause/parallel/IncreasingCurrentSteps (0s)

                                                
                                                
=== RUN   TestJSONOutput/unpause/parallel/IncreasingCurrentSteps
=== PAUSE TestJSONOutput/unpause/parallel/IncreasingCurrentSteps

                                                
                                                

                                                
                                                
=== CONT  TestJSONOutput/unpause/parallel/IncreasingCurrentSteps
--- PASS: TestJSONOutput/unpause/parallel/IncreasingCurrentSteps (0.00s)

                                                
                                    
TestJSONOutput/stop/Command (5.77s)

                                                
                                                
=== RUN   TestJSONOutput/stop/Command
json_output_test.go:63: (dbg) Run:  out/minikube-linux-arm64 stop -p json-output-553608 --output=json --user=testUser
json_output_test.go:63: (dbg) Done: out/minikube-linux-arm64 stop -p json-output-553608 --output=json --user=testUser: (5.774066225s)
--- PASS: TestJSONOutput/stop/Command (5.77s)

                                                
                                    
TestJSONOutput/stop/Audit (0s)

                                                
                                                
=== RUN   TestJSONOutput/stop/Audit
--- PASS: TestJSONOutput/stop/Audit (0.00s)

                                                
                                    
TestJSONOutput/stop/parallel/DistinctCurrentSteps (0s)

                                                
                                                
=== RUN   TestJSONOutput/stop/parallel/DistinctCurrentSteps
=== PAUSE TestJSONOutput/stop/parallel/DistinctCurrentSteps

                                                
                                                

                                                
                                                
=== CONT  TestJSONOutput/stop/parallel/DistinctCurrentSteps
--- PASS: TestJSONOutput/stop/parallel/DistinctCurrentSteps (0.00s)

                                                
                                    
TestJSONOutput/stop/parallel/IncreasingCurrentSteps (0s)

                                                
                                                
=== RUN   TestJSONOutput/stop/parallel/IncreasingCurrentSteps
=== PAUSE TestJSONOutput/stop/parallel/IncreasingCurrentSteps

                                                
                                                

                                                
                                                
=== CONT  TestJSONOutput/stop/parallel/IncreasingCurrentSteps
--- PASS: TestJSONOutput/stop/parallel/IncreasingCurrentSteps (0.00s)

                                                
                                    
TestErrorJSONOutput (0.27s)

                                                
                                                
=== RUN   TestErrorJSONOutput
json_output_test.go:160: (dbg) Run:  out/minikube-linux-arm64 start -p json-output-error-879979 --memory=2200 --output=json --wait=true --driver=fail
json_output_test.go:160: (dbg) Non-zero exit: out/minikube-linux-arm64 start -p json-output-error-879979 --memory=2200 --output=json --wait=true --driver=fail: exit status 56 (90.920221ms)

                                                
                                                
-- stdout --
	{"specversion":"1.0","id":"f043395d-c682-49a0-8ae2-6bf749cd4f77","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.step","datacontenttype":"application/json","data":{"currentstep":"0","message":"[json-output-error-879979] minikube v1.33.1 on Ubuntu 20.04 (arm64)","name":"Initial Minikube Setup","totalsteps":"19"}}
	{"specversion":"1.0","id":"a644cf39-34e7-4e79-9e81-f938bef76659","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_LOCATION=19452"}}
	{"specversion":"1.0","id":"20d37339-bb7a-4a40-b346-c7c36fa861ce","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true"}}
	{"specversion":"1.0","id":"b70279bd-ab2e-47d7-9290-1c2d2d65eba2","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"KUBECONFIG=/home/jenkins/minikube-integration/19452-2026001/kubeconfig"}}
	{"specversion":"1.0","id":"58a528ee-afd5-4c7a-9c74-e012dfe1ad8f","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_HOME=/home/jenkins/minikube-integration/19452-2026001/.minikube"}}
	{"specversion":"1.0","id":"d79daeb4-3416-49d4-aede-3af0d4a497a5","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_BIN=out/minikube-linux-arm64"}}
	{"specversion":"1.0","id":"01f6e2d6-d71a-4f15-bf57-d4db159fd18f","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_FORCE_SYSTEMD="}}
	{"specversion":"1.0","id":"1c7b5c3b-3c77-4ca6-a39f-f103a0732bd4","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.error","datacontenttype":"application/json","data":{"advice":"","exitcode":"56","issues":"","message":"The driver 'fail' is not supported on linux/arm64","name":"DRV_UNSUPPORTED_OS","url":""}}

                                                
                                                
-- /stdout --
helpers_test.go:175: Cleaning up "json-output-error-879979" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p json-output-error-879979
--- PASS: TestErrorJSONOutput (0.27s)
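Note: the --output=json events above use a CloudEvents-style envelope (specversion/id/source/type/data). One way to pull just the error message out of such a stream, assuming jq is available (the test itself does not use jq), is:
  $ out/minikube-linux-arm64 start -p json-output-error-879979 --memory=2200 --output=json --wait=true --driver=fail \
      | jq -r 'select(.type == "io.k8s.sigs.minikube.error") | .data.message'
  The driver 'fail' is not supported on linux/arm64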

                                                
                                    
TestKicCustomNetwork/create_custom_network (31.89s)

                                                
                                                
=== RUN   TestKicCustomNetwork/create_custom_network
kic_custom_network_test.go:57: (dbg) Run:  out/minikube-linux-arm64 start -p docker-network-953103 --network=
E0815 23:29:36.183404 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/addons-083442/client.crt: no such file or directory" logger="UnhandledError"
kic_custom_network_test.go:57: (dbg) Done: out/minikube-linux-arm64 start -p docker-network-953103 --network=: (29.581468882s)
kic_custom_network_test.go:150: (dbg) Run:  docker network ls --format {{.Name}}
helpers_test.go:175: Cleaning up "docker-network-953103" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p docker-network-953103
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p docker-network-953103: (2.286468222s)
--- PASS: TestKicCustomNetwork/create_custom_network (31.89s)

                                                
                                    
TestKicCustomNetwork/use_default_bridge_network (31.07s)

                                                
                                                
=== RUN   TestKicCustomNetwork/use_default_bridge_network
kic_custom_network_test.go:57: (dbg) Run:  out/minikube-linux-arm64 start -p docker-network-329456 --network=bridge
kic_custom_network_test.go:57: (dbg) Done: out/minikube-linux-arm64 start -p docker-network-329456 --network=bridge: (29.106721833s)
kic_custom_network_test.go:150: (dbg) Run:  docker network ls --format {{.Name}}
helpers_test.go:175: Cleaning up "docker-network-329456" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p docker-network-329456
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p docker-network-329456: (1.943477787s)
--- PASS: TestKicCustomNetwork/use_default_bridge_network (31.07s)

                                                
                                    
TestKicExistingNetwork (32.78s)

                                                
                                                
=== RUN   TestKicExistingNetwork
kic_custom_network_test.go:150: (dbg) Run:  docker network ls --format {{.Name}}
kic_custom_network_test.go:93: (dbg) Run:  out/minikube-linux-arm64 start -p existing-network-059909 --network=existing-network
E0815 23:30:59.259297 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/addons-083442/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:31:00.530418 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/functional-300199/client.crt: no such file or directory" logger="UnhandledError"
kic_custom_network_test.go:93: (dbg) Done: out/minikube-linux-arm64 start -p existing-network-059909 --network=existing-network: (30.539430804s)
helpers_test.go:175: Cleaning up "existing-network-059909" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p existing-network-059909
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p existing-network-059909: (2.081876562s)
--- PASS: TestKicExistingNetwork (32.78s)

                                                
                                    
TestKicCustomSubnet (33.75s)

                                                
                                                
=== RUN   TestKicCustomSubnet
kic_custom_network_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p custom-subnet-573928 --subnet=192.168.60.0/24
kic_custom_network_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p custom-subnet-573928 --subnet=192.168.60.0/24: (31.654331428s)
kic_custom_network_test.go:161: (dbg) Run:  docker network inspect custom-subnet-573928 --format "{{(index .IPAM.Config 0).Subnet}}"
helpers_test.go:175: Cleaning up "custom-subnet-573928" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p custom-subnet-573928
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p custom-subnet-573928: (2.065025157s)
--- PASS: TestKicCustomSubnet (33.75s)
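Note: the subnet assertion boils down to creating the KIC network with --subnet and reading it back from Docker (the network is named after the profile); the same check by hand:
  $ out/minikube-linux-arm64 start -p custom-subnet-573928 --subnet=192.168.60.0/24
  $ docker network inspect custom-subnet-573928 --format "{{(index .IPAM.Config 0).Subnet}}"
  192.168.60.0/24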

                                                
                                    
TestKicStaticIP (35.92s)

                                                
                                                
=== RUN   TestKicStaticIP
kic_custom_network_test.go:132: (dbg) Run:  out/minikube-linux-arm64 start -p static-ip-026183 --static-ip=192.168.200.200
kic_custom_network_test.go:132: (dbg) Done: out/minikube-linux-arm64 start -p static-ip-026183 --static-ip=192.168.200.200: (33.713185332s)
kic_custom_network_test.go:138: (dbg) Run:  out/minikube-linux-arm64 -p static-ip-026183 ip
helpers_test.go:175: Cleaning up "static-ip-026183" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p static-ip-026183
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p static-ip-026183: (2.060271878s)
--- PASS: TestKicStaticIP (35.92s)
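Note: the static-IP variant is the same idea with --static-ip plus minikube ip; by hand:
  $ out/minikube-linux-arm64 start -p static-ip-026183 --static-ip=192.168.200.200
  $ out/minikube-linux-arm64 -p static-ip-026183 ip
  192.168.200.200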

                                                
                                    
TestMainNoArgs (0.05s)

                                                
                                                
=== RUN   TestMainNoArgs
main_test.go:68: (dbg) Run:  out/minikube-linux-arm64
--- PASS: TestMainNoArgs (0.05s)

                                                
                                    
TestMinikubeProfile (68.9s)

                                                
                                                
=== RUN   TestMinikubeProfile
minikube_profile_test.go:44: (dbg) Run:  out/minikube-linux-arm64 start -p first-990378 --driver=docker  --container-runtime=docker
minikube_profile_test.go:44: (dbg) Done: out/minikube-linux-arm64 start -p first-990378 --driver=docker  --container-runtime=docker: (30.566370711s)
minikube_profile_test.go:44: (dbg) Run:  out/minikube-linux-arm64 start -p second-993077 --driver=docker  --container-runtime=docker
minikube_profile_test.go:44: (dbg) Done: out/minikube-linux-arm64 start -p second-993077 --driver=docker  --container-runtime=docker: (32.869636s)
minikube_profile_test.go:51: (dbg) Run:  out/minikube-linux-arm64 profile first-990378
minikube_profile_test.go:55: (dbg) Run:  out/minikube-linux-arm64 profile list -ojson
minikube_profile_test.go:51: (dbg) Run:  out/minikube-linux-arm64 profile second-993077
minikube_profile_test.go:55: (dbg) Run:  out/minikube-linux-arm64 profile list -ojson
helpers_test.go:175: Cleaning up "second-993077" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p second-993077
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p second-993077: (2.153249591s)
helpers_test.go:175: Cleaning up "first-990378" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p first-990378
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p first-990378: (2.077743062s)
--- PASS: TestMinikubeProfile (68.90s)

                                                
                                    
TestMountStart/serial/StartWithMountFirst (7.57s)

                                                
                                                
=== RUN   TestMountStart/serial/StartWithMountFirst
mount_start_test.go:98: (dbg) Run:  out/minikube-linux-arm64 start -p mount-start-1-125987 --memory=2048 --mount --mount-gid 0 --mount-msize 6543 --mount-port 46464 --mount-uid 0 --no-kubernetes --driver=docker  --container-runtime=docker
mount_start_test.go:98: (dbg) Done: out/minikube-linux-arm64 start -p mount-start-1-125987 --memory=2048 --mount --mount-gid 0 --mount-msize 6543 --mount-port 46464 --mount-uid 0 --no-kubernetes --driver=docker  --container-runtime=docker: (6.57285137s)
--- PASS: TestMountStart/serial/StartWithMountFirst (7.57s)

                                                
                                    
TestMountStart/serial/VerifyMountFirst (0.25s)

                                                
                                                
=== RUN   TestMountStart/serial/VerifyMountFirst
mount_start_test.go:114: (dbg) Run:  out/minikube-linux-arm64 -p mount-start-1-125987 ssh -- ls /minikube-host
--- PASS: TestMountStart/serial/VerifyMountFirst (0.25s)
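Note: the mount-start flow boots a Kubernetes-less node with a host mount and then verifies it by listing /minikube-host over ssh; a condensed sketch of the two steps exercised above:
  $ out/minikube-linux-arm64 start -p mount-start-1-125987 --memory=2048 --mount --mount-gid 0 \
      --mount-msize 6543 --mount-port 46464 --mount-uid 0 --no-kubernetes \
      --driver=docker --container-runtime=docker
  $ out/minikube-linux-arm64 -p mount-start-1-125987 ssh -- ls /minikube-host    # host files visible if the mount is up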

                                                
                                    
TestMountStart/serial/StartWithMountSecond (8.5s)

                                                
                                                
=== RUN   TestMountStart/serial/StartWithMountSecond
mount_start_test.go:98: (dbg) Run:  out/minikube-linux-arm64 start -p mount-start-2-138475 --memory=2048 --mount --mount-gid 0 --mount-msize 6543 --mount-port 46465 --mount-uid 0 --no-kubernetes --driver=docker  --container-runtime=docker
mount_start_test.go:98: (dbg) Done: out/minikube-linux-arm64 start -p mount-start-2-138475 --memory=2048 --mount --mount-gid 0 --mount-msize 6543 --mount-port 46465 --mount-uid 0 --no-kubernetes --driver=docker  --container-runtime=docker: (7.50405242s)
--- PASS: TestMountStart/serial/StartWithMountSecond (8.50s)

                                                
                                    
TestMountStart/serial/VerifyMountSecond (0.26s)

                                                
                                                
=== RUN   TestMountStart/serial/VerifyMountSecond
mount_start_test.go:114: (dbg) Run:  out/minikube-linux-arm64 -p mount-start-2-138475 ssh -- ls /minikube-host
--- PASS: TestMountStart/serial/VerifyMountSecond (0.26s)

                                                
                                    
TestMountStart/serial/DeleteFirst (1.45s)

                                                
                                                
=== RUN   TestMountStart/serial/DeleteFirst
pause_test.go:132: (dbg) Run:  out/minikube-linux-arm64 delete -p mount-start-1-125987 --alsologtostderr -v=5
pause_test.go:132: (dbg) Done: out/minikube-linux-arm64 delete -p mount-start-1-125987 --alsologtostderr -v=5: (1.446560278s)
--- PASS: TestMountStart/serial/DeleteFirst (1.45s)

                                                
                                    
TestMountStart/serial/VerifyMountPostDelete (0.25s)

                                                
                                                
=== RUN   TestMountStart/serial/VerifyMountPostDelete
mount_start_test.go:114: (dbg) Run:  out/minikube-linux-arm64 -p mount-start-2-138475 ssh -- ls /minikube-host
--- PASS: TestMountStart/serial/VerifyMountPostDelete (0.25s)

                                                
                                    
TestMountStart/serial/Stop (1.2s)

                                                
                                                
=== RUN   TestMountStart/serial/Stop
mount_start_test.go:155: (dbg) Run:  out/minikube-linux-arm64 stop -p mount-start-2-138475
mount_start_test.go:155: (dbg) Done: out/minikube-linux-arm64 stop -p mount-start-2-138475: (1.201675063s)
--- PASS: TestMountStart/serial/Stop (1.20s)

                                                
                                    
TestMountStart/serial/RestartStopped (8.58s)

                                                
                                                
=== RUN   TestMountStart/serial/RestartStopped
mount_start_test.go:166: (dbg) Run:  out/minikube-linux-arm64 start -p mount-start-2-138475
mount_start_test.go:166: (dbg) Done: out/minikube-linux-arm64 start -p mount-start-2-138475: (7.574943941s)
--- PASS: TestMountStart/serial/RestartStopped (8.58s)

                                                
                                    
TestMountStart/serial/VerifyMountPostStop (0.25s)

                                                
                                                
=== RUN   TestMountStart/serial/VerifyMountPostStop
mount_start_test.go:114: (dbg) Run:  out/minikube-linux-arm64 -p mount-start-2-138475 ssh -- ls /minikube-host
--- PASS: TestMountStart/serial/VerifyMountPostStop (0.25s)

                                                
                                    
TestMultiNode/serial/FreshStart2Nodes (82.75s)

                                                
                                                
=== RUN   TestMultiNode/serial/FreshStart2Nodes
multinode_test.go:96: (dbg) Run:  out/minikube-linux-arm64 start -p multinode-196543 --wait=true --memory=2200 --nodes=2 -v=8 --alsologtostderr --driver=docker  --container-runtime=docker
E0815 23:34:36.182488 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/addons-083442/client.crt: no such file or directory" logger="UnhandledError"
multinode_test.go:96: (dbg) Done: out/minikube-linux-arm64 start -p multinode-196543 --wait=true --memory=2200 --nodes=2 -v=8 --alsologtostderr --driver=docker  --container-runtime=docker: (1m22.179839622s)
multinode_test.go:102: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 status --alsologtostderr
--- PASS: TestMultiNode/serial/FreshStart2Nodes (82.75s)

                                                
                                    
TestMultiNode/serial/DeployApp2Nodes (35.93s)

                                                
                                                
=== RUN   TestMultiNode/serial/DeployApp2Nodes
multinode_test.go:493: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-196543 -- apply -f ./testdata/multinodes/multinode-pod-dns-test.yaml
multinode_test.go:498: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-196543 -- rollout status deployment/busybox
multinode_test.go:498: (dbg) Done: out/minikube-linux-arm64 kubectl -p multinode-196543 -- rollout status deployment/busybox: (3.903384673s)
multinode_test.go:505: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-196543 -- get pods -o jsonpath='{.items[*].status.podIP}'
multinode_test.go:514: expected 2 Pod IPs but got 1 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.3'\n\n-- /stdout --"
multinode_test.go:505: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-196543 -- get pods -o jsonpath='{.items[*].status.podIP}'
multinode_test.go:514: expected 2 Pod IPs but got 1 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.3'\n\n-- /stdout --"
multinode_test.go:505: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-196543 -- get pods -o jsonpath='{.items[*].status.podIP}'
multinode_test.go:514: expected 2 Pod IPs but got 1 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.3'\n\n-- /stdout --"
multinode_test.go:505: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-196543 -- get pods -o jsonpath='{.items[*].status.podIP}'
multinode_test.go:514: expected 2 Pod IPs but got 1 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.3'\n\n-- /stdout --"
multinode_test.go:505: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-196543 -- get pods -o jsonpath='{.items[*].status.podIP}'
multinode_test.go:514: expected 2 Pod IPs but got 1 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.3'\n\n-- /stdout --"
multinode_test.go:505: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-196543 -- get pods -o jsonpath='{.items[*].status.podIP}'
multinode_test.go:514: expected 2 Pod IPs but got 1 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.3'\n\n-- /stdout --"
multinode_test.go:505: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-196543 -- get pods -o jsonpath='{.items[*].status.podIP}'
multinode_test.go:514: expected 2 Pod IPs but got 1 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.3'\n\n-- /stdout --"
multinode_test.go:505: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-196543 -- get pods -o jsonpath='{.items[*].status.podIP}'
multinode_test.go:528: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-196543 -- get pods -o jsonpath='{.items[*].metadata.name}'
multinode_test.go:536: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-196543 -- exec busybox-7dff88458-8zlvd -- nslookup kubernetes.io
multinode_test.go:536: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-196543 -- exec busybox-7dff88458-clxbq -- nslookup kubernetes.io
multinode_test.go:546: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-196543 -- exec busybox-7dff88458-8zlvd -- nslookup kubernetes.default
multinode_test.go:546: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-196543 -- exec busybox-7dff88458-clxbq -- nslookup kubernetes.default
multinode_test.go:554: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-196543 -- exec busybox-7dff88458-8zlvd -- nslookup kubernetes.default.svc.cluster.local
multinode_test.go:554: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-196543 -- exec busybox-7dff88458-clxbq -- nslookup kubernetes.default.svc.cluster.local
--- PASS: TestMultiNode/serial/DeployApp2Nodes (35.93s)
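Note: the deployment check polls until the busybox pods report one IP per node and then exercises DNS from inside each pod; the key queries, run here with the profile's kubectl context (the second IP below is illustrative):
  $ kubectl --context multinode-196543 get pods -o jsonpath='{.items[*].status.podIP}'    # expect two IPs, e.g. 10.244.0.3 10.244.1.2
  $ kubectl --context multinode-196543 exec busybox-7dff88458-8zlvd -- nslookup kubernetes.default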

                                                
                                    
TestMultiNode/serial/PingHostFrom2Pods (1.04s)

                                                
                                                
=== RUN   TestMultiNode/serial/PingHostFrom2Pods
multinode_test.go:564: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-196543 -- get pods -o jsonpath='{.items[*].metadata.name}'
multinode_test.go:572: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-196543 -- exec busybox-7dff88458-8zlvd -- sh -c "nslookup host.minikube.internal | awk 'NR==5' | cut -d' ' -f3"
multinode_test.go:583: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-196543 -- exec busybox-7dff88458-8zlvd -- sh -c "ping -c 1 192.168.67.1"
multinode_test.go:572: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-196543 -- exec busybox-7dff88458-clxbq -- sh -c "nslookup host.minikube.internal | awk 'NR==5' | cut -d' ' -f3"
multinode_test.go:583: (dbg) Run:  out/minikube-linux-arm64 kubectl -p multinode-196543 -- exec busybox-7dff88458-clxbq -- sh -c "ping -c 1 192.168.67.1"
--- PASS: TestMultiNode/serial/PingHostFrom2Pods (1.04s)
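Note: host reachability is probed by resolving host.minikube.internal inside a pod and pinging the address that comes back; the in-pod one-liner mirrors what the test runs:
  $ kubectl --context multinode-196543 exec busybox-7dff88458-8zlvd -- \
      sh -c "nslookup host.minikube.internal | awk 'NR==5' | cut -d' ' -f3"
  192.168.67.1
  $ kubectl --context multinode-196543 exec busybox-7dff88458-8zlvd -- sh -c "ping -c 1 192.168.67.1"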

                                                
                                    
TestMultiNode/serial/AddNode (20.91s)

                                                
                                                
=== RUN   TestMultiNode/serial/AddNode
multinode_test.go:121: (dbg) Run:  out/minikube-linux-arm64 node add -p multinode-196543 -v 3 --alsologtostderr
E0815 23:36:00.530322 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/functional-300199/client.crt: no such file or directory" logger="UnhandledError"
multinode_test.go:121: (dbg) Done: out/minikube-linux-arm64 node add -p multinode-196543 -v 3 --alsologtostderr: (20.123876119s)
multinode_test.go:127: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 status --alsologtostderr
--- PASS: TestMultiNode/serial/AddNode (20.91s)

                                                
                                    
TestMultiNode/serial/MultiNodeLabels (0.14s)

                                                
                                                
=== RUN   TestMultiNode/serial/MultiNodeLabels
multinode_test.go:221: (dbg) Run:  kubectl --context multinode-196543 get nodes -o "jsonpath=[{range .items[*]}{.metadata.labels},{end}]"
--- PASS: TestMultiNode/serial/MultiNodeLabels (0.14s)

                                                
                                    
TestMultiNode/serial/ProfileList (0.33s)

                                                
                                                
=== RUN   TestMultiNode/serial/ProfileList
multinode_test.go:143: (dbg) Run:  out/minikube-linux-arm64 profile list --output json
--- PASS: TestMultiNode/serial/ProfileList (0.33s)

                                                
                                    
TestMultiNode/serial/CopyFile (10.16s)

                                                
                                                
=== RUN   TestMultiNode/serial/CopyFile
multinode_test.go:184: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 status --output json --alsologtostderr
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 cp testdata/cp-test.txt multinode-196543:/home/docker/cp-test.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 ssh -n multinode-196543 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 cp multinode-196543:/home/docker/cp-test.txt /tmp/TestMultiNodeserialCopyFile2787376721/001/cp-test_multinode-196543.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 ssh -n multinode-196543 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 cp multinode-196543:/home/docker/cp-test.txt multinode-196543-m02:/home/docker/cp-test_multinode-196543_multinode-196543-m02.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 ssh -n multinode-196543 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 ssh -n multinode-196543-m02 "sudo cat /home/docker/cp-test_multinode-196543_multinode-196543-m02.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 cp multinode-196543:/home/docker/cp-test.txt multinode-196543-m03:/home/docker/cp-test_multinode-196543_multinode-196543-m03.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 ssh -n multinode-196543 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 ssh -n multinode-196543-m03 "sudo cat /home/docker/cp-test_multinode-196543_multinode-196543-m03.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 cp testdata/cp-test.txt multinode-196543-m02:/home/docker/cp-test.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 ssh -n multinode-196543-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 cp multinode-196543-m02:/home/docker/cp-test.txt /tmp/TestMultiNodeserialCopyFile2787376721/001/cp-test_multinode-196543-m02.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 ssh -n multinode-196543-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 cp multinode-196543-m02:/home/docker/cp-test.txt multinode-196543:/home/docker/cp-test_multinode-196543-m02_multinode-196543.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 ssh -n multinode-196543-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 ssh -n multinode-196543 "sudo cat /home/docker/cp-test_multinode-196543-m02_multinode-196543.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 cp multinode-196543-m02:/home/docker/cp-test.txt multinode-196543-m03:/home/docker/cp-test_multinode-196543-m02_multinode-196543-m03.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 ssh -n multinode-196543-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 ssh -n multinode-196543-m03 "sudo cat /home/docker/cp-test_multinode-196543-m02_multinode-196543-m03.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 cp testdata/cp-test.txt multinode-196543-m03:/home/docker/cp-test.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 ssh -n multinode-196543-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 cp multinode-196543-m03:/home/docker/cp-test.txt /tmp/TestMultiNodeserialCopyFile2787376721/001/cp-test_multinode-196543-m03.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 ssh -n multinode-196543-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 cp multinode-196543-m03:/home/docker/cp-test.txt multinode-196543:/home/docker/cp-test_multinode-196543-m03_multinode-196543.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 ssh -n multinode-196543-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 ssh -n multinode-196543 "sudo cat /home/docker/cp-test_multinode-196543-m03_multinode-196543.txt"
helpers_test.go:556: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 cp multinode-196543-m03:/home/docker/cp-test.txt multinode-196543-m02:/home/docker/cp-test_multinode-196543-m03_multinode-196543-m02.txt
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 ssh -n multinode-196543-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:534: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 ssh -n multinode-196543-m02 "sudo cat /home/docker/cp-test_multinode-196543-m03_multinode-196543-m02.txt"
--- PASS: TestMultiNode/serial/CopyFile (10.16s)
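Note on the copy pattern above: every transfer is a `minikube cp` between the host and a node (or between two nodes), verified afterwards with `minikube ssh`. A condensed sketch of the three directions, reusing the profile and paths from this run (the binary path is shortened to `minikube` for readability; /tmp/cp-test.txt is a placeholder destination):
    minikube -p multinode-196543 cp testdata/cp-test.txt multinode-196543:/home/docker/cp-test.txt                              # host -> node
    minikube -p multinode-196543 cp multinode-196543:/home/docker/cp-test.txt /tmp/cp-test.txt                                  # node -> host
    minikube -p multinode-196543 cp multinode-196543:/home/docker/cp-test.txt multinode-196543-m02:/home/docker/cp-test.txt     # node -> node
    minikube -p multinode-196543 ssh -n multinode-196543-m02 "sudo cat /home/docker/cp-test.txt"                                # verify on the target node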

                                                
                                    
TestMultiNode/serial/StopNode (2.23s)

                                                
                                                
=== RUN   TestMultiNode/serial/StopNode
multinode_test.go:248: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 node stop m03
multinode_test.go:248: (dbg) Done: out/minikube-linux-arm64 -p multinode-196543 node stop m03: (1.235423601s)
multinode_test.go:254: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 status
multinode_test.go:254: (dbg) Non-zero exit: out/minikube-linux-arm64 -p multinode-196543 status: exit status 7 (502.365857ms)

                                                
                                                
-- stdout --
	multinode-196543
	type: Control Plane
	host: Running
	kubelet: Running
	apiserver: Running
	kubeconfig: Configured
	
	multinode-196543-m02
	type: Worker
	host: Running
	kubelet: Running
	
	multinode-196543-m03
	type: Worker
	host: Stopped
	kubelet: Stopped
	

                                                
                                                
-- /stdout --
multinode_test.go:261: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 status --alsologtostderr
multinode_test.go:261: (dbg) Non-zero exit: out/minikube-linux-arm64 -p multinode-196543 status --alsologtostderr: exit status 7 (496.349499ms)

                                                
                                                
-- stdout --
	multinode-196543
	type: Control Plane
	host: Running
	kubelet: Running
	apiserver: Running
	kubeconfig: Configured
	
	multinode-196543-m02
	type: Worker
	host: Running
	kubelet: Running
	
	multinode-196543-m03
	type: Worker
	host: Stopped
	kubelet: Stopped
	

                                                
                                                
-- /stdout --
** stderr ** 
	I0815 23:36:27.551949 2197803 out.go:345] Setting OutFile to fd 1 ...
	I0815 23:36:27.552091 2197803 out.go:392] TERM=,COLORTERM=, which probably does not support color
	I0815 23:36:27.552102 2197803 out.go:358] Setting ErrFile to fd 2...
	I0815 23:36:27.552107 2197803 out.go:392] TERM=,COLORTERM=, which probably does not support color
	I0815 23:36:27.552369 2197803 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19452-2026001/.minikube/bin
	I0815 23:36:27.552562 2197803 out.go:352] Setting JSON to false
	I0815 23:36:27.552607 2197803 mustload.go:65] Loading cluster: multinode-196543
	I0815 23:36:27.552697 2197803 notify.go:220] Checking for updates...
	I0815 23:36:27.553057 2197803 config.go:182] Loaded profile config "multinode-196543": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.0
	I0815 23:36:27.553068 2197803 status.go:255] checking status of multinode-196543 ...
	I0815 23:36:27.553999 2197803 cli_runner.go:164] Run: docker container inspect multinode-196543 --format={{.State.Status}}
	I0815 23:36:27.571579 2197803 status.go:330] multinode-196543 host status = "Running" (err=<nil>)
	I0815 23:36:27.571606 2197803 host.go:66] Checking if "multinode-196543" exists ...
	I0815 23:36:27.571906 2197803 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" multinode-196543
	I0815 23:36:27.599392 2197803 host.go:66] Checking if "multinode-196543" exists ...
	I0815 23:36:27.599707 2197803 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I0815 23:36:27.599756 2197803 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-196543
	I0815 23:36:27.616789 2197803 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:34864 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/multinode-196543/id_rsa Username:docker}
	I0815 23:36:27.710746 2197803 ssh_runner.go:195] Run: systemctl --version
	I0815 23:36:27.715209 2197803 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I0815 23:36:27.727163 2197803 cli_runner.go:164] Run: docker system info --format "{{json .}}"
	I0815 23:36:27.779450 2197803 info.go:266] docker info: {ID:U5VK:ZNT5:35M3:FHLW:Q7TL:ELFX:BNAG:AV4T:UD2H:SK5L:SEJV:SJJL Containers:3 ContainersRunning:2 ContainersPaused:0 ContainersStopped:1 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:40 OomKillDisable:true NGoroutines:61 SystemTime:2024-08-15 23:36:27.7699095 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1067-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214900736 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-29-130 Labels:[] ExperimentalBuild:false ServerVersion:27.1.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:8fc6bcff51318944179630522a095cc9dbf9f353 Expected:8fc6bcff51318944179630522a095cc9dbf9f353} RuncCommit:{ID:v1.1.13-0-g58aa920 Expected:v1.1.13-0-g58aa920} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.16.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.29.1]] Warnings:<nil>}}
	I0815 23:36:27.780010 2197803 kubeconfig.go:125] found "multinode-196543" server: "https://192.168.67.2:8443"
	I0815 23:36:27.780046 2197803 api_server.go:166] Checking apiserver status ...
	I0815 23:36:27.780093 2197803 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I0815 23:36:27.792202 2197803 ssh_runner.go:195] Run: sudo egrep ^[0-9]+:freezer: /proc/2213/cgroup
	I0815 23:36:27.801718 2197803 api_server.go:182] apiserver freezer: "3:freezer:/docker/86060684c20489ab12da735cef114a87d1c4aa6f6481f994b8d2a296e118225b/kubepods/burstable/pod27fbf1e3dc5306da9043d8471214f143/16fc796b2910fbbddff4d2dc4f364d4509d0d9968af5bdf8c6e2ab828abd06ef"
	I0815 23:36:27.801801 2197803 ssh_runner.go:195] Run: sudo cat /sys/fs/cgroup/freezer/docker/86060684c20489ab12da735cef114a87d1c4aa6f6481f994b8d2a296e118225b/kubepods/burstable/pod27fbf1e3dc5306da9043d8471214f143/16fc796b2910fbbddff4d2dc4f364d4509d0d9968af5bdf8c6e2ab828abd06ef/freezer.state
	I0815 23:36:27.810723 2197803 api_server.go:204] freezer state: "THAWED"
	I0815 23:36:27.810762 2197803 api_server.go:253] Checking apiserver healthz at https://192.168.67.2:8443/healthz ...
	I0815 23:36:27.818507 2197803 api_server.go:279] https://192.168.67.2:8443/healthz returned 200:
	ok
	I0815 23:36:27.818535 2197803 status.go:422] multinode-196543 apiserver status = Running (err=<nil>)
	I0815 23:36:27.818546 2197803 status.go:257] multinode-196543 status: &{Name:multinode-196543 Host:Running Kubelet:Running APIServer:Running Kubeconfig:Configured Worker:false TimeToStop: DockerEnv: PodManEnv:}
	I0815 23:36:27.818565 2197803 status.go:255] checking status of multinode-196543-m02 ...
	I0815 23:36:27.818894 2197803 cli_runner.go:164] Run: docker container inspect multinode-196543-m02 --format={{.State.Status}}
	I0815 23:36:27.835155 2197803 status.go:330] multinode-196543-m02 host status = "Running" (err=<nil>)
	I0815 23:36:27.835185 2197803 host.go:66] Checking if "multinode-196543-m02" exists ...
	I0815 23:36:27.835509 2197803 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" multinode-196543-m02
	I0815 23:36:27.852587 2197803 host.go:66] Checking if "multinode-196543-m02" exists ...
	I0815 23:36:27.852938 2197803 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I0815 23:36:27.852986 2197803 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-196543-m02
	I0815 23:36:27.873997 2197803 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:34869 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/multinode-196543-m02/id_rsa Username:docker}
	I0815 23:36:27.962763 2197803 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I0815 23:36:27.975034 2197803 status.go:257] multinode-196543-m02 status: &{Name:multinode-196543-m02 Host:Running Kubelet:Running APIServer:Irrelevant Kubeconfig:Irrelevant Worker:true TimeToStop: DockerEnv: PodManEnv:}
	I0815 23:36:27.975078 2197803 status.go:255] checking status of multinode-196543-m03 ...
	I0815 23:36:27.975411 2197803 cli_runner.go:164] Run: docker container inspect multinode-196543-m03 --format={{.State.Status}}
	I0815 23:36:27.992071 2197803 status.go:330] multinode-196543-m03 host status = "Stopped" (err=<nil>)
	I0815 23:36:27.992096 2197803 status.go:343] host is not running, skipping remaining checks
	I0815 23:36:27.992104 2197803 status.go:257] multinode-196543-m03 status: &{Name:multinode-196543-m03 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:true TimeToStop: DockerEnv: PodManEnv:}

                                                
                                                
** /stderr **
--- PASS: TestMultiNode/serial/StopNode (2.23s)
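The non-zero exits above are expected behaviour, not failures: once any node is stopped, `minikube status` still prints the per-node report but exits with status 7, which is what the test asserts. Roughly (binary path shortened to `minikube`):
    minikube -p multinode-196543 node stop m03
    minikube -p multinode-196543 status    # prints node states; exits 7 while m03 is stopped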

                                                
                                    
TestMultiNode/serial/StartAfterStop (11.61s)

                                                
                                                
=== RUN   TestMultiNode/serial/StartAfterStop
multinode_test.go:282: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 node start m03 -v=7 --alsologtostderr
multinode_test.go:282: (dbg) Done: out/minikube-linux-arm64 -p multinode-196543 node start m03 -v=7 --alsologtostderr: (10.84704929s)
multinode_test.go:290: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 status -v=7 --alsologtostderr
multinode_test.go:306: (dbg) Run:  kubectl get nodes
--- PASS: TestMultiNode/serial/StartAfterStop (11.61s)

                                                
                                    
TestMultiNode/serial/RestartKeepsNodes (99.72s)

                                                
                                                
=== RUN   TestMultiNode/serial/RestartKeepsNodes
multinode_test.go:314: (dbg) Run:  out/minikube-linux-arm64 node list -p multinode-196543
multinode_test.go:321: (dbg) Run:  out/minikube-linux-arm64 stop -p multinode-196543
multinode_test.go:321: (dbg) Done: out/minikube-linux-arm64 stop -p multinode-196543: (22.776605088s)
multinode_test.go:326: (dbg) Run:  out/minikube-linux-arm64 start -p multinode-196543 --wait=true -v=8 --alsologtostderr
E0815 23:37:23.592987 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/functional-300199/client.crt: no such file or directory" logger="UnhandledError"
multinode_test.go:326: (dbg) Done: out/minikube-linux-arm64 start -p multinode-196543 --wait=true -v=8 --alsologtostderr: (1m16.793516857s)
multinode_test.go:331: (dbg) Run:  out/minikube-linux-arm64 node list -p multinode-196543
--- PASS: TestMultiNode/serial/RestartKeepsNodes (99.72s)
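For reference, the sequence exercised above is: record the node list, stop the whole cluster, start it again with --wait=true, and confirm the node list is unchanged. Condensed (binary path shortened to `minikube`):
    minikube node list -p multinode-196543
    minikube stop -p multinode-196543
    minikube start -p multinode-196543 --wait=true
    minikube node list -p multinode-196543    # should report the same nodes as before the stop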

                                                
                                    
TestMultiNode/serial/DeleteNode (5.64s)

                                                
                                                
=== RUN   TestMultiNode/serial/DeleteNode
multinode_test.go:416: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 node delete m03
multinode_test.go:416: (dbg) Done: out/minikube-linux-arm64 -p multinode-196543 node delete m03: (4.968393107s)
multinode_test.go:422: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 status --alsologtostderr
multinode_test.go:436: (dbg) Run:  kubectl get nodes
multinode_test.go:444: (dbg) Run:  kubectl get nodes -o "go-template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}} {{.status}}{{"\n"}}{{end}}{{end}}{{end}}'"
--- PASS: TestMultiNode/serial/DeleteNode (5.64s)

                                                
                                    
TestMultiNode/serial/StopMultiNode (21.71s)

                                                
                                                
=== RUN   TestMultiNode/serial/StopMultiNode
multinode_test.go:345: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 stop
multinode_test.go:345: (dbg) Done: out/minikube-linux-arm64 -p multinode-196543 stop: (21.533548185s)
multinode_test.go:351: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 status
multinode_test.go:351: (dbg) Non-zero exit: out/minikube-linux-arm64 -p multinode-196543 status: exit status 7 (87.786945ms)

                                                
                                                
-- stdout --
	multinode-196543
	type: Control Plane
	host: Stopped
	kubelet: Stopped
	apiserver: Stopped
	kubeconfig: Stopped
	
	multinode-196543-m02
	type: Worker
	host: Stopped
	kubelet: Stopped
	

                                                
                                                
-- /stdout --
multinode_test.go:358: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 status --alsologtostderr
multinode_test.go:358: (dbg) Non-zero exit: out/minikube-linux-arm64 -p multinode-196543 status --alsologtostderr: exit status 7 (84.033278ms)

                                                
                                                
-- stdout --
	multinode-196543
	type: Control Plane
	host: Stopped
	kubelet: Stopped
	apiserver: Stopped
	kubeconfig: Stopped
	
	multinode-196543-m02
	type: Worker
	host: Stopped
	kubelet: Stopped
	

                                                
                                                
-- /stdout --
** stderr ** 
	I0815 23:38:46.638741 2211340 out.go:345] Setting OutFile to fd 1 ...
	I0815 23:38:46.638878 2211340 out.go:392] TERM=,COLORTERM=, which probably does not support color
	I0815 23:38:46.638888 2211340 out.go:358] Setting ErrFile to fd 2...
	I0815 23:38:46.638894 2211340 out.go:392] TERM=,COLORTERM=, which probably does not support color
	I0815 23:38:46.639127 2211340 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19452-2026001/.minikube/bin
	I0815 23:38:46.639300 2211340 out.go:352] Setting JSON to false
	I0815 23:38:46.639337 2211340 mustload.go:65] Loading cluster: multinode-196543
	I0815 23:38:46.639724 2211340 config.go:182] Loaded profile config "multinode-196543": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.0
	I0815 23:38:46.639741 2211340 status.go:255] checking status of multinode-196543 ...
	I0815 23:38:46.640199 2211340 cli_runner.go:164] Run: docker container inspect multinode-196543 --format={{.State.Status}}
	I0815 23:38:46.640462 2211340 notify.go:220] Checking for updates...
	I0815 23:38:46.657713 2211340 status.go:330] multinode-196543 host status = "Stopped" (err=<nil>)
	I0815 23:38:46.657736 2211340 status.go:343] host is not running, skipping remaining checks
	I0815 23:38:46.657743 2211340 status.go:257] multinode-196543 status: &{Name:multinode-196543 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:false TimeToStop: DockerEnv: PodManEnv:}
	I0815 23:38:46.657766 2211340 status.go:255] checking status of multinode-196543-m02 ...
	I0815 23:38:46.658090 2211340 cli_runner.go:164] Run: docker container inspect multinode-196543-m02 --format={{.State.Status}}
	I0815 23:38:46.673902 2211340 status.go:330] multinode-196543-m02 host status = "Stopped" (err=<nil>)
	I0815 23:38:46.673924 2211340 status.go:343] host is not running, skipping remaining checks
	I0815 23:38:46.673931 2211340 status.go:257] multinode-196543-m02 status: &{Name:multinode-196543-m02 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:true TimeToStop: DockerEnv: PodManEnv:}

                                                
                                                
** /stderr **
--- PASS: TestMultiNode/serial/StopMultiNode (21.71s)

                                                
                                    
TestMultiNode/serial/RestartMultiNode (53.14s)

                                                
                                                
=== RUN   TestMultiNode/serial/RestartMultiNode
multinode_test.go:376: (dbg) Run:  out/minikube-linux-arm64 start -p multinode-196543 --wait=true -v=8 --alsologtostderr --driver=docker  --container-runtime=docker
E0815 23:39:36.182370 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/addons-083442/client.crt: no such file or directory" logger="UnhandledError"
multinode_test.go:376: (dbg) Done: out/minikube-linux-arm64 start -p multinode-196543 --wait=true -v=8 --alsologtostderr --driver=docker  --container-runtime=docker: (52.46717002s)
multinode_test.go:382: (dbg) Run:  out/minikube-linux-arm64 -p multinode-196543 status --alsologtostderr
multinode_test.go:396: (dbg) Run:  kubectl get nodes
multinode_test.go:404: (dbg) Run:  kubectl get nodes -o "go-template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}} {{.status}}{{"\n"}}{{end}}{{end}}{{end}}'"
--- PASS: TestMultiNode/serial/RestartMultiNode (53.14s)

                                                
                                    
TestMultiNode/serial/ValidateNameConflict (34.96s)

                                                
                                                
=== RUN   TestMultiNode/serial/ValidateNameConflict
multinode_test.go:455: (dbg) Run:  out/minikube-linux-arm64 node list -p multinode-196543
multinode_test.go:464: (dbg) Run:  out/minikube-linux-arm64 start -p multinode-196543-m02 --driver=docker  --container-runtime=docker
multinode_test.go:464: (dbg) Non-zero exit: out/minikube-linux-arm64 start -p multinode-196543-m02 --driver=docker  --container-runtime=docker: exit status 14 (83.766141ms)

                                                
                                                
-- stdout --
	* [multinode-196543-m02] minikube v1.33.1 on Ubuntu 20.04 (arm64)
	  - MINIKUBE_LOCATION=19452
	  - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	  - KUBECONFIG=/home/jenkins/minikube-integration/19452-2026001/kubeconfig
	  - MINIKUBE_HOME=/home/jenkins/minikube-integration/19452-2026001/.minikube
	  - MINIKUBE_BIN=out/minikube-linux-arm64
	  - MINIKUBE_FORCE_SYSTEMD=
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	! Profile name 'multinode-196543-m02' is duplicated with machine name 'multinode-196543-m02' in profile 'multinode-196543'
	X Exiting due to MK_USAGE: Profile name should be unique

                                                
                                                
** /stderr **
multinode_test.go:472: (dbg) Run:  out/minikube-linux-arm64 start -p multinode-196543-m03 --driver=docker  --container-runtime=docker
multinode_test.go:472: (dbg) Done: out/minikube-linux-arm64 start -p multinode-196543-m03 --driver=docker  --container-runtime=docker: (32.393433405s)
multinode_test.go:479: (dbg) Run:  out/minikube-linux-arm64 node add -p multinode-196543
multinode_test.go:479: (dbg) Non-zero exit: out/minikube-linux-arm64 node add -p multinode-196543: exit status 80 (341.735854ms)

                                                
                                                
-- stdout --
	* Adding node m03 to cluster multinode-196543 as [worker]
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	X Exiting due to GUEST_NODE_ADD: failed to add node: Node multinode-196543-m03 already exists in multinode-196543-m03 profile
	* 
	╭─────────────────────────────────────────────────────────────────────────────────────────────╮
	│                                                                                             │
	│    * If the above advice does not help, please let us know:                                 │
	│      https://github.com/kubernetes/minikube/issues/new/choose                               │
	│                                                                                             │
	│    * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue.    │
	│    * Please also attach the following file to the GitHub issue:                             │
	│    * - /tmp/minikube_node_040ea7097fd6ed71e65be9a474587f81f0ccd21d_1.log                    │
	│                                                                                             │
	╰─────────────────────────────────────────────────────────────────────────────────────────────╯

                                                
                                                
** /stderr **
multinode_test.go:484: (dbg) Run:  out/minikube-linux-arm64 delete -p multinode-196543-m03
multinode_test.go:484: (dbg) Done: out/minikube-linux-arm64 delete -p multinode-196543-m03: (2.085776305s)
--- PASS: TestMultiNode/serial/ValidateNameConflict (34.96s)
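Both failures above are the behaviour under test: a new profile may not reuse a name that already belongs to a machine in another profile (exit status 14, MK_USAGE), and `node add` refuses to add a node whose generated name collides with an existing standalone profile (exit status 80, GUEST_NODE_ADD). In outline (binary path shortened to `minikube`):
    minikube start -p multinode-196543-m02 --driver=docker    # rejected: name already used as a machine inside multinode-196543
    minikube start -p multinode-196543-m03 --driver=docker    # accepted: creates a separate single-node cluster
    minikube node add -p multinode-196543                     # rejected: would create multinode-196543-m03, which now exists as its own profile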

                                                
                                    
TestPreload (113.06s)

                                                
                                                
=== RUN   TestPreload
preload_test.go:44: (dbg) Run:  out/minikube-linux-arm64 start -p test-preload-672238 --memory=2200 --alsologtostderr --wait=true --preload=false --driver=docker  --container-runtime=docker --kubernetes-version=v1.24.4
E0815 23:41:00.530849 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/functional-300199/client.crt: no such file or directory" logger="UnhandledError"
preload_test.go:44: (dbg) Done: out/minikube-linux-arm64 start -p test-preload-672238 --memory=2200 --alsologtostderr --wait=true --preload=false --driver=docker  --container-runtime=docker --kubernetes-version=v1.24.4: (1m3.60497851s)
preload_test.go:52: (dbg) Run:  out/minikube-linux-arm64 -p test-preload-672238 image pull gcr.io/k8s-minikube/busybox
preload_test.go:52: (dbg) Done: out/minikube-linux-arm64 -p test-preload-672238 image pull gcr.io/k8s-minikube/busybox: (1.23752265s)
preload_test.go:58: (dbg) Run:  out/minikube-linux-arm64 stop -p test-preload-672238
preload_test.go:58: (dbg) Done: out/minikube-linux-arm64 stop -p test-preload-672238: (10.895603467s)
preload_test.go:66: (dbg) Run:  out/minikube-linux-arm64 start -p test-preload-672238 --memory=2200 --alsologtostderr -v=1 --wait=true --driver=docker  --container-runtime=docker
preload_test.go:66: (dbg) Done: out/minikube-linux-arm64 start -p test-preload-672238 --memory=2200 --alsologtostderr -v=1 --wait=true --driver=docker  --container-runtime=docker: (34.948450091s)
preload_test.go:71: (dbg) Run:  out/minikube-linux-arm64 -p test-preload-672238 image list
helpers_test.go:175: Cleaning up "test-preload-672238" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p test-preload-672238
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p test-preload-672238: (2.162524486s)
--- PASS: TestPreload (113.06s)
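The preload flow above starts a cluster with --preload=false on an older Kubernetes version, pulls an extra image into it, stops it, restarts it without the flag, and then checks via `image list` that the manually pulled image survived the restart. Condensed (binary path shortened to `minikube`, profile name reused from this run):
    minikube start -p test-preload-672238 --preload=false --kubernetes-version=v1.24.4 --driver=docker --container-runtime=docker
    minikube -p test-preload-672238 image pull gcr.io/k8s-minikube/busybox
    minikube stop -p test-preload-672238
    minikube start -p test-preload-672238 --driver=docker --container-runtime=docker
    minikube -p test-preload-672238 image list    # busybox should still be listed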

                                                
                                    
TestScheduledStopUnix (104.36s)

                                                
                                                
=== RUN   TestScheduledStopUnix
scheduled_stop_test.go:128: (dbg) Run:  out/minikube-linux-arm64 start -p scheduled-stop-618700 --memory=2048 --driver=docker  --container-runtime=docker
scheduled_stop_test.go:128: (dbg) Done: out/minikube-linux-arm64 start -p scheduled-stop-618700 --memory=2048 --driver=docker  --container-runtime=docker: (31.163490669s)
scheduled_stop_test.go:137: (dbg) Run:  out/minikube-linux-arm64 stop -p scheduled-stop-618700 --schedule 5m
scheduled_stop_test.go:191: (dbg) Run:  out/minikube-linux-arm64 status --format={{.TimeToStop}} -p scheduled-stop-618700 -n scheduled-stop-618700
scheduled_stop_test.go:169: signal error was:  <nil>
scheduled_stop_test.go:137: (dbg) Run:  out/minikube-linux-arm64 stop -p scheduled-stop-618700 --schedule 15s
scheduled_stop_test.go:169: signal error was:  os: process already finished
scheduled_stop_test.go:137: (dbg) Run:  out/minikube-linux-arm64 stop -p scheduled-stop-618700 --cancel-scheduled
scheduled_stop_test.go:176: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p scheduled-stop-618700 -n scheduled-stop-618700
scheduled_stop_test.go:205: (dbg) Run:  out/minikube-linux-arm64 status -p scheduled-stop-618700
scheduled_stop_test.go:137: (dbg) Run:  out/minikube-linux-arm64 stop -p scheduled-stop-618700 --schedule 15s
scheduled_stop_test.go:169: signal error was:  os: process already finished
scheduled_stop_test.go:205: (dbg) Run:  out/minikube-linux-arm64 status -p scheduled-stop-618700
scheduled_stop_test.go:205: (dbg) Non-zero exit: out/minikube-linux-arm64 status -p scheduled-stop-618700: exit status 7 (66.254286ms)

                                                
                                                
-- stdout --
	scheduled-stop-618700
	type: Control Plane
	host: Stopped
	kubelet: Stopped
	apiserver: Stopped
	kubeconfig: Stopped
	

                                                
                                                
-- /stdout --
scheduled_stop_test.go:176: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p scheduled-stop-618700 -n scheduled-stop-618700
scheduled_stop_test.go:176: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Host}} -p scheduled-stop-618700 -n scheduled-stop-618700: exit status 7 (70.569622ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
scheduled_stop_test.go:176: status error: exit status 7 (may be ok)
helpers_test.go:175: Cleaning up "scheduled-stop-618700" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p scheduled-stop-618700
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p scheduled-stop-618700: (1.699780982s)
--- PASS: TestScheduledStopUnix (104.36s)
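The scheduled-stop test above schedules a stop, inspects the pending schedule via `status --format={{.TimeToStop}}`, cancels it, re-schedules a short 15s stop, and finally confirms the profile actually reaches the Stopped state (the exit status 7 from `status` is the expected signal, as noted by "may be ok"). In outline (binary path shortened to `minikube`):
    minikube stop -p scheduled-stop-618700 --schedule 5m
    minikube status -p scheduled-stop-618700 --format={{.TimeToStop}}
    minikube stop -p scheduled-stop-618700 --cancel-scheduled
    minikube stop -p scheduled-stop-618700 --schedule 15s
    minikube status -p scheduled-stop-618700    # exits 7 once the scheduled stop has fired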

                                                
                                    
TestSkaffold (117.25s)

                                                
                                                
=== RUN   TestSkaffold
skaffold_test.go:59: (dbg) Run:  /tmp/skaffold.exe1892287627 version
skaffold_test.go:63: skaffold version: v2.13.1
skaffold_test.go:66: (dbg) Run:  out/minikube-linux-arm64 start -p skaffold-624546 --memory=2600 --driver=docker  --container-runtime=docker
skaffold_test.go:66: (dbg) Done: out/minikube-linux-arm64 start -p skaffold-624546 --memory=2600 --driver=docker  --container-runtime=docker: (32.858900365s)
skaffold_test.go:86: copying out/minikube-linux-arm64 to /home/jenkins/workspace/Docker_Linux_docker_arm64/out/minikube
skaffold_test.go:105: (dbg) Run:  /tmp/skaffold.exe1892287627 run --minikube-profile skaffold-624546 --kube-context skaffold-624546 --status-check=true --port-forward=false --interactive=false
E0815 23:44:36.182505 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/addons-083442/client.crt: no such file or directory" logger="UnhandledError"
skaffold_test.go:105: (dbg) Done: /tmp/skaffold.exe1892287627 run --minikube-profile skaffold-624546 --kube-context skaffold-624546 --status-check=true --port-forward=false --interactive=false: (1m9.111208886s)
skaffold_test.go:111: (dbg) TestSkaffold: waiting 1m0s for pods matching "app=leeroy-app" in namespace "default" ...
helpers_test.go:344: "leeroy-app-7b97567768-pmpdw" [b2d8f825-6279-405c-a84b-f1fc78a1d511] Running
skaffold_test.go:111: (dbg) TestSkaffold: app=leeroy-app healthy within 6.004590137s
skaffold_test.go:114: (dbg) TestSkaffold: waiting 1m0s for pods matching "app=leeroy-web" in namespace "default" ...
helpers_test.go:344: "leeroy-web-8c7cc9cd8-qb7lp" [ebff26b1-1f95-40fa-8a46-4b646f5e10d8] Running
skaffold_test.go:114: (dbg) TestSkaffold: app=leeroy-web healthy within 5.004120445s
helpers_test.go:175: Cleaning up "skaffold-624546" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p skaffold-624546
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p skaffold-624546: (2.978902866s)
--- PASS: TestSkaffold (117.25s)
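The skaffold portion above deploys the example leeroy-app/leeroy-web services using a downloaded skaffold binary (the temp path shown in the log) pointed at the minikube profile; the essential invocation is:
    skaffold run --minikube-profile skaffold-624546 --kube-context skaffold-624546 --status-check=true --port-forward=false --interactive=false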

                                                
                                    
TestInsufficientStorage (12.26s)

                                                
                                                
=== RUN   TestInsufficientStorage
status_test.go:50: (dbg) Run:  out/minikube-linux-arm64 start -p insufficient-storage-483737 --memory=2048 --output=json --wait=true --driver=docker  --container-runtime=docker
E0815 23:46:00.530904 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/functional-300199/client.crt: no such file or directory" logger="UnhandledError"
status_test.go:50: (dbg) Non-zero exit: out/minikube-linux-arm64 start -p insufficient-storage-483737 --memory=2048 --output=json --wait=true --driver=docker  --container-runtime=docker: exit status 26 (9.993616658s)

                                                
                                                
-- stdout --
	{"specversion":"1.0","id":"8511c806-984b-4ff1-bf78-6993ff302e1c","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.step","datacontenttype":"application/json","data":{"currentstep":"0","message":"[insufficient-storage-483737] minikube v1.33.1 on Ubuntu 20.04 (arm64)","name":"Initial Minikube Setup","totalsteps":"19"}}
	{"specversion":"1.0","id":"0e5a0067-2f19-4def-a1a9-4cd046a2dfcf","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_LOCATION=19452"}}
	{"specversion":"1.0","id":"75483ded-4e7a-46f6-b026-ba66b0edcfaa","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true"}}
	{"specversion":"1.0","id":"2dfd8041-8196-4411-812d-8ae06350cc62","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"KUBECONFIG=/home/jenkins/minikube-integration/19452-2026001/kubeconfig"}}
	{"specversion":"1.0","id":"85b0da92-9b7a-4f98-a944-79e7a1f49241","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_HOME=/home/jenkins/minikube-integration/19452-2026001/.minikube"}}
	{"specversion":"1.0","id":"1d7cac23-ceb7-47c5-b0d0-c378235796ae","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_BIN=out/minikube-linux-arm64"}}
	{"specversion":"1.0","id":"a0055a5b-eecf-48eb-b337-dc4d22260db1","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_FORCE_SYSTEMD="}}
	{"specversion":"1.0","id":"7fcdeec6-cccb-4245-ac92-a034ddc21392","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_TEST_STORAGE_CAPACITY=100"}}
	{"specversion":"1.0","id":"3a614ce0-769c-468a-b744-761d856e3610","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_TEST_AVAILABLE_STORAGE=19"}}
	{"specversion":"1.0","id":"9e6623c2-dd11-48bd-850e-6e2e546f1305","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.step","datacontenttype":"application/json","data":{"currentstep":"1","message":"Using the docker driver based on user configuration","name":"Selecting Driver","totalsteps":"19"}}
	{"specversion":"1.0","id":"28a4d51c-9d17-4e29-8414-5a9d7bcf03e6","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"Using Docker driver with root privileges"}}
	{"specversion":"1.0","id":"4fa2ea85-d34a-487a-8519-450473b7099e","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.step","datacontenttype":"application/json","data":{"currentstep":"3","message":"Starting \"insufficient-storage-483737\" primary control-plane node in \"insufficient-storage-483737\" cluster","name":"Starting Node","totalsteps":"19"}}
	{"specversion":"1.0","id":"524cb3a8-b7c8-47a6-b454-ce5c671b04f5","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.step","datacontenttype":"application/json","data":{"currentstep":"5","message":"Pulling base image v0.0.44-1723740748-19452 ...","name":"Pulling Base Image","totalsteps":"19"}}
	{"specversion":"1.0","id":"b20998b1-8547-4807-b653-9cc63440b8ae","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.step","datacontenttype":"application/json","data":{"currentstep":"8","message":"Creating docker container (CPUs=2, Memory=2048MB) ...","name":"Creating Container","totalsteps":"19"}}
	{"specversion":"1.0","id":"de0f865a-10b4-4705-a5bf-fb49607ecc49","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.error","datacontenttype":"application/json","data":{"advice":"Try one or more of the following to free up space on the device:\n\t\n\t\t\t1. Run \"docker system prune\" to remove unused Docker data (optionally with \"-a\")\n\t\t\t2. Increase the storage allocated to Docker for Desktop by clicking on:\n\t\t\t\tDocker icon \u003e Preferences \u003e Resources \u003e Disk Image Size\n\t\t\t3. Run \"minikube ssh -- docker system prune\" if using the Docker container runtime","exitcode":"26","issues":"https://github.com/kubernetes/minikube/issues/9024","message":"Docker is out of disk space! (/var is at 100% of capacity). You can pass '--force' to skip this check.","name":"RSRC_DOCKER_STORAGE","url":""}}

                                                
                                                
-- /stdout --
status_test.go:76: (dbg) Run:  out/minikube-linux-arm64 status -p insufficient-storage-483737 --output=json --layout=cluster
status_test.go:76: (dbg) Non-zero exit: out/minikube-linux-arm64 status -p insufficient-storage-483737 --output=json --layout=cluster: exit status 7 (275.637138ms)

                                                
                                                
-- stdout --
	{"Name":"insufficient-storage-483737","StatusCode":507,"StatusName":"InsufficientStorage","StatusDetail":"/var is almost out of disk space","Step":"Creating Container","StepDetail":"Creating docker container (CPUs=2, Memory=2048MB) ...","BinaryVersion":"v1.33.1","Components":{"kubeconfig":{"Name":"kubeconfig","StatusCode":500,"StatusName":"Error"}},"Nodes":[{"Name":"insufficient-storage-483737","StatusCode":507,"StatusName":"InsufficientStorage","Components":{"apiserver":{"Name":"apiserver","StatusCode":405,"StatusName":"Stopped"},"kubelet":{"Name":"kubelet","StatusCode":405,"StatusName":"Stopped"}}}]}

                                                
                                                
-- /stdout --
** stderr ** 
	E0815 23:46:03.768334 2245455 status.go:417] kubeconfig endpoint: get endpoint: "insufficient-storage-483737" does not appear in /home/jenkins/minikube-integration/19452-2026001/kubeconfig

                                                
                                                
** /stderr **
status_test.go:76: (dbg) Run:  out/minikube-linux-arm64 status -p insufficient-storage-483737 --output=json --layout=cluster
status_test.go:76: (dbg) Non-zero exit: out/minikube-linux-arm64 status -p insufficient-storage-483737 --output=json --layout=cluster: exit status 7 (285.514197ms)

                                                
                                                
-- stdout --
	{"Name":"insufficient-storage-483737","StatusCode":507,"StatusName":"InsufficientStorage","StatusDetail":"/var is almost out of disk space","BinaryVersion":"v1.33.1","Components":{"kubeconfig":{"Name":"kubeconfig","StatusCode":500,"StatusName":"Error"}},"Nodes":[{"Name":"insufficient-storage-483737","StatusCode":507,"StatusName":"InsufficientStorage","Components":{"apiserver":{"Name":"apiserver","StatusCode":405,"StatusName":"Stopped"},"kubelet":{"Name":"kubelet","StatusCode":405,"StatusName":"Stopped"}}}]}

                                                
                                                
-- /stdout --
** stderr ** 
	E0815 23:46:04.054951 2245518 status.go:417] kubeconfig endpoint: get endpoint: "insufficient-storage-483737" does not appear in /home/jenkins/minikube-integration/19452-2026001/kubeconfig
	E0815 23:46:04.065245 2245518 status.go:560] unable to read event log: stat: stat /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/insufficient-storage-483737/events.json: no such file or directory

                                                
                                                
** /stderr **
helpers_test.go:175: Cleaning up "insufficient-storage-483737" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p insufficient-storage-483737
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p insufficient-storage-483737: (1.706143648s)
--- PASS: TestInsufficientStorage (12.26s)
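The exit status 26 above is the point of this test: with the MINIKUBE_TEST_STORAGE_CAPACITY / MINIKUBE_TEST_AVAILABLE_STORAGE values visible in the JSON output, minikube treats /var as effectively full, aborts with RSRC_DOCKER_STORAGE (the error text notes that `--force` skips this check), and `status --layout=cluster` afterwards reports StatusCode 507 (InsufficientStorage). Roughly (binary path shortened to `minikube`):
    minikube start -p insufficient-storage-483737 --output=json --driver=docker --container-runtime=docker    # exits 26 when /var is reported full
    minikube status -p insufficient-storage-483737 --output=json --layout=cluster                             # StatusCode 507, InsufficientStorage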

                                                
                                    
TestRunningBinaryUpgrade (102.51s)

                                                
                                                
=== RUN   TestRunningBinaryUpgrade
=== PAUSE TestRunningBinaryUpgrade

                                                
                                                

                                                
                                                
=== CONT  TestRunningBinaryUpgrade
version_upgrade_test.go:120: (dbg) Run:  /tmp/minikube-v1.26.0.2174450351 start -p running-upgrade-880167 --memory=2200 --vm-driver=docker  --container-runtime=docker
E0815 23:49:36.182794 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/addons-083442/client.crt: no such file or directory" logger="UnhandledError"
version_upgrade_test.go:120: (dbg) Done: /tmp/minikube-v1.26.0.2174450351 start -p running-upgrade-880167 --memory=2200 --vm-driver=docker  --container-runtime=docker: (1m8.240828121s)
version_upgrade_test.go:130: (dbg) Run:  out/minikube-linux-arm64 start -p running-upgrade-880167 --memory=2200 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker
version_upgrade_test.go:130: (dbg) Done: out/minikube-linux-arm64 start -p running-upgrade-880167 --memory=2200 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker: (31.360125912s)
helpers_test.go:175: Cleaning up "running-upgrade-880167" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p running-upgrade-880167
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p running-upgrade-880167: (2.186758277s)
--- PASS: TestRunningBinaryUpgrade (102.51s)
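This upgrade path starts a cluster with an older released binary (minikube v1.26.0, downloaded to a temp path) and then, without stopping it, re-runs `start` on the same profile with the freshly built binary; the second start must succeed against the still-running cluster. Schematically (the <suffix> in the temp path is a placeholder):
    /tmp/minikube-v1.26.0.<suffix> start -p running-upgrade-880167 --memory=2200 --vm-driver=docker --container-runtime=docker
    out/minikube-linux-arm64 start -p running-upgrade-880167 --memory=2200 --driver=docker --container-runtime=docker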

                                                
                                    
TestKubernetesUpgrade (189.52s)

                                                
                                                
=== RUN   TestKubernetesUpgrade
=== PAUSE TestKubernetesUpgrade

                                                
                                                

                                                
                                                
=== CONT  TestKubernetesUpgrade
version_upgrade_test.go:222: (dbg) Run:  out/minikube-linux-arm64 start -p kubernetes-upgrade-648387 --memory=2200 --kubernetes-version=v1.20.0 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker
E0815 23:52:01.454521 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/skaffold-624546/client.crt: no such file or directory" logger="UnhandledError"
version_upgrade_test.go:222: (dbg) Done: out/minikube-linux-arm64 start -p kubernetes-upgrade-648387 --memory=2200 --kubernetes-version=v1.20.0 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker: (1m4.179722958s)
version_upgrade_test.go:227: (dbg) Run:  out/minikube-linux-arm64 stop -p kubernetes-upgrade-648387
version_upgrade_test.go:227: (dbg) Done: out/minikube-linux-arm64 stop -p kubernetes-upgrade-648387: (10.936021432s)
version_upgrade_test.go:232: (dbg) Run:  out/minikube-linux-arm64 -p kubernetes-upgrade-648387 status --format={{.Host}}
version_upgrade_test.go:232: (dbg) Non-zero exit: out/minikube-linux-arm64 -p kubernetes-upgrade-648387 status --format={{.Host}}: exit status 7 (88.941062ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
version_upgrade_test.go:234: status error: exit status 7 (may be ok)
version_upgrade_test.go:243: (dbg) Run:  out/minikube-linux-arm64 start -p kubernetes-upgrade-648387 --memory=2200 --kubernetes-version=v1.31.0 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker
E0815 23:53:23.375842 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/skaffold-624546/client.crt: no such file or directory" logger="UnhandledError"
version_upgrade_test.go:243: (dbg) Done: out/minikube-linux-arm64 start -p kubernetes-upgrade-648387 --memory=2200 --kubernetes-version=v1.31.0 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker: (33.587384713s)
version_upgrade_test.go:248: (dbg) Run:  kubectl --context kubernetes-upgrade-648387 version --output=json
version_upgrade_test.go:267: Attempting to downgrade Kubernetes (should fail)
version_upgrade_test.go:269: (dbg) Run:  out/minikube-linux-arm64 start -p kubernetes-upgrade-648387 --memory=2200 --kubernetes-version=v1.20.0 --driver=docker  --container-runtime=docker
version_upgrade_test.go:269: (dbg) Non-zero exit: out/minikube-linux-arm64 start -p kubernetes-upgrade-648387 --memory=2200 --kubernetes-version=v1.20.0 --driver=docker  --container-runtime=docker: exit status 106 (96.788579ms)

                                                
                                                
-- stdout --
	* [kubernetes-upgrade-648387] minikube v1.33.1 on Ubuntu 20.04 (arm64)
	  - MINIKUBE_LOCATION=19452
	  - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	  - KUBECONFIG=/home/jenkins/minikube-integration/19452-2026001/kubeconfig
	  - MINIKUBE_HOME=/home/jenkins/minikube-integration/19452-2026001/.minikube
	  - MINIKUBE_BIN=out/minikube-linux-arm64
	  - MINIKUBE_FORCE_SYSTEMD=
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	X Exiting due to K8S_DOWNGRADE_UNSUPPORTED: Unable to safely downgrade existing Kubernetes v1.31.0 cluster to v1.20.0
	* Suggestion: 
	
	    1) Recreate the cluster with Kubernetes 1.20.0, by running:
	    
	    minikube delete -p kubernetes-upgrade-648387
	    minikube start -p kubernetes-upgrade-648387 --kubernetes-version=v1.20.0
	    
	    2) Create a second cluster with Kubernetes 1.20.0, by running:
	    
	    minikube start -p kubernetes-upgrade-6483872 --kubernetes-version=v1.20.0
	    
	    3) Use the existing cluster at version Kubernetes 1.31.0, by running:
	    
	    minikube start -p kubernetes-upgrade-648387 --kubernetes-version=v1.31.0
	    

                                                
                                                
** /stderr **
version_upgrade_test.go:273: Attempting restart after unsuccessful downgrade
version_upgrade_test.go:275: (dbg) Run:  out/minikube-linux-arm64 start -p kubernetes-upgrade-648387 --memory=2200 --kubernetes-version=v1.31.0 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker
version_upgrade_test.go:275: (dbg) Done: out/minikube-linux-arm64 start -p kubernetes-upgrade-648387 --memory=2200 --kubernetes-version=v1.31.0 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker: (1m17.726934158s)
helpers_test.go:175: Cleaning up "kubernetes-upgrade-648387" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p kubernetes-upgrade-648387
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p kubernetes-upgrade-648387: (2.787054098s)
--- PASS: TestKubernetesUpgrade (189.52s)
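The sequence above upgrades an existing cluster from Kubernetes v1.20.0 to v1.31.0, verifies that an in-place downgrade back to v1.20.0 is refused (exit status 106, K8S_DOWNGRADE_UNSUPPORTED, with the delete/second-cluster suggestions shown), and then confirms a further start at v1.31.0 still works. Condensed (binary path shortened to `minikube`):
    minikube start -p kubernetes-upgrade-648387 --kubernetes-version=v1.20.0 --driver=docker --container-runtime=docker
    minikube stop -p kubernetes-upgrade-648387
    minikube start -p kubernetes-upgrade-648387 --kubernetes-version=v1.31.0 --driver=docker --container-runtime=docker
    minikube start -p kubernetes-upgrade-648387 --kubernetes-version=v1.20.0 --driver=docker --container-runtime=docker    # refused: exit 106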

                                                
                                    
TestMissingContainerUpgrade (111.7s)

                                                
                                                
=== RUN   TestMissingContainerUpgrade
=== PAUSE TestMissingContainerUpgrade

                                                
                                                

                                                
                                                
=== CONT  TestMissingContainerUpgrade
version_upgrade_test.go:309: (dbg) Run:  /tmp/minikube-v1.26.0.2850803086 start -p missing-upgrade-657825 --memory=2200 --driver=docker  --container-runtime=docker
E0815 23:50:39.510310 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/skaffold-624546/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:50:39.516670 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/skaffold-624546/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:50:39.528035 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/skaffold-624546/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:50:39.549409 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/skaffold-624546/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:50:39.591440 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/skaffold-624546/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:50:39.672890 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/skaffold-624546/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:50:39.834319 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/skaffold-624546/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:50:40.156077 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/skaffold-624546/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:50:40.798188 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/skaffold-624546/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:50:42.080187 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/skaffold-624546/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:50:44.641787 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/skaffold-624546/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:50:49.763880 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/skaffold-624546/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:51:00.010727 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/skaffold-624546/client.crt: no such file or directory" logger="UnhandledError"
E0815 23:51:00.530363 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/functional-300199/client.crt: no such file or directory" logger="UnhandledError"
version_upgrade_test.go:309: (dbg) Done: /tmp/minikube-v1.26.0.2850803086 start -p missing-upgrade-657825 --memory=2200 --driver=docker  --container-runtime=docker: (36.470611378s)
version_upgrade_test.go:318: (dbg) Run:  docker stop missing-upgrade-657825
version_upgrade_test.go:318: (dbg) Done: docker stop missing-upgrade-657825: (2.148055706s)
version_upgrade_test.go:323: (dbg) Run:  docker rm missing-upgrade-657825
version_upgrade_test.go:329: (dbg) Run:  out/minikube-linux-arm64 start -p missing-upgrade-657825 --memory=2200 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker
E0815 23:51:20.492867 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/skaffold-624546/client.crt: no such file or directory" logger="UnhandledError"
version_upgrade_test.go:329: (dbg) Done: out/minikube-linux-arm64 start -p missing-upgrade-657825 --memory=2200 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker: (1m9.79692094s)
helpers_test.go:175: Cleaning up "missing-upgrade-657825" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p missing-upgrade-657825
helpers_test.go:178: (dbg) Done: out/minikube-linux-arm64 delete -p missing-upgrade-657825: (2.590290777s)
--- PASS: TestMissingContainerUpgrade (111.70s)

                                                
                                    
TestStoppedBinaryUpgrade/Setup (0.72s)

                                                
                                                
=== RUN   TestStoppedBinaryUpgrade/Setup
--- PASS: TestStoppedBinaryUpgrade/Setup (0.72s)

                                                
                                    
TestStoppedBinaryUpgrade/Upgrade (102.37s)

                                                
                                                
=== RUN   TestStoppedBinaryUpgrade/Upgrade
version_upgrade_test.go:183: (dbg) Run:  /tmp/minikube-v1.26.0.3338022998 start -p stopped-upgrade-395569 --memory=2200 --vm-driver=docker  --container-runtime=docker
version_upgrade_test.go:183: (dbg) Done: /tmp/minikube-v1.26.0.3338022998 start -p stopped-upgrade-395569 --memory=2200 --vm-driver=docker  --container-runtime=docker: (55.563914523s)
version_upgrade_test.go:192: (dbg) Run:  /tmp/minikube-v1.26.0.3338022998 -p stopped-upgrade-395569 stop
version_upgrade_test.go:192: (dbg) Done: /tmp/minikube-v1.26.0.3338022998 -p stopped-upgrade-395569 stop: (11.226605039s)
version_upgrade_test.go:198: (dbg) Run:  out/minikube-linux-arm64 start -p stopped-upgrade-395569 --memory=2200 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker
E0815 23:54:03.595056 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/functional-300199/client.crt: no such file or directory" logger="UnhandledError"
version_upgrade_test.go:198: (dbg) Done: out/minikube-linux-arm64 start -p stopped-upgrade-395569 --memory=2200 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker: (35.581929313s)
--- PASS: TestStoppedBinaryUpgrade/Upgrade (102.37s)
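Unlike the running-binary case earlier, this path stops the cluster created by the old binary before upgrading, so the new binary has to bring up a stopped profile. Schematically (the <suffix> in the temp path is a placeholder):
    /tmp/minikube-v1.26.0.<suffix> start -p stopped-upgrade-395569 --memory=2200 --vm-driver=docker --container-runtime=docker
    /tmp/minikube-v1.26.0.<suffix> -p stopped-upgrade-395569 stop
    out/minikube-linux-arm64 start -p stopped-upgrade-395569 --memory=2200 --driver=docker --container-runtime=docker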

                                                
                                    
TestStoppedBinaryUpgrade/MinikubeLogs (1.55s)

                                                
                                                
=== RUN   TestStoppedBinaryUpgrade/MinikubeLogs
version_upgrade_test.go:206: (dbg) Run:  out/minikube-linux-arm64 logs -p stopped-upgrade-395569
version_upgrade_test.go:206: (dbg) Done: out/minikube-linux-arm64 logs -p stopped-upgrade-395569: (1.547465707s)
--- PASS: TestStoppedBinaryUpgrade/MinikubeLogs (1.55s)

                                                
                                    
TestPause/serial/Start (80.01s)

                                                
                                                
=== RUN   TestPause/serial/Start
pause_test.go:80: (dbg) Run:  out/minikube-linux-arm64 start -p pause-308838 --memory=2048 --install-addons=false --wait=all --driver=docker  --container-runtime=docker
E0815 23:54:36.182195 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/addons-083442/client.crt: no such file or directory" logger="UnhandledError"
pause_test.go:80: (dbg) Done: out/minikube-linux-arm64 start -p pause-308838 --memory=2048 --install-addons=false --wait=all --driver=docker  --container-runtime=docker: (1m20.01432138s)
--- PASS: TestPause/serial/Start (80.01s)

                                                
                                    
TestNoKubernetes/serial/StartNoK8sWithVersion (0.09s)

                                                
                                                
=== RUN   TestNoKubernetes/serial/StartNoK8sWithVersion
no_kubernetes_test.go:83: (dbg) Run:  out/minikube-linux-arm64 start -p NoKubernetes-188965 --no-kubernetes --kubernetes-version=1.20 --driver=docker  --container-runtime=docker
no_kubernetes_test.go:83: (dbg) Non-zero exit: out/minikube-linux-arm64 start -p NoKubernetes-188965 --no-kubernetes --kubernetes-version=1.20 --driver=docker  --container-runtime=docker: exit status 14 (91.116065ms)

                                                
                                                
-- stdout --
	* [NoKubernetes-188965] minikube v1.33.1 on Ubuntu 20.04 (arm64)
	  - MINIKUBE_LOCATION=19452
	  - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	  - KUBECONFIG=/home/jenkins/minikube-integration/19452-2026001/kubeconfig
	  - MINIKUBE_HOME=/home/jenkins/minikube-integration/19452-2026001/.minikube
	  - MINIKUBE_BIN=out/minikube-linux-arm64
	  - MINIKUBE_FORCE_SYSTEMD=
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	X Exiting due to MK_USAGE: cannot specify --kubernetes-version with --no-kubernetes,
	to unset a global config run:
	
	$ minikube config unset kubernetes-version

                                                
                                                
** /stderr **
--- PASS: TestNoKubernetes/serial/StartNoK8sWithVersion (0.09s)

                                                
                                    
TestNoKubernetes/serial/StartWithK8s (36.48s)

                                                
                                                
=== RUN   TestNoKubernetes/serial/StartWithK8s
no_kubernetes_test.go:95: (dbg) Run:  out/minikube-linux-arm64 start -p NoKubernetes-188965 --driver=docker  --container-runtime=docker
no_kubernetes_test.go:95: (dbg) Done: out/minikube-linux-arm64 start -p NoKubernetes-188965 --driver=docker  --container-runtime=docker: (36.115681852s)
no_kubernetes_test.go:200: (dbg) Run:  out/minikube-linux-arm64 -p NoKubernetes-188965 status -o json
--- PASS: TestNoKubernetes/serial/StartWithK8s (36.48s)

                                                
                                    
TestNoKubernetes/serial/StartWithStopK8s (17.74s)

                                                
                                                
=== RUN   TestNoKubernetes/serial/StartWithStopK8s
no_kubernetes_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p NoKubernetes-188965 --no-kubernetes --driver=docker  --container-runtime=docker
no_kubernetes_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p NoKubernetes-188965 --no-kubernetes --driver=docker  --container-runtime=docker: (15.697288124s)
no_kubernetes_test.go:200: (dbg) Run:  out/minikube-linux-arm64 -p NoKubernetes-188965 status -o json
no_kubernetes_test.go:200: (dbg) Non-zero exit: out/minikube-linux-arm64 -p NoKubernetes-188965 status -o json: exit status 2 (296.543261ms)

                                                
                                                
-- stdout --
	{"Name":"NoKubernetes-188965","Host":"Running","Kubelet":"Stopped","APIServer":"Stopped","Kubeconfig":"Configured","Worker":false}

                                                
                                                
-- /stdout --
no_kubernetes_test.go:124: (dbg) Run:  out/minikube-linux-arm64 delete -p NoKubernetes-188965
no_kubernetes_test.go:124: (dbg) Done: out/minikube-linux-arm64 delete -p NoKubernetes-188965: (1.749166756s)
--- PASS: TestNoKubernetes/serial/StartWithStopK8s (17.74s)

                                                
                                    
TestPause/serial/SecondStartNoReconfiguration (29.16s)

                                                
                                                
=== RUN   TestPause/serial/SecondStartNoReconfiguration
pause_test.go:92: (dbg) Run:  out/minikube-linux-arm64 start -p pause-308838 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker
E0815 23:55:39.509790 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/skaffold-624546/client.crt: no such file or directory" logger="UnhandledError"
pause_test.go:92: (dbg) Done: out/minikube-linux-arm64 start -p pause-308838 --alsologtostderr -v=1 --driver=docker  --container-runtime=docker: (29.12750824s)
--- PASS: TestPause/serial/SecondStartNoReconfiguration (29.16s)

                                                
                                    
TestNoKubernetes/serial/Start (7.12s)

                                                
                                                
=== RUN   TestNoKubernetes/serial/Start
no_kubernetes_test.go:136: (dbg) Run:  out/minikube-linux-arm64 start -p NoKubernetes-188965 --no-kubernetes --driver=docker  --container-runtime=docker
no_kubernetes_test.go:136: (dbg) Done: out/minikube-linux-arm64 start -p NoKubernetes-188965 --no-kubernetes --driver=docker  --container-runtime=docker: (7.123822134s)
--- PASS: TestNoKubernetes/serial/Start (7.12s)

                                                
                                    
TestNoKubernetes/serial/VerifyK8sNotRunning (0.39s)

                                                
                                                
=== RUN   TestNoKubernetes/serial/VerifyK8sNotRunning
no_kubernetes_test.go:147: (dbg) Run:  out/minikube-linux-arm64 ssh -p NoKubernetes-188965 "sudo systemctl is-active --quiet service kubelet"
no_kubernetes_test.go:147: (dbg) Non-zero exit: out/minikube-linux-arm64 ssh -p NoKubernetes-188965 "sudo systemctl is-active --quiet service kubelet": exit status 1 (394.626529ms)

                                                
                                                
** stderr ** 
	ssh: Process exited with status 3

                                                
                                                
** /stderr **
--- PASS: TestNoKubernetes/serial/VerifyK8sNotRunning (0.39s)

                                                
                                    
TestNoKubernetes/serial/ProfileList (1.12s)

                                                
                                                
=== RUN   TestNoKubernetes/serial/ProfileList
no_kubernetes_test.go:169: (dbg) Run:  out/minikube-linux-arm64 profile list
no_kubernetes_test.go:179: (dbg) Run:  out/minikube-linux-arm64 profile list --output=json
--- PASS: TestNoKubernetes/serial/ProfileList (1.12s)

                                                
                                    
TestNoKubernetes/serial/Stop (1.24s)

                                                
                                                
=== RUN   TestNoKubernetes/serial/Stop
no_kubernetes_test.go:158: (dbg) Run:  out/minikube-linux-arm64 stop -p NoKubernetes-188965
no_kubernetes_test.go:158: (dbg) Done: out/minikube-linux-arm64 stop -p NoKubernetes-188965: (1.241867874s)
--- PASS: TestNoKubernetes/serial/Stop (1.24s)

                                                
                                    
TestNoKubernetes/serial/StartNoArgs (8.21s)

                                                
                                                
=== RUN   TestNoKubernetes/serial/StartNoArgs
no_kubernetes_test.go:191: (dbg) Run:  out/minikube-linux-arm64 start -p NoKubernetes-188965 --driver=docker  --container-runtime=docker
no_kubernetes_test.go:191: (dbg) Done: out/minikube-linux-arm64 start -p NoKubernetes-188965 --driver=docker  --container-runtime=docker: (8.211981593s)
--- PASS: TestNoKubernetes/serial/StartNoArgs (8.21s)

                                                
                                    
TestNoKubernetes/serial/VerifyK8sNotRunningSecond (0.59s)

                                                
                                                
=== RUN   TestNoKubernetes/serial/VerifyK8sNotRunningSecond
no_kubernetes_test.go:147: (dbg) Run:  out/minikube-linux-arm64 ssh -p NoKubernetes-188965 "sudo systemctl is-active --quiet service kubelet"
E0815 23:56:00.530067 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/functional-300199/client.crt: no such file or directory" logger="UnhandledError"
no_kubernetes_test.go:147: (dbg) Non-zero exit: out/minikube-linux-arm64 ssh -p NoKubernetes-188965 "sudo systemctl is-active --quiet service kubelet": exit status 1 (588.027051ms)

                                                
                                                
** stderr ** 
	ssh: Process exited with status 3

                                                
                                                
** /stderr **
--- PASS: TestNoKubernetes/serial/VerifyK8sNotRunningSecond (0.59s)

                                                
                                    
TestNetworkPlugins/group/auto/Start (77.95s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/auto/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p auto-055531 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --driver=docker  --container-runtime=docker
net_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p auto-055531 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --driver=docker  --container-runtime=docker: (1m17.948376316s)
--- PASS: TestNetworkPlugins/group/auto/Start (77.95s)

                                                
                                    
TestPause/serial/Pause (0.72s)

                                                
                                                
=== RUN   TestPause/serial/Pause
pause_test.go:110: (dbg) Run:  out/minikube-linux-arm64 pause -p pause-308838 --alsologtostderr -v=5
--- PASS: TestPause/serial/Pause (0.72s)

                                                
                                    
TestPause/serial/VerifyStatus (0.41s)

                                                
                                                
=== RUN   TestPause/serial/VerifyStatus
status_test.go:76: (dbg) Run:  out/minikube-linux-arm64 status -p pause-308838 --output=json --layout=cluster
status_test.go:76: (dbg) Non-zero exit: out/minikube-linux-arm64 status -p pause-308838 --output=json --layout=cluster: exit status 2 (405.535088ms)

                                                
                                                
-- stdout --
	{"Name":"pause-308838","StatusCode":418,"StatusName":"Paused","Step":"Done","StepDetail":"* Paused 12 containers in: kube-system, kubernetes-dashboard, storage-gluster, istio-operator","BinaryVersion":"v1.33.1","Components":{"kubeconfig":{"Name":"kubeconfig","StatusCode":200,"StatusName":"OK"}},"Nodes":[{"Name":"pause-308838","StatusCode":200,"StatusName":"OK","Components":{"apiserver":{"Name":"apiserver","StatusCode":418,"StatusName":"Paused"},"kubelet":{"Name":"kubelet","StatusCode":405,"StatusName":"Stopped"}}}]}

                                                
                                                
-- /stdout --
--- PASS: TestPause/serial/VerifyStatus (0.41s)

                                                
                                    
TestPause/serial/Unpause (0.83s)

                                                
                                                
=== RUN   TestPause/serial/Unpause
pause_test.go:121: (dbg) Run:  out/minikube-linux-arm64 unpause -p pause-308838 --alsologtostderr -v=5
--- PASS: TestPause/serial/Unpause (0.83s)

                                                
                                    
TestPause/serial/PauseAgain (0.96s)

                                                
                                                
=== RUN   TestPause/serial/PauseAgain
pause_test.go:110: (dbg) Run:  out/minikube-linux-arm64 pause -p pause-308838 --alsologtostderr -v=5
--- PASS: TestPause/serial/PauseAgain (0.96s)

                                                
                                    
TestPause/serial/DeletePaused (2.72s)

                                                
                                                
=== RUN   TestPause/serial/DeletePaused
pause_test.go:132: (dbg) Run:  out/minikube-linux-arm64 delete -p pause-308838 --alsologtostderr -v=5
E0815 23:56:07.217351 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/skaffold-624546/client.crt: no such file or directory" logger="UnhandledError"
pause_test.go:132: (dbg) Done: out/minikube-linux-arm64 delete -p pause-308838 --alsologtostderr -v=5: (2.717991888s)
--- PASS: TestPause/serial/DeletePaused (2.72s)

                                                
                                    
TestPause/serial/VerifyDeletedResources (0.31s)

                                                
                                                
=== RUN   TestPause/serial/VerifyDeletedResources
pause_test.go:142: (dbg) Run:  out/minikube-linux-arm64 profile list --output json
pause_test.go:168: (dbg) Run:  docker ps -a
pause_test.go:173: (dbg) Run:  docker volume inspect pause-308838
pause_test.go:173: (dbg) Non-zero exit: docker volume inspect pause-308838: exit status 1 (22.30085ms)

                                                
                                                
-- stdout --
	[]

                                                
                                                
-- /stdout --
** stderr ** 
	Error response from daemon: get pause-308838: no such volume

                                                
                                                
** /stderr **
pause_test.go:178: (dbg) Run:  docker network ls
--- PASS: TestPause/serial/VerifyDeletedResources (0.31s)

                                                
                                    
TestNetworkPlugins/group/flannel/Start (60.79s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/flannel/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p flannel-055531 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=flannel --driver=docker  --container-runtime=docker
net_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p flannel-055531 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=flannel --driver=docker  --container-runtime=docker: (1m0.788799809s)
--- PASS: TestNetworkPlugins/group/flannel/Start (60.79s)

                                                
                                    
TestNetworkPlugins/group/flannel/ControllerPod (6.01s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/flannel/ControllerPod
net_test.go:120: (dbg) TestNetworkPlugins/group/flannel/ControllerPod: waiting 10m0s for pods matching "app=flannel" in namespace "kube-flannel" ...
helpers_test.go:344: "kube-flannel-ds-57rrj" [0da6bdc8-26a7-42a4-870b-a45ede2c633c] Running
net_test.go:120: (dbg) TestNetworkPlugins/group/flannel/ControllerPod: app=flannel healthy within 6.004084576s
--- PASS: TestNetworkPlugins/group/flannel/ControllerPod (6.01s)

                                                
                                    
TestNetworkPlugins/group/flannel/KubeletFlags (0.3s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/flannel/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-arm64 ssh -p flannel-055531 "pgrep -a kubelet"
--- PASS: TestNetworkPlugins/group/flannel/KubeletFlags (0.30s)

                                                
                                    
TestNetworkPlugins/group/flannel/NetCatPod (11.27s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/flannel/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context flannel-055531 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/flannel/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:344: "netcat-6fc964789b-gv7pz" [081c41da-1340-44ae-8280-a129c1a1539a] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:344: "netcat-6fc964789b-gv7pz" [081c41da-1340-44ae-8280-a129c1a1539a] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/flannel/NetCatPod: app=netcat healthy within 11.004496471s
--- PASS: TestNetworkPlugins/group/flannel/NetCatPod (11.27s)

                                                
                                    
TestNetworkPlugins/group/auto/KubeletFlags (0.34s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/auto/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-arm64 ssh -p auto-055531 "pgrep -a kubelet"
--- PASS: TestNetworkPlugins/group/auto/KubeletFlags (0.34s)

                                                
                                    
TestNetworkPlugins/group/auto/NetCatPod (11.36s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/auto/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context auto-055531 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/auto/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:344: "netcat-6fc964789b-x5m6p" [2294e1d6-1c4a-46e0-8aa3-491f732f5251] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:344: "netcat-6fc964789b-x5m6p" [2294e1d6-1c4a-46e0-8aa3-491f732f5251] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/auto/NetCatPod: app=netcat healthy within 11.003415408s
--- PASS: TestNetworkPlugins/group/auto/NetCatPod (11.36s)

                                                
                                    
TestNetworkPlugins/group/flannel/DNS (0.22s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/flannel/DNS
net_test.go:175: (dbg) Run:  kubectl --context flannel-055531 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/flannel/DNS (0.22s)

                                                
                                    
TestNetworkPlugins/group/flannel/Localhost (0.16s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/flannel/Localhost
net_test.go:194: (dbg) Run:  kubectl --context flannel-055531 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/flannel/Localhost (0.16s)

                                                
                                    
TestNetworkPlugins/group/flannel/HairPin (0.17s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/flannel/HairPin
net_test.go:264: (dbg) Run:  kubectl --context flannel-055531 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/flannel/HairPin (0.17s)

                                                
                                    
TestNetworkPlugins/group/auto/DNS (0.19s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/auto/DNS
net_test.go:175: (dbg) Run:  kubectl --context auto-055531 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/auto/DNS (0.19s)

                                                
                                    
TestNetworkPlugins/group/auto/Localhost (0.16s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/auto/Localhost
net_test.go:194: (dbg) Run:  kubectl --context auto-055531 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/auto/Localhost (0.16s)

                                                
                                    
TestNetworkPlugins/group/auto/HairPin (0.18s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/auto/HairPin
net_test.go:264: (dbg) Run:  kubectl --context auto-055531 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/auto/HairPin (0.18s)

                                                
                                    
TestNetworkPlugins/group/enable-default-cni/Start (81.43s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/enable-default-cni/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p enable-default-cni-055531 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --enable-default-cni=true --driver=docker  --container-runtime=docker
net_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p enable-default-cni-055531 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --enable-default-cni=true --driver=docker  --container-runtime=docker: (1m21.429899339s)
--- PASS: TestNetworkPlugins/group/enable-default-cni/Start (81.43s)

                                                
                                    
TestNetworkPlugins/group/kindnet/Start (75.88s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kindnet/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p kindnet-055531 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=kindnet --driver=docker  --container-runtime=docker
net_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p kindnet-055531 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=kindnet --driver=docker  --container-runtime=docker: (1m15.884092975s)
--- PASS: TestNetworkPlugins/group/kindnet/Start (75.88s)

                                                
                                    
TestNetworkPlugins/group/kindnet/ControllerPod (6.01s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kindnet/ControllerPod
net_test.go:120: (dbg) TestNetworkPlugins/group/kindnet/ControllerPod: waiting 10m0s for pods matching "app=kindnet" in namespace "kube-system" ...
helpers_test.go:344: "kindnet-qf9xr" [a75af326-7bd4-47e1-8451-20df98a076d9] Running
net_test.go:120: (dbg) TestNetworkPlugins/group/kindnet/ControllerPod: app=kindnet healthy within 6.004887014s
--- PASS: TestNetworkPlugins/group/kindnet/ControllerPod (6.01s)

                                                
                                    
TestNetworkPlugins/group/enable-default-cni/KubeletFlags (0.28s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/enable-default-cni/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-arm64 ssh -p enable-default-cni-055531 "pgrep -a kubelet"
--- PASS: TestNetworkPlugins/group/enable-default-cni/KubeletFlags (0.28s)

                                                
                                    
TestNetworkPlugins/group/enable-default-cni/NetCatPod (11.28s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/enable-default-cni/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context enable-default-cni-055531 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/enable-default-cni/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:344: "netcat-6fc964789b-tfc66" [f0bb2906-9ba7-4c1d-ada8-7930691dc093] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:344: "netcat-6fc964789b-tfc66" [f0bb2906-9ba7-4c1d-ada8-7930691dc093] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/enable-default-cni/NetCatPod: app=netcat healthy within 11.003985406s
--- PASS: TestNetworkPlugins/group/enable-default-cni/NetCatPod (11.28s)

                                                
                                    
TestNetworkPlugins/group/kindnet/KubeletFlags (0.36s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kindnet/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-arm64 ssh -p kindnet-055531 "pgrep -a kubelet"
--- PASS: TestNetworkPlugins/group/kindnet/KubeletFlags (0.36s)

                                                
                                    
TestNetworkPlugins/group/kindnet/NetCatPod (9.33s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kindnet/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context kindnet-055531 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/kindnet/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:344: "netcat-6fc964789b-qstd7" [89815e99-414d-4888-97fb-a72c180b51af] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:344: "netcat-6fc964789b-qstd7" [89815e99-414d-4888-97fb-a72c180b51af] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/kindnet/NetCatPod: app=netcat healthy within 9.004342088s
--- PASS: TestNetworkPlugins/group/kindnet/NetCatPod (9.33s)

                                                
                                    
TestNetworkPlugins/group/enable-default-cni/DNS (0.21s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/enable-default-cni/DNS
net_test.go:175: (dbg) Run:  kubectl --context enable-default-cni-055531 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/enable-default-cni/DNS (0.21s)

                                                
                                    
TestNetworkPlugins/group/enable-default-cni/Localhost (0.16s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/enable-default-cni/Localhost
net_test.go:194: (dbg) Run:  kubectl --context enable-default-cni-055531 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/enable-default-cni/Localhost (0.16s)

                                                
                                    
TestNetworkPlugins/group/enable-default-cni/HairPin (0.18s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/enable-default-cni/HairPin
net_test.go:264: (dbg) Run:  kubectl --context enable-default-cni-055531 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/enable-default-cni/HairPin (0.18s)

                                                
                                    
TestNetworkPlugins/group/kindnet/DNS (0.26s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kindnet/DNS
net_test.go:175: (dbg) Run:  kubectl --context kindnet-055531 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/kindnet/DNS (0.26s)

                                                
                                    
TestNetworkPlugins/group/kindnet/Localhost (0.18s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kindnet/Localhost
net_test.go:194: (dbg) Run:  kubectl --context kindnet-055531 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/kindnet/Localhost (0.18s)

                                                
                                    
TestNetworkPlugins/group/kindnet/HairPin (0.16s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kindnet/HairPin
net_test.go:264: (dbg) Run:  kubectl --context kindnet-055531 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/kindnet/HairPin (0.16s)

                                                
                                    
TestNetworkPlugins/group/bridge/Start (84.5s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/bridge/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p bridge-055531 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=bridge --driver=docker  --container-runtime=docker
net_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p bridge-055531 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=bridge --driver=docker  --container-runtime=docker: (1m24.497131054s)
--- PASS: TestNetworkPlugins/group/bridge/Start (84.50s)

                                                
                                    
TestNetworkPlugins/group/kubenet/Start (90.97s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kubenet/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p kubenet-055531 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --network-plugin=kubenet --driver=docker  --container-runtime=docker
E0816 00:00:39.510202 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/skaffold-624546/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:01:00.530356 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/functional-300199/client.crt: no such file or directory" logger="UnhandledError"
net_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p kubenet-055531 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --network-plugin=kubenet --driver=docker  --container-runtime=docker: (1m30.968138291s)
--- PASS: TestNetworkPlugins/group/kubenet/Start (90.97s)

                                                
                                    
TestNetworkPlugins/group/bridge/KubeletFlags (0.3s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/bridge/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-arm64 ssh -p bridge-055531 "pgrep -a kubelet"
--- PASS: TestNetworkPlugins/group/bridge/KubeletFlags (0.30s)

                                                
                                    
TestNetworkPlugins/group/bridge/NetCatPod (11.28s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/bridge/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context bridge-055531 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/bridge/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:344: "netcat-6fc964789b-hd9g2" [27dcd3c3-e5b8-4872-a297-a7bda2e8a70a] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:344: "netcat-6fc964789b-hd9g2" [27dcd3c3-e5b8-4872-a297-a7bda2e8a70a] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/bridge/NetCatPod: app=netcat healthy within 11.004800345s
--- PASS: TestNetworkPlugins/group/bridge/NetCatPod (11.28s)

                                                
                                    
TestNetworkPlugins/group/kubenet/KubeletFlags (0.27s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kubenet/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-arm64 ssh -p kubenet-055531 "pgrep -a kubelet"
--- PASS: TestNetworkPlugins/group/kubenet/KubeletFlags (0.27s)

                                                
                                    
TestNetworkPlugins/group/kubenet/NetCatPod (10.27s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kubenet/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context kubenet-055531 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/kubenet/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:344: "netcat-6fc964789b-t9bbn" [204e82b1-7803-4c26-bac0-b24a3d02216e] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:344: "netcat-6fc964789b-t9bbn" [204e82b1-7803-4c26-bac0-b24a3d02216e] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/kubenet/NetCatPod: app=netcat healthy within 10.005928354s
--- PASS: TestNetworkPlugins/group/kubenet/NetCatPod (10.27s)

                                                
                                    
TestNetworkPlugins/group/bridge/DNS (0.23s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/bridge/DNS
net_test.go:175: (dbg) Run:  kubectl --context bridge-055531 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/bridge/DNS (0.23s)

                                                
                                    
TestNetworkPlugins/group/bridge/Localhost (0.17s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/bridge/Localhost
net_test.go:194: (dbg) Run:  kubectl --context bridge-055531 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/bridge/Localhost (0.17s)

                                                
                                    
TestNetworkPlugins/group/bridge/HairPin (0.18s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/bridge/HairPin
net_test.go:264: (dbg) Run:  kubectl --context bridge-055531 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/bridge/HairPin (0.18s)

                                                
                                    
TestNetworkPlugins/group/kubenet/DNS (0.24s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kubenet/DNS
net_test.go:175: (dbg) Run:  kubectl --context kubenet-055531 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/kubenet/DNS (0.24s)

                                                
                                    
TestNetworkPlugins/group/kubenet/Localhost (0.27s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kubenet/Localhost
net_test.go:194: (dbg) Run:  kubectl --context kubenet-055531 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/kubenet/Localhost (0.27s)

                                                
                                    
TestNetworkPlugins/group/kubenet/HairPin (0.27s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/kubenet/HairPin
net_test.go:264: (dbg) Run:  kubectl --context kubenet-055531 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/kubenet/HairPin (0.27s)

                                                
                                    
TestNetworkPlugins/group/custom-flannel/Start (65.76s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/custom-flannel/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p custom-flannel-055531 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=testdata/kube-flannel.yaml --driver=docker  --container-runtime=docker
net_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p custom-flannel-055531 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=testdata/kube-flannel.yaml --driver=docker  --container-runtime=docker: (1m5.756587169s)
--- PASS: TestNetworkPlugins/group/custom-flannel/Start (65.76s)

                                                
                                    
TestNetworkPlugins/group/calico/Start (77.54s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/calico/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p calico-055531 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=calico --driver=docker  --container-runtime=docker
E0816 00:02:11.226325 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:02:11.232738 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:02:11.244091 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:02:11.265424 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:02:11.307080 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:02:11.389221 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:02:11.550569 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:02:11.874020 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:02:12.515895 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:02:13.797420 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:02:16.359323 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:02:20.772643 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/auto-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:02:20.779161 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/auto-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:02:20.790490 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/auto-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:02:20.811805 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/auto-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:02:20.853134 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/auto-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:02:20.934520 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/auto-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:02:21.095973 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/auto-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:02:21.417710 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/auto-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:02:21.481143 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:02:22.059731 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/auto-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:02:23.341560 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/auto-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:02:25.903323 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/auto-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:02:31.025387 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/auto-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:02:31.722377 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:02:41.267622 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/auto-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:02:52.203667 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
net_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p calico-055531 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=calico --driver=docker  --container-runtime=docker: (1m17.542631046s)
--- PASS: TestNetworkPlugins/group/calico/Start (77.54s)

                                                
                                    
TestNetworkPlugins/group/custom-flannel/KubeletFlags (0.51s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/custom-flannel/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-arm64 ssh -p custom-flannel-055531 "pgrep -a kubelet"
--- PASS: TestNetworkPlugins/group/custom-flannel/KubeletFlags (0.51s)

                                                
                                    
TestNetworkPlugins/group/custom-flannel/NetCatPod (12.43s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/custom-flannel/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context custom-flannel-055531 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/custom-flannel/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:344: "netcat-6fc964789b-jj42c" [6311f78f-73fb-482f-93fa-08107272b47a] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
E0816 00:03:01.749050 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/auto-055531/client.crt: no such file or directory" logger="UnhandledError"
helpers_test.go:344: "netcat-6fc964789b-jj42c" [6311f78f-73fb-482f-93fa-08107272b47a] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/custom-flannel/NetCatPod: app=netcat healthy within 12.00447171s
--- PASS: TestNetworkPlugins/group/custom-flannel/NetCatPod (12.43s)

                                                
                                    
TestNetworkPlugins/group/custom-flannel/DNS (0.28s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/custom-flannel/DNS
net_test.go:175: (dbg) Run:  kubectl --context custom-flannel-055531 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/custom-flannel/DNS (0.28s)

                                                
                                    
TestNetworkPlugins/group/custom-flannel/Localhost (0.23s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/custom-flannel/Localhost
net_test.go:194: (dbg) Run:  kubectl --context custom-flannel-055531 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/custom-flannel/Localhost (0.23s)

                                                
                                    
TestNetworkPlugins/group/custom-flannel/HairPin (0.25s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/custom-flannel/HairPin
net_test.go:264: (dbg) Run:  kubectl --context custom-flannel-055531 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/custom-flannel/HairPin (0.25s)

                                                
                                    
TestNetworkPlugins/group/calico/ControllerPod (6.01s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/calico/ControllerPod
net_test.go:120: (dbg) TestNetworkPlugins/group/calico/ControllerPod: waiting 10m0s for pods matching "k8s-app=calico-node" in namespace "kube-system" ...
helpers_test.go:344: "calico-node-l9cvh" [c8c692ec-d847-4eb2-a72c-1022eedae965] Running
net_test.go:120: (dbg) TestNetworkPlugins/group/calico/ControllerPod: k8s-app=calico-node healthy within 6.005092343s
--- PASS: TestNetworkPlugins/group/calico/ControllerPod (6.01s)

                                                
                                    
TestNetworkPlugins/group/calico/KubeletFlags (0.41s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/calico/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-arm64 ssh -p calico-055531 "pgrep -a kubelet"
--- PASS: TestNetworkPlugins/group/calico/KubeletFlags (0.41s)

                                                
                                    
TestNetworkPlugins/group/calico/NetCatPod (11.39s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/calico/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context calico-055531 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/calico/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:344: "netcat-6fc964789b-jkf7j" [5f0106bb-9554-40cb-9278-6dfecae58871] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:344: "netcat-6fc964789b-jkf7j" [5f0106bb-9554-40cb-9278-6dfecae58871] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/calico/NetCatPod: app=netcat healthy within 11.005749343s
--- PASS: TestNetworkPlugins/group/calico/NetCatPod (11.39s)

                                                
                                    
TestNetworkPlugins/group/false/Start (61.21s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/false/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-arm64 start -p false-055531 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=false --driver=docker  --container-runtime=docker
net_test.go:112: (dbg) Done: out/minikube-linux-arm64 start -p false-055531 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=false --driver=docker  --container-runtime=docker: (1m1.21321701s)
--- PASS: TestNetworkPlugins/group/false/Start (61.21s)

                                                
                                    
TestNetworkPlugins/group/calico/DNS (0.47s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/calico/DNS
net_test.go:175: (dbg) Run:  kubectl --context calico-055531 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/calico/DNS (0.47s)

                                                
                                    
TestNetworkPlugins/group/calico/Localhost (0.18s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/calico/Localhost
net_test.go:194: (dbg) Run:  kubectl --context calico-055531 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/calico/Localhost (0.18s)

                                                
                                    
TestNetworkPlugins/group/calico/HairPin (0.19s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/calico/HairPin
net_test.go:264: (dbg) Run:  kubectl --context calico-055531 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/calico/HairPin (0.19s)

                                                
                                    
TestStartStop/group/old-k8s-version/serial/FirstStart (151.9s)

                                                
                                                
=== RUN   TestStartStop/group/old-k8s-version/serial/FirstStart
start_stop_delete_test.go:186: (dbg) Run:  out/minikube-linux-arm64 start -p old-k8s-version-894472 --memory=2200 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker  --container-runtime=docker --kubernetes-version=v1.20.0
E0816 00:04:12.859242 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kindnet-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:04:12.865985 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kindnet-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:04:12.877276 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kindnet-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:04:12.898616 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kindnet-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:04:12.942909 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kindnet-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:04:13.025084 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kindnet-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:04:13.186286 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kindnet-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:04:13.507911 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kindnet-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:04:14.149732 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kindnet-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:04:15.430993 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kindnet-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:04:16.870064 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/enable-default-cni-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:04:16.876405 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/enable-default-cni-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:04:16.887752 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/enable-default-cni-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:04:16.909119 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/enable-default-cni-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:04:16.950492 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/enable-default-cni-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:04:17.032683 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/enable-default-cni-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:04:17.194285 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/enable-default-cni-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:04:17.516686 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/enable-default-cni-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:04:17.992331 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kindnet-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:04:18.163755 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/enable-default-cni-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:04:19.264152 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/addons-083442/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:04:19.446211 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/enable-default-cni-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:04:22.008360 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/enable-default-cni-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:04:23.114497 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kindnet-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:04:27.130172 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/enable-default-cni-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:04:33.355977 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kindnet-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:04:36.183198 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/addons-083442/client.crt: no such file or directory" logger="UnhandledError"
start_stop_delete_test.go:186: (dbg) Done: out/minikube-linux-arm64 start -p old-k8s-version-894472 --memory=2200 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker  --container-runtime=docker --kubernetes-version=v1.20.0: (2m31.895864291s)
--- PASS: TestStartStop/group/old-k8s-version/serial/FirstStart (151.90s)
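A minimal sketch for re-running a single subtest from this suite locally, assuming a minikube source checkout with the integration tests under test/integration (path assumed) and Go plus Docker available; the -run pattern is the subtest name from the log above, the timeout is an arbitrary assumption, and additional repo-specific test flags may be required:

  # hypothetical local invocation, not part of this report
  go test ./test/integration -run 'TestStartStop/group/old-k8s-version/serial/FirstStart' -v -timeout 40m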

TestNetworkPlugins/group/false/KubeletFlags (0.29s)
=== RUN   TestNetworkPlugins/group/false/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-arm64 ssh -p false-055531 "pgrep -a kubelet"
--- PASS: TestNetworkPlugins/group/false/KubeletFlags (0.29s)

TestNetworkPlugins/group/false/NetCatPod (13.25s)
=== RUN   TestNetworkPlugins/group/false/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context false-055531 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/false/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:344: "netcat-6fc964789b-k9jc4" [98cf3c33-5105-40dc-ae48-0e044970c147] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
E0816 00:04:37.371516 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/enable-default-cni-055531/client.crt: no such file or directory" logger="UnhandledError"
helpers_test.go:344: "netcat-6fc964789b-k9jc4" [98cf3c33-5105-40dc-ae48-0e044970c147] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/false/NetCatPod: app=netcat healthy within 13.00384174s
--- PASS: TestNetworkPlugins/group/false/NetCatPod (13.25s)

TestNetworkPlugins/group/false/DNS (0.19s)
=== RUN   TestNetworkPlugins/group/false/DNS
net_test.go:175: (dbg) Run:  kubectl --context false-055531 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/false/DNS (0.19s)

TestNetworkPlugins/group/false/Localhost (0.17s)
=== RUN   TestNetworkPlugins/group/false/Localhost
net_test.go:194: (dbg) Run:  kubectl --context false-055531 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/false/Localhost (0.17s)

TestNetworkPlugins/group/false/HairPin (0.2s)
=== RUN   TestNetworkPlugins/group/false/HairPin
net_test.go:264: (dbg) Run:  kubectl --context false-055531 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/false/HairPin (0.20s)

TestStartStop/group/no-preload/serial/FirstStart (81.62s)
=== RUN   TestStartStop/group/no-preload/serial/FirstStart
start_stop_delete_test.go:186: (dbg) Run:  out/minikube-linux-arm64 start -p no-preload-158739 --memory=2200 --alsologtostderr --wait=true --preload=false --driver=docker  --container-runtime=docker --kubernetes-version=v1.31.0
E0816 00:05:34.799115 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kindnet-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:05:38.814565 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/enable-default-cni-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:05:39.510124 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/skaffold-624546/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:06:00.530292 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/functional-300199/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:06:17.956811 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/bridge-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:06:17.963267 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/bridge-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:06:17.974739 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/bridge-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:06:17.996084 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/bridge-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:06:18.037591 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/bridge-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:06:18.119145 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/bridge-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:06:18.280614 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/bridge-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:06:18.602449 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/bridge-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:06:19.244283 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/bridge-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:06:20.525769 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/bridge-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:06:23.087187 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/bridge-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:06:25.654905 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kubenet-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:06:25.661339 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kubenet-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:06:25.672863 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kubenet-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:06:25.694232 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kubenet-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:06:25.735585 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kubenet-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:06:25.817037 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kubenet-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:06:25.978588 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kubenet-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:06:26.300002 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kubenet-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:06:26.942261 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kubenet-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:06:28.209242 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/bridge-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:06:28.223790 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kubenet-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:06:30.785863 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kubenet-055531/client.crt: no such file or directory" logger="UnhandledError"
start_stop_delete_test.go:186: (dbg) Done: out/minikube-linux-arm64 start -p no-preload-158739 --memory=2200 --alsologtostderr --wait=true --preload=false --driver=docker  --container-runtime=docker --kubernetes-version=v1.31.0: (1m21.617131852s)
--- PASS: TestStartStop/group/no-preload/serial/FirstStart (81.62s)

TestStartStop/group/old-k8s-version/serial/DeployApp (8.58s)
=== RUN   TestStartStop/group/old-k8s-version/serial/DeployApp
start_stop_delete_test.go:196: (dbg) Run:  kubectl --context old-k8s-version-894472 create -f testdata/busybox.yaml
start_stop_delete_test.go:196: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:344: "busybox" [d5de3610-de39-4fbf-a381-e0c78f76cf86] Pending
helpers_test.go:344: "busybox" [d5de3610-de39-4fbf-a381-e0c78f76cf86] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
E0816 00:06:35.907960 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kubenet-055531/client.crt: no such file or directory" logger="UnhandledError"
helpers_test.go:344: "busybox" [d5de3610-de39-4fbf-a381-e0c78f76cf86] Running
E0816 00:06:38.451364 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/bridge-055531/client.crt: no such file or directory" logger="UnhandledError"
start_stop_delete_test.go:196: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: integration-test=busybox healthy within 8.007931359s
start_stop_delete_test.go:196: (dbg) Run:  kubectl --context old-k8s-version-894472 exec busybox -- /bin/sh -c "ulimit -n"
--- PASS: TestStartStop/group/old-k8s-version/serial/DeployApp (8.58s)
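Condensed as a shell sketch, the DeployApp check above creates the busybox pod, waits for it to become Ready, and reads its open-file limit; the create and exec commands are taken verbatim from the log, while the kubectl wait step is a hypothetical stand-in for the test's own pod polling:

  kubectl --context old-k8s-version-894472 create -f testdata/busybox.yaml
  kubectl --context old-k8s-version-894472 wait --for=condition=Ready pod/busybox --timeout=8m   # stand-in for the test's polling loop
  kubectl --context old-k8s-version-894472 exec busybox -- /bin/sh -c "ulimit -n"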

TestStartStop/group/no-preload/serial/DeployApp (7.45s)
=== RUN   TestStartStop/group/no-preload/serial/DeployApp
start_stop_delete_test.go:196: (dbg) Run:  kubectl --context no-preload-158739 create -f testdata/busybox.yaml
start_stop_delete_test.go:196: (dbg) TestStartStop/group/no-preload/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:344: "busybox" [c50c8580-54fd-4db9-9bf6-6aebae3ee75d] Pending
helpers_test.go:344: "busybox" [c50c8580-54fd-4db9-9bf6-6aebae3ee75d] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
helpers_test.go:344: "busybox" [c50c8580-54fd-4db9-9bf6-6aebae3ee75d] Running
start_stop_delete_test.go:196: (dbg) TestStartStop/group/no-preload/serial/DeployApp: integration-test=busybox healthy within 7.004349406s
start_stop_delete_test.go:196: (dbg) Run:  kubectl --context no-preload-158739 exec busybox -- /bin/sh -c "ulimit -n"
--- PASS: TestStartStop/group/no-preload/serial/DeployApp (7.45s)

TestStartStop/group/no-preload/serial/EnableAddonWhileActive (1.32s)
=== RUN   TestStartStop/group/no-preload/serial/EnableAddonWhileActive
start_stop_delete_test.go:205: (dbg) Run:  out/minikube-linux-arm64 addons enable metrics-server -p no-preload-158739 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain
start_stop_delete_test.go:205: (dbg) Done: out/minikube-linux-arm64 addons enable metrics-server -p no-preload-158739 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain: (1.180772837s)
start_stop_delete_test.go:215: (dbg) Run:  kubectl --context no-preload-158739 describe deploy/metrics-server -n kube-system
--- PASS: TestStartStop/group/no-preload/serial/EnableAddonWhileActive (1.32s)

TestStartStop/group/old-k8s-version/serial/EnableAddonWhileActive (1.75s)
=== RUN   TestStartStop/group/old-k8s-version/serial/EnableAddonWhileActive
start_stop_delete_test.go:205: (dbg) Run:  out/minikube-linux-arm64 addons enable metrics-server -p old-k8s-version-894472 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain
start_stop_delete_test.go:205: (dbg) Done: out/minikube-linux-arm64 addons enable metrics-server -p old-k8s-version-894472 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain: (1.565600907s)
start_stop_delete_test.go:215: (dbg) Run:  kubectl --context old-k8s-version-894472 describe deploy/metrics-server -n kube-system
--- PASS: TestStartStop/group/old-k8s-version/serial/EnableAddonWhileActive (1.75s)

TestStartStop/group/no-preload/serial/Stop (11.05s)
=== RUN   TestStartStop/group/no-preload/serial/Stop
start_stop_delete_test.go:228: (dbg) Run:  out/minikube-linux-arm64 stop -p no-preload-158739 --alsologtostderr -v=3
start_stop_delete_test.go:228: (dbg) Done: out/minikube-linux-arm64 stop -p no-preload-158739 --alsologtostderr -v=3: (11.051547052s)
--- PASS: TestStartStop/group/no-preload/serial/Stop (11.05s)

TestStartStop/group/old-k8s-version/serial/Stop (11.72s)
=== RUN   TestStartStop/group/old-k8s-version/serial/Stop
start_stop_delete_test.go:228: (dbg) Run:  out/minikube-linux-arm64 stop -p old-k8s-version-894472 --alsologtostderr -v=3
E0816 00:06:46.149822 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kubenet-055531/client.crt: no such file or directory" logger="UnhandledError"
start_stop_delete_test.go:228: (dbg) Done: out/minikube-linux-arm64 stop -p old-k8s-version-894472 --alsologtostderr -v=3: (11.721666532s)
--- PASS: TestStartStop/group/old-k8s-version/serial/Stop (11.72s)

TestStartStop/group/no-preload/serial/EnableAddonAfterStop (0.17s)
=== RUN   TestStartStop/group/no-preload/serial/EnableAddonAfterStop
start_stop_delete_test.go:239: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p no-preload-158739 -n no-preload-158739
start_stop_delete_test.go:239: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Host}} -p no-preload-158739 -n no-preload-158739: exit status 7 (64.713725ms)

-- stdout --
	Stopped
-- /stdout --
start_stop_delete_test.go:239: status error: exit status 7 (may be ok)
start_stop_delete_test.go:246: (dbg) Run:  out/minikube-linux-arm64 addons enable dashboard -p no-preload-158739 --images=MetricsScraper=registry.k8s.io/echoserver:1.4
--- PASS: TestStartStop/group/no-preload/serial/EnableAddonAfterStop (0.17s)

TestStartStop/group/no-preload/serial/SecondStart (281.1s)
=== RUN   TestStartStop/group/no-preload/serial/SecondStart
start_stop_delete_test.go:256: (dbg) Run:  out/minikube-linux-arm64 start -p no-preload-158739 --memory=2200 --alsologtostderr --wait=true --preload=false --driver=docker  --container-runtime=docker --kubernetes-version=v1.31.0
start_stop_delete_test.go:256: (dbg) Done: out/minikube-linux-arm64 start -p no-preload-158739 --memory=2200 --alsologtostderr --wait=true --preload=false --driver=docker  --container-runtime=docker --kubernetes-version=v1.31.0: (4m40.733459499s)
start_stop_delete_test.go:262: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p no-preload-158739 -n no-preload-158739
--- PASS: TestStartStop/group/no-preload/serial/SecondStart (281.10s)

TestStartStop/group/old-k8s-version/serial/EnableAddonAfterStop (0.26s)
=== RUN   TestStartStop/group/old-k8s-version/serial/EnableAddonAfterStop
start_stop_delete_test.go:239: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p old-k8s-version-894472 -n old-k8s-version-894472
start_stop_delete_test.go:239: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Host}} -p old-k8s-version-894472 -n old-k8s-version-894472: exit status 7 (110.971372ms)

-- stdout --
	Stopped
-- /stdout --
start_stop_delete_test.go:239: status error: exit status 7 (may be ok)
start_stop_delete_test.go:246: (dbg) Run:  out/minikube-linux-arm64 addons enable dashboard -p old-k8s-version-894472 --images=MetricsScraper=registry.k8s.io/echoserver:1.4
--- PASS: TestStartStop/group/old-k8s-version/serial/EnableAddonAfterStop (0.26s)

TestStartStop/group/no-preload/serial/UserAppExistsAfterStop (6.01s)
=== RUN   TestStartStop/group/no-preload/serial/UserAppExistsAfterStop
start_stop_delete_test.go:274: (dbg) TestStartStop/group/no-preload/serial/UserAppExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:344: "kubernetes-dashboard-695b96c756-t92vl" [48a10206-33c4-48e3-91fb-d97da3c4c671] Running
start_stop_delete_test.go:274: (dbg) TestStartStop/group/no-preload/serial/UserAppExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 6.004592452s
--- PASS: TestStartStop/group/no-preload/serial/UserAppExistsAfterStop (6.01s)

TestStartStop/group/no-preload/serial/AddonExistsAfterStop (5.11s)
=== RUN   TestStartStop/group/no-preload/serial/AddonExistsAfterStop
start_stop_delete_test.go:287: (dbg) TestStartStop/group/no-preload/serial/AddonExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:344: "kubernetes-dashboard-695b96c756-t92vl" [48a10206-33c4-48e3-91fb-d97da3c4c671] Running
E0816 00:11:45.657597 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/bridge-055531/client.crt: no such file or directory" logger="UnhandledError"
start_stop_delete_test.go:287: (dbg) TestStartStop/group/no-preload/serial/AddonExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 5.004201115s
start_stop_delete_test.go:291: (dbg) Run:  kubectl --context no-preload-158739 describe deploy/dashboard-metrics-scraper -n kubernetes-dashboard
--- PASS: TestStartStop/group/no-preload/serial/AddonExistsAfterStop (5.11s)

TestStartStop/group/no-preload/serial/VerifyKubernetesImages (0.22s)
=== RUN   TestStartStop/group/no-preload/serial/VerifyKubernetesImages
start_stop_delete_test.go:304: (dbg) Run:  out/minikube-linux-arm64 -p no-preload-158739 image list --format=json
start_stop_delete_test.go:304: Found non-minikube image: gcr.io/k8s-minikube/busybox:1.28.4-glibc
--- PASS: TestStartStop/group/no-preload/serial/VerifyKubernetesImages (0.22s)

TestStartStop/group/no-preload/serial/Pause (2.87s)
=== RUN   TestStartStop/group/no-preload/serial/Pause
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 pause -p no-preload-158739 --alsologtostderr -v=1
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p no-preload-158739 -n no-preload-158739
start_stop_delete_test.go:311: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.APIServer}} -p no-preload-158739 -n no-preload-158739: exit status 2 (339.552112ms)

-- stdout --
	Paused
-- /stdout --
start_stop_delete_test.go:311: status error: exit status 2 (may be ok)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Kubelet}} -p no-preload-158739 -n no-preload-158739
start_stop_delete_test.go:311: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Kubelet}} -p no-preload-158739 -n no-preload-158739: exit status 2 (320.671617ms)

-- stdout --
	Stopped
-- /stdout --
start_stop_delete_test.go:311: status error: exit status 2 (may be ok)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 unpause -p no-preload-158739 --alsologtostderr -v=1
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p no-preload-158739 -n no-preload-158739
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Kubelet}} -p no-preload-158739 -n no-preload-158739
--- PASS: TestStartStop/group/no-preload/serial/Pause (2.87s)
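The Pause subtest's verification sequence, condensed as a shell sketch using the same commands and expected outputs recorded above (non-zero exit status 2 from the status checks is expected while the cluster is paused):

  out/minikube-linux-arm64 pause -p no-preload-158739 --alsologtostderr -v=1
  out/minikube-linux-arm64 status --format={{.APIServer}} -p no-preload-158739 -n no-preload-158739   # "Paused", exit status 2
  out/minikube-linux-arm64 status --format={{.Kubelet}} -p no-preload-158739 -n no-preload-158739     # "Stopped", exit status 2
  out/minikube-linux-arm64 unpause -p no-preload-158739 --alsologtostderr -v=1
  out/minikube-linux-arm64 status --format={{.APIServer}} -p no-preload-158739 -n no-preload-158739
  out/minikube-linux-arm64 status --format={{.Kubelet}} -p no-preload-158739 -n no-preload-158739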

TestStartStop/group/embed-certs/serial/FirstStart (77.73s)
=== RUN   TestStartStop/group/embed-certs/serial/FirstStart
start_stop_delete_test.go:186: (dbg) Run:  out/minikube-linux-arm64 start -p embed-certs-951478 --memory=2200 --alsologtostderr --wait=true --embed-certs --driver=docker  --container-runtime=docker --kubernetes-version=v1.31.0
E0816 00:11:53.357770 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kubenet-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:12:11.225675 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:12:20.623475 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/false-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:12:20.772415 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/auto-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:12:59.074753 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/custom-flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
start_stop_delete_test.go:186: (dbg) Done: out/minikube-linux-arm64 start -p embed-certs-951478 --memory=2200 --alsologtostderr --wait=true --embed-certs --driver=docker  --container-runtime=docker --kubernetes-version=v1.31.0: (1m17.728320337s)
--- PASS: TestStartStop/group/embed-certs/serial/FirstStart (77.73s)

TestStartStop/group/embed-certs/serial/DeployApp (8.36s)
=== RUN   TestStartStop/group/embed-certs/serial/DeployApp
start_stop_delete_test.go:196: (dbg) Run:  kubectl --context embed-certs-951478 create -f testdata/busybox.yaml
start_stop_delete_test.go:196: (dbg) TestStartStop/group/embed-certs/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:344: "busybox" [847a7c44-fa8e-4fb1-a22b-483df76a2a2c] Pending
helpers_test.go:344: "busybox" [847a7c44-fa8e-4fb1-a22b-483df76a2a2c] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
helpers_test.go:344: "busybox" [847a7c44-fa8e-4fb1-a22b-483df76a2a2c] Running
start_stop_delete_test.go:196: (dbg) TestStartStop/group/embed-certs/serial/DeployApp: integration-test=busybox healthy within 8.004269517s
start_stop_delete_test.go:196: (dbg) Run:  kubectl --context embed-certs-951478 exec busybox -- /bin/sh -c "ulimit -n"
--- PASS: TestStartStop/group/embed-certs/serial/DeployApp (8.36s)

TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop (6.01s)
=== RUN   TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop
start_stop_delete_test.go:274: (dbg) TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:344: "kubernetes-dashboard-cd95d586-k7zq2" [87338817-a229-416d-8b8d-78bb860655f8] Running
E0816 00:13:18.346148 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/calico-055531/client.crt: no such file or directory" logger="UnhandledError"
start_stop_delete_test.go:274: (dbg) TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 6.005522055s
--- PASS: TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop (6.01s)

TestStartStop/group/embed-certs/serial/EnableAddonWhileActive (1.07s)
=== RUN   TestStartStop/group/embed-certs/serial/EnableAddonWhileActive
start_stop_delete_test.go:205: (dbg) Run:  out/minikube-linux-arm64 addons enable metrics-server -p embed-certs-951478 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain
start_stop_delete_test.go:215: (dbg) Run:  kubectl --context embed-certs-951478 describe deploy/metrics-server -n kube-system
--- PASS: TestStartStop/group/embed-certs/serial/EnableAddonWhileActive (1.07s)

TestStartStop/group/embed-certs/serial/Stop (11.19s)
=== RUN   TestStartStop/group/embed-certs/serial/Stop
start_stop_delete_test.go:228: (dbg) Run:  out/minikube-linux-arm64 stop -p embed-certs-951478 --alsologtostderr -v=3
start_stop_delete_test.go:228: (dbg) Done: out/minikube-linux-arm64 stop -p embed-certs-951478 --alsologtostderr -v=3: (11.189113101s)
--- PASS: TestStartStop/group/embed-certs/serial/Stop (11.19s)

TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop (5.1s)
=== RUN   TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop
start_stop_delete_test.go:287: (dbg) TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:344: "kubernetes-dashboard-cd95d586-k7zq2" [87338817-a229-416d-8b8d-78bb860655f8] Running
E0816 00:13:26.776706 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/custom-flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
start_stop_delete_test.go:287: (dbg) TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 5.004276148s
start_stop_delete_test.go:291: (dbg) Run:  kubectl --context old-k8s-version-894472 describe deploy/dashboard-metrics-scraper -n kubernetes-dashboard
--- PASS: TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop (5.10s)

TestStartStop/group/old-k8s-version/serial/VerifyKubernetesImages (0.22s)
=== RUN   TestStartStop/group/old-k8s-version/serial/VerifyKubernetesImages
start_stop_delete_test.go:304: (dbg) Run:  out/minikube-linux-arm64 -p old-k8s-version-894472 image list --format=json
start_stop_delete_test.go:304: Found non-minikube image: gcr.io/k8s-minikube/busybox:1.28.4-glibc
--- PASS: TestStartStop/group/old-k8s-version/serial/VerifyKubernetesImages (0.22s)

TestStartStop/group/old-k8s-version/serial/Pause (2.97s)
=== RUN   TestStartStop/group/old-k8s-version/serial/Pause
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 pause -p old-k8s-version-894472 --alsologtostderr -v=1
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p old-k8s-version-894472 -n old-k8s-version-894472
start_stop_delete_test.go:311: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.APIServer}} -p old-k8s-version-894472 -n old-k8s-version-894472: exit status 2 (347.387863ms)

-- stdout --
	Paused
-- /stdout --
start_stop_delete_test.go:311: status error: exit status 2 (may be ok)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Kubelet}} -p old-k8s-version-894472 -n old-k8s-version-894472
start_stop_delete_test.go:311: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Kubelet}} -p old-k8s-version-894472 -n old-k8s-version-894472: exit status 2 (311.548975ms)

-- stdout --
	Stopped
-- /stdout --
start_stop_delete_test.go:311: status error: exit status 2 (may be ok)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 unpause -p old-k8s-version-894472 --alsologtostderr -v=1
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p old-k8s-version-894472 -n old-k8s-version-894472
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Kubelet}} -p old-k8s-version-894472 -n old-k8s-version-894472
--- PASS: TestStartStop/group/old-k8s-version/serial/Pause (2.97s)

TestStartStop/group/embed-certs/serial/EnableAddonAfterStop (0.26s)
=== RUN   TestStartStop/group/embed-certs/serial/EnableAddonAfterStop
start_stop_delete_test.go:239: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p embed-certs-951478 -n embed-certs-951478
start_stop_delete_test.go:239: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Host}} -p embed-certs-951478 -n embed-certs-951478: exit status 7 (84.00408ms)

-- stdout --
	Stopped
-- /stdout --
start_stop_delete_test.go:239: status error: exit status 7 (may be ok)
start_stop_delete_test.go:246: (dbg) Run:  out/minikube-linux-arm64 addons enable dashboard -p embed-certs-951478 --images=MetricsScraper=registry.k8s.io/echoserver:1.4
--- PASS: TestStartStop/group/embed-certs/serial/EnableAddonAfterStop (0.26s)

TestStartStop/group/embed-certs/serial/SecondStart (272.6s)
=== RUN   TestStartStop/group/embed-certs/serial/SecondStart
start_stop_delete_test.go:256: (dbg) Run:  out/minikube-linux-arm64 start -p embed-certs-951478 --memory=2200 --alsologtostderr --wait=true --embed-certs --driver=docker  --container-runtime=docker --kubernetes-version=v1.31.0
start_stop_delete_test.go:256: (dbg) Done: out/minikube-linux-arm64 start -p embed-certs-951478 --memory=2200 --alsologtostderr --wait=true --embed-certs --driver=docker  --container-runtime=docker --kubernetes-version=v1.31.0: (4m32.255025132s)
start_stop_delete_test.go:262: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p embed-certs-951478 -n embed-certs-951478
--- PASS: TestStartStop/group/embed-certs/serial/SecondStart (272.60s)

TestStartStop/group/default-k8s-diff-port/serial/FirstStart (53.8s)
=== RUN   TestStartStop/group/default-k8s-diff-port/serial/FirstStart
start_stop_delete_test.go:186: (dbg) Run:  out/minikube-linux-arm64 start -p default-k8s-diff-port-715252 --memory=2200 --alsologtostderr --wait=true --apiserver-port=8444 --driver=docker  --container-runtime=docker --kubernetes-version=v1.31.0
E0816 00:13:46.048674 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/calico-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:14:12.859755 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kindnet-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:14:16.869872 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/enable-default-cni-055531/client.crt: no such file or directory" logger="UnhandledError"
start_stop_delete_test.go:186: (dbg) Done: out/minikube-linux-arm64 start -p default-k8s-diff-port-715252 --memory=2200 --alsologtostderr --wait=true --apiserver-port=8444 --driver=docker  --container-runtime=docker --kubernetes-version=v1.31.0: (53.80011731s)
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/FirstStart (53.80s)

TestStartStop/group/default-k8s-diff-port/serial/DeployApp (9.37s)
=== RUN   TestStartStop/group/default-k8s-diff-port/serial/DeployApp
start_stop_delete_test.go:196: (dbg) Run:  kubectl --context default-k8s-diff-port-715252 create -f testdata/busybox.yaml
start_stop_delete_test.go:196: (dbg) TestStartStop/group/default-k8s-diff-port/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:344: "busybox" [b4d25fe3-3ba9-4abf-bce4-a2bf783c32e6] Pending
helpers_test.go:344: "busybox" [b4d25fe3-3ba9-4abf-bce4-a2bf783c32e6] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
helpers_test.go:344: "busybox" [b4d25fe3-3ba9-4abf-bce4-a2bf783c32e6] Running
E0816 00:14:36.182558 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/addons-083442/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:14:36.765110 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/false-055531/client.crt: no such file or directory" logger="UnhandledError"
start_stop_delete_test.go:196: (dbg) TestStartStop/group/default-k8s-diff-port/serial/DeployApp: integration-test=busybox healthy within 9.003719221s
start_stop_delete_test.go:196: (dbg) Run:  kubectl --context default-k8s-diff-port-715252 exec busybox -- /bin/sh -c "ulimit -n"
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/DeployApp (9.37s)

TestStartStop/group/default-k8s-diff-port/serial/EnableAddonWhileActive (1.08s)
=== RUN   TestStartStop/group/default-k8s-diff-port/serial/EnableAddonWhileActive
start_stop_delete_test.go:205: (dbg) Run:  out/minikube-linux-arm64 addons enable metrics-server -p default-k8s-diff-port-715252 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain
start_stop_delete_test.go:215: (dbg) Run:  kubectl --context default-k8s-diff-port-715252 describe deploy/metrics-server -n kube-system
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/EnableAddonWhileActive (1.08s)

TestStartStop/group/default-k8s-diff-port/serial/Stop (10.86s)
=== RUN   TestStartStop/group/default-k8s-diff-port/serial/Stop
start_stop_delete_test.go:228: (dbg) Run:  out/minikube-linux-arm64 stop -p default-k8s-diff-port-715252 --alsologtostderr -v=3
start_stop_delete_test.go:228: (dbg) Done: out/minikube-linux-arm64 stop -p default-k8s-diff-port-715252 --alsologtostderr -v=3: (10.858681294s)
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/Stop (10.86s)

TestStartStop/group/default-k8s-diff-port/serial/EnableAddonAfterStop (0.18s)
=== RUN   TestStartStop/group/default-k8s-diff-port/serial/EnableAddonAfterStop
start_stop_delete_test.go:239: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p default-k8s-diff-port-715252 -n default-k8s-diff-port-715252
start_stop_delete_test.go:239: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Host}} -p default-k8s-diff-port-715252 -n default-k8s-diff-port-715252: exit status 7 (68.948027ms)

-- stdout --
	Stopped
-- /stdout --
start_stop_delete_test.go:239: status error: exit status 7 (may be ok)
start_stop_delete_test.go:246: (dbg) Run:  out/minikube-linux-arm64 addons enable dashboard -p default-k8s-diff-port-715252 --images=MetricsScraper=registry.k8s.io/echoserver:1.4
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/EnableAddonAfterStop (0.18s)

TestStartStop/group/default-k8s-diff-port/serial/SecondStart (267.09s)
=== RUN   TestStartStop/group/default-k8s-diff-port/serial/SecondStart
start_stop_delete_test.go:256: (dbg) Run:  out/minikube-linux-arm64 start -p default-k8s-diff-port-715252 --memory=2200 --alsologtostderr --wait=true --apiserver-port=8444 --driver=docker  --container-runtime=docker --kubernetes-version=v1.31.0
E0816 00:15:04.465428 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/false-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:15:39.510373 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/skaffold-624546/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:16:00.530408 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/functional-300199/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:16:17.956202 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/bridge-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:16:25.655342 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kubenet-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:16:34.395926 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/old-k8s-version-894472/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:16:34.402359 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/old-k8s-version-894472/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:16:34.413795 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/old-k8s-version-894472/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:16:34.435184 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/old-k8s-version-894472/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:16:34.476579 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/old-k8s-version-894472/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:16:34.558087 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/old-k8s-version-894472/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:16:34.719604 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/old-k8s-version-894472/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:16:35.041384 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/old-k8s-version-894472/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:16:35.095840 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/no-preload-158739/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:16:35.102327 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/no-preload-158739/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:16:35.113791 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/no-preload-158739/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:16:35.135224 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/no-preload-158739/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:16:35.176656 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/no-preload-158739/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:16:35.258065 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/no-preload-158739/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:16:35.419591 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/no-preload-158739/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:16:35.683383 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/old-k8s-version-894472/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:16:35.741854 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/no-preload-158739/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:16:36.383756 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/no-preload-158739/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:16:36.965150 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/old-k8s-version-894472/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:16:37.665136 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/no-preload-158739/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:16:39.526675 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/old-k8s-version-894472/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:16:40.226543 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/no-preload-158739/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:16:44.648831 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/old-k8s-version-894472/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:16:45.348525 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/no-preload-158739/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:16:54.891155 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/old-k8s-version-894472/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:16:55.590411 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/no-preload-158739/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:17:11.225523 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:17:15.372934 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/old-k8s-version-894472/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:17:16.072984 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/no-preload-158739/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:17:20.772615 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/auto-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:17:56.335136 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/old-k8s-version-894472/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:17:57.034429 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/no-preload-158739/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:17:59.074907 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/custom-flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
start_stop_delete_test.go:256: (dbg) Done: out/minikube-linux-arm64 start -p default-k8s-diff-port-715252 --memory=2200 --alsologtostderr --wait=true --apiserver-port=8444 --driver=docker  --container-runtime=docker --kubernetes-version=v1.31.0: (4m26.646556827s)
start_stop_delete_test.go:262: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p default-k8s-diff-port-715252 -n default-k8s-diff-port-715252
E0816 00:19:16.869543 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/enable-default-cni-055531/client.crt: no such file or directory" logger="UnhandledError"
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/SecondStart (267.09s)

TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop (6.01s)

=== RUN   TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop
start_stop_delete_test.go:274: (dbg) TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:344: "kubernetes-dashboard-695b96c756-nvn8k" [23c6b9c1-9663-42f3-87e6-29a2ee026b74] Running
start_stop_delete_test.go:274: (dbg) TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 6.005388855s
--- PASS: TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop (6.01s)

TestStartStop/group/embed-certs/serial/AddonExistsAfterStop (6.11s)

=== RUN   TestStartStop/group/embed-certs/serial/AddonExistsAfterStop
start_stop_delete_test.go:287: (dbg) TestStartStop/group/embed-certs/serial/AddonExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:344: "kubernetes-dashboard-695b96c756-nvn8k" [23c6b9c1-9663-42f3-87e6-29a2ee026b74] Running
start_stop_delete_test.go:287: (dbg) TestStartStop/group/embed-certs/serial/AddonExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 6.005845603s
start_stop_delete_test.go:291: (dbg) Run:  kubectl --context embed-certs-951478 describe deploy/dashboard-metrics-scraper -n kubernetes-dashboard
--- PASS: TestStartStop/group/embed-certs/serial/AddonExistsAfterStop (6.11s)

TestStartStop/group/embed-certs/serial/VerifyKubernetesImages (0.23s)

=== RUN   TestStartStop/group/embed-certs/serial/VerifyKubernetesImages
start_stop_delete_test.go:304: (dbg) Run:  out/minikube-linux-arm64 -p embed-certs-951478 image list --format=json
start_stop_delete_test.go:304: Found non-minikube image: gcr.io/k8s-minikube/busybox:1.28.4-glibc
--- PASS: TestStartStop/group/embed-certs/serial/VerifyKubernetesImages (0.23s)

TestStartStop/group/embed-certs/serial/Pause (2.85s)

=== RUN   TestStartStop/group/embed-certs/serial/Pause
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 pause -p embed-certs-951478 --alsologtostderr -v=1
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p embed-certs-951478 -n embed-certs-951478
start_stop_delete_test.go:311: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.APIServer}} -p embed-certs-951478 -n embed-certs-951478: exit status 2 (338.669197ms)

-- stdout --
	Paused

-- /stdout --
start_stop_delete_test.go:311: status error: exit status 2 (may be ok)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Kubelet}} -p embed-certs-951478 -n embed-certs-951478
start_stop_delete_test.go:311: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Kubelet}} -p embed-certs-951478 -n embed-certs-951478: exit status 2 (306.531234ms)

-- stdout --
	Stopped

-- /stdout --
start_stop_delete_test.go:311: status error: exit status 2 (may be ok)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 unpause -p embed-certs-951478 --alsologtostderr -v=1
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p embed-certs-951478 -n embed-certs-951478
E0816 00:18:18.345519 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/calico-055531/client.crt: no such file or directory" logger="UnhandledError"
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Kubelet}} -p embed-certs-951478 -n embed-certs-951478
--- PASS: TestStartStop/group/embed-certs/serial/Pause (2.85s)
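For reference, the Pause subtest above exercises a pause / status-check / unpause cycle against the embed-certs-951478 profile. A minimal manual equivalent, using only the binary, flags and profile name already shown in this run, would be:

    # pause the apiserver and kubelet for the profile
    out/minikube-linux-arm64 pause -p embed-certs-951478 --alsologtostderr -v=1
    # while paused, APIServer reports "Paused" and Kubelet reports "Stopped"; both status calls exit 2
    out/minikube-linux-arm64 status --format={{.APIServer}} -p embed-certs-951478 -n embed-certs-951478
    out/minikube-linux-arm64 status --format={{.Kubelet}} -p embed-certs-951478 -n embed-certs-951478
    # resume the components and re-check; both status calls should succeed again (exit 0)
    out/minikube-linux-arm64 unpause -p embed-certs-951478 --alsologtostderr -v=1
    out/minikube-linux-arm64 status --format={{.APIServer}} -p embed-certs-951478 -n embed-certs-951478
    out/minikube-linux-arm64 status --format={{.Kubelet}} -p embed-certs-951478 -n embed-certs-951478

The non-zero exits while paused are expected, which is why the harness records them as "status error: exit status 2 (may be ok)".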

TestStartStop/group/newest-cni/serial/FirstStart (40.28s)

=== RUN   TestStartStop/group/newest-cni/serial/FirstStart
start_stop_delete_test.go:186: (dbg) Run:  out/minikube-linux-arm64 start -p newest-cni-069920 --memory=2200 --alsologtostderr --wait=apiserver,system_pods,default_sa --feature-gates ServerSideApply=true --network-plugin=cni --extra-config=kubeadm.pod-network-cidr=10.42.0.0/16 --driver=docker  --container-runtime=docker --kubernetes-version=v1.31.0
E0816 00:18:34.291479 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:18:43.836567 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/auto-055531/client.crt: no such file or directory" logger="UnhandledError"
start_stop_delete_test.go:186: (dbg) Done: out/minikube-linux-arm64 start -p newest-cni-069920 --memory=2200 --alsologtostderr --wait=apiserver,system_pods,default_sa --feature-gates ServerSideApply=true --network-plugin=cni --extra-config=kubeadm.pod-network-cidr=10.42.0.0/16 --driver=docker  --container-runtime=docker --kubernetes-version=v1.31.0: (40.275339962s)
--- PASS: TestStartStop/group/newest-cni/serial/FirstStart (40.28s)

TestStartStop/group/newest-cni/serial/DeployApp (0s)

=== RUN   TestStartStop/group/newest-cni/serial/DeployApp
--- PASS: TestStartStop/group/newest-cni/serial/DeployApp (0.00s)

TestStartStop/group/newest-cni/serial/EnableAddonWhileActive (1.18s)

=== RUN   TestStartStop/group/newest-cni/serial/EnableAddonWhileActive
start_stop_delete_test.go:205: (dbg) Run:  out/minikube-linux-arm64 addons enable metrics-server -p newest-cni-069920 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain
start_stop_delete_test.go:205: (dbg) Done: out/minikube-linux-arm64 addons enable metrics-server -p newest-cni-069920 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain: (1.175152783s)
start_stop_delete_test.go:211: WARNING: cni mode requires additional setup before pods can schedule :(
--- PASS: TestStartStop/group/newest-cni/serial/EnableAddonWhileActive (1.18s)

TestStartStop/group/newest-cni/serial/Stop (5.81s)

=== RUN   TestStartStop/group/newest-cni/serial/Stop
start_stop_delete_test.go:228: (dbg) Run:  out/minikube-linux-arm64 stop -p newest-cni-069920 --alsologtostderr -v=3
start_stop_delete_test.go:228: (dbg) Done: out/minikube-linux-arm64 stop -p newest-cni-069920 --alsologtostderr -v=3: (5.810721628s)
--- PASS: TestStartStop/group/newest-cni/serial/Stop (5.81s)

TestStartStop/group/newest-cni/serial/EnableAddonAfterStop (0.27s)

=== RUN   TestStartStop/group/newest-cni/serial/EnableAddonAfterStop
start_stop_delete_test.go:239: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p newest-cni-069920 -n newest-cni-069920
start_stop_delete_test.go:239: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Host}} -p newest-cni-069920 -n newest-cni-069920: exit status 7 (105.825465ms)

-- stdout --
	Stopped

-- /stdout --
start_stop_delete_test.go:239: status error: exit status 7 (may be ok)
start_stop_delete_test.go:246: (dbg) Run:  out/minikube-linux-arm64 addons enable dashboard -p newest-cni-069920 --images=MetricsScraper=registry.k8s.io/echoserver:1.4
--- PASS: TestStartStop/group/newest-cni/serial/EnableAddonAfterStop (0.27s)
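For reference, EnableAddonAfterStop checks that an addon can be enabled against a profile whose host is stopped: the status probe prints Stopped and exits 7 (which the harness accepts as "may be ok"), and the addon is then enabled while the cluster is down so it takes effect on the next start. A minimal manual equivalent, reusing only the commands and flags from this run, would be:

    # stop the cluster, then confirm the host reports Stopped (exit status 7)
    out/minikube-linux-arm64 stop -p newest-cni-069920 --alsologtostderr -v=3
    out/minikube-linux-arm64 status --format={{.Host}} -p newest-cni-069920 -n newest-cni-069920
    # enable the dashboard addon while the profile is stopped; it is picked up by the SecondStart below
    out/minikube-linux-arm64 addons enable dashboard -p newest-cni-069920 --images=MetricsScraper=registry.k8s.io/echoserver:1.4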

TestStartStop/group/newest-cni/serial/SecondStart (19.51s)

=== RUN   TestStartStop/group/newest-cni/serial/SecondStart
start_stop_delete_test.go:256: (dbg) Run:  out/minikube-linux-arm64 start -p newest-cni-069920 --memory=2200 --alsologtostderr --wait=apiserver,system_pods,default_sa --feature-gates ServerSideApply=true --network-plugin=cni --extra-config=kubeadm.pod-network-cidr=10.42.0.0/16 --driver=docker  --container-runtime=docker --kubernetes-version=v1.31.0
E0816 00:19:12.859801 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kindnet-055531/client.crt: no such file or directory" logger="UnhandledError"
start_stop_delete_test.go:256: (dbg) Done: out/minikube-linux-arm64 start -p newest-cni-069920 --memory=2200 --alsologtostderr --wait=apiserver,system_pods,default_sa --feature-gates ServerSideApply=true --network-plugin=cni --extra-config=kubeadm.pod-network-cidr=10.42.0.0/16 --driver=docker  --container-runtime=docker --kubernetes-version=v1.31.0: (19.088493418s)
start_stop_delete_test.go:262: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Host}} -p newest-cni-069920 -n newest-cni-069920
--- PASS: TestStartStop/group/newest-cni/serial/SecondStart (19.51s)

TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop (6.01s)

=== RUN   TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop
start_stop_delete_test.go:274: (dbg) TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:344: "kubernetes-dashboard-695b96c756-j4fxk" [a9424040-7390-4287-bfc8-e4103aafd028] Running
E0816 00:19:18.256825 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/old-k8s-version-894472/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:19:18.956366 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/no-preload-158739/client.crt: no such file or directory" logger="UnhandledError"
start_stop_delete_test.go:274: (dbg) TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 6.004191088s
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop (6.01s)

TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop (6.19s)

=== RUN   TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop
start_stop_delete_test.go:287: (dbg) TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:344: "kubernetes-dashboard-695b96c756-j4fxk" [a9424040-7390-4287-bfc8-e4103aafd028] Running
start_stop_delete_test.go:287: (dbg) TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 6.004131047s
start_stop_delete_test.go:291: (dbg) Run:  kubectl --context default-k8s-diff-port-715252 describe deploy/dashboard-metrics-scraper -n kubernetes-dashboard
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop (6.19s)

TestStartStop/group/newest-cni/serial/UserAppExistsAfterStop (0s)

=== RUN   TestStartStop/group/newest-cni/serial/UserAppExistsAfterStop
start_stop_delete_test.go:273: WARNING: cni mode requires additional setup before pods can schedule :(
--- PASS: TestStartStop/group/newest-cni/serial/UserAppExistsAfterStop (0.00s)

TestStartStop/group/newest-cni/serial/AddonExistsAfterStop (0s)

=== RUN   TestStartStop/group/newest-cni/serial/AddonExistsAfterStop
start_stop_delete_test.go:284: WARNING: cni mode requires additional setup before pods can schedule :(
--- PASS: TestStartStop/group/newest-cni/serial/AddonExistsAfterStop (0.00s)

TestStartStop/group/newest-cni/serial/VerifyKubernetesImages (0.59s)

=== RUN   TestStartStop/group/newest-cni/serial/VerifyKubernetesImages
start_stop_delete_test.go:304: (dbg) Run:  out/minikube-linux-arm64 -p newest-cni-069920 image list --format=json
--- PASS: TestStartStop/group/newest-cni/serial/VerifyKubernetesImages (0.59s)

TestStartStop/group/default-k8s-diff-port/serial/VerifyKubernetesImages (0.31s)

=== RUN   TestStartStop/group/default-k8s-diff-port/serial/VerifyKubernetesImages
start_stop_delete_test.go:304: (dbg) Run:  out/minikube-linux-arm64 -p default-k8s-diff-port-715252 image list --format=json
start_stop_delete_test.go:304: Found non-minikube image: gcr.io/k8s-minikube/busybox:1.28.4-glibc
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/VerifyKubernetesImages (0.31s)

TestStartStop/group/default-k8s-diff-port/serial/Pause (4.34s)

=== RUN   TestStartStop/group/default-k8s-diff-port/serial/Pause
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 pause -p default-k8s-diff-port-715252 --alsologtostderr -v=1
start_stop_delete_test.go:311: (dbg) Done: out/minikube-linux-arm64 pause -p default-k8s-diff-port-715252 --alsologtostderr -v=1: (1.26588553s)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p default-k8s-diff-port-715252 -n default-k8s-diff-port-715252
start_stop_delete_test.go:311: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.APIServer}} -p default-k8s-diff-port-715252 -n default-k8s-diff-port-715252: exit status 2 (421.023967ms)

-- stdout --
	Paused

-- /stdout --
start_stop_delete_test.go:311: status error: exit status 2 (may be ok)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Kubelet}} -p default-k8s-diff-port-715252 -n default-k8s-diff-port-715252
start_stop_delete_test.go:311: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Kubelet}} -p default-k8s-diff-port-715252 -n default-k8s-diff-port-715252: exit status 2 (393.429314ms)

-- stdout --
	Stopped

-- /stdout --
start_stop_delete_test.go:311: status error: exit status 2 (may be ok)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 unpause -p default-k8s-diff-port-715252 --alsologtostderr -v=1
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p default-k8s-diff-port-715252 -n default-k8s-diff-port-715252
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Kubelet}} -p default-k8s-diff-port-715252 -n default-k8s-diff-port-715252
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/Pause (4.34s)

TestStartStop/group/newest-cni/serial/Pause (4.49s)

=== RUN   TestStartStop/group/newest-cni/serial/Pause
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 pause -p newest-cni-069920 --alsologtostderr -v=1
start_stop_delete_test.go:311: (dbg) Done: out/minikube-linux-arm64 pause -p newest-cni-069920 --alsologtostderr -v=1: (1.223075672s)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p newest-cni-069920 -n newest-cni-069920
start_stop_delete_test.go:311: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.APIServer}} -p newest-cni-069920 -n newest-cni-069920: exit status 2 (435.091368ms)

-- stdout --
	Paused

-- /stdout --
start_stop_delete_test.go:311: status error: exit status 2 (may be ok)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Kubelet}} -p newest-cni-069920 -n newest-cni-069920
start_stop_delete_test.go:311: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Kubelet}} -p newest-cni-069920 -n newest-cni-069920: exit status 2 (414.218713ms)

-- stdout --
	Stopped

-- /stdout --
start_stop_delete_test.go:311: status error: exit status 2 (may be ok)
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 unpause -p newest-cni-069920 --alsologtostderr -v=1
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.APIServer}} -p newest-cni-069920 -n newest-cni-069920
start_stop_delete_test.go:311: (dbg) Run:  out/minikube-linux-arm64 status --format={{.Kubelet}} -p newest-cni-069920 -n newest-cni-069920
--- PASS: TestStartStop/group/newest-cni/serial/Pause (4.49s)

Test skip (24/343)

TestDownloadOnly/v1.20.0/cached-images (0s)

=== RUN   TestDownloadOnly/v1.20.0/cached-images
aaa_download_only_test.go:129: Preload exists, images won't be cached
--- SKIP: TestDownloadOnly/v1.20.0/cached-images (0.00s)

TestDownloadOnly/v1.20.0/binaries (0s)

=== RUN   TestDownloadOnly/v1.20.0/binaries
aaa_download_only_test.go:151: Preload exists, binaries are present within.
--- SKIP: TestDownloadOnly/v1.20.0/binaries (0.00s)

TestDownloadOnly/v1.20.0/kubectl (0s)

=== RUN   TestDownloadOnly/v1.20.0/kubectl
aaa_download_only_test.go:167: Test for darwin and windows
--- SKIP: TestDownloadOnly/v1.20.0/kubectl (0.00s)

TestDownloadOnly/v1.31.0/cached-images (0s)

=== RUN   TestDownloadOnly/v1.31.0/cached-images
aaa_download_only_test.go:129: Preload exists, images won't be cached
--- SKIP: TestDownloadOnly/v1.31.0/cached-images (0.00s)

TestDownloadOnly/v1.31.0/binaries (0s)

=== RUN   TestDownloadOnly/v1.31.0/binaries
aaa_download_only_test.go:151: Preload exists, binaries are present within.
--- SKIP: TestDownloadOnly/v1.31.0/binaries (0.00s)

TestDownloadOnly/v1.31.0/kubectl (0s)

=== RUN   TestDownloadOnly/v1.31.0/kubectl
aaa_download_only_test.go:167: Test for darwin and windows
--- SKIP: TestDownloadOnly/v1.31.0/kubectl (0.00s)

TestDownloadOnlyKic (0.54s)

=== RUN   TestDownloadOnlyKic
aaa_download_only_test.go:232: (dbg) Run:  out/minikube-linux-arm64 start --download-only -p download-docker-599739 --alsologtostderr --driver=docker  --container-runtime=docker
aaa_download_only_test.go:244: Skip for arm64 platform. See https://github.com/kubernetes/minikube/issues/10144
helpers_test.go:175: Cleaning up "download-docker-599739" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p download-docker-599739
--- SKIP: TestDownloadOnlyKic (0.54s)

TestAddons/parallel/HelmTiller (0s)

=== RUN   TestAddons/parallel/HelmTiller
=== PAUSE TestAddons/parallel/HelmTiller

=== CONT  TestAddons/parallel/HelmTiller
addons_test.go:446: skip Helm test on arm64
--- SKIP: TestAddons/parallel/HelmTiller (0.00s)

TestAddons/parallel/Olm (0s)

=== RUN   TestAddons/parallel/Olm
=== PAUSE TestAddons/parallel/Olm

=== CONT  TestAddons/parallel/Olm
addons_test.go:500: Skipping OLM addon test until https://github.com/operator-framework/operator-lifecycle-manager/issues/2534 is resolved
--- SKIP: TestAddons/parallel/Olm (0.00s)

TestDockerEnvContainerd (0s)

=== RUN   TestDockerEnvContainerd
docker_test.go:170: running with docker true linux arm64
docker_test.go:172: skipping: TestDockerEnvContainerd can only be run with the containerd runtime on Docker driver
--- SKIP: TestDockerEnvContainerd (0.00s)

TestKVMDriverInstallOrUpdate (0s)

=== RUN   TestKVMDriverInstallOrUpdate
driver_install_or_update_test.go:45: Skip if arm64. See https://github.com/kubernetes/minikube/issues/10144
--- SKIP: TestKVMDriverInstallOrUpdate (0.00s)

TestHyperKitDriverInstallOrUpdate (0s)

=== RUN   TestHyperKitDriverInstallOrUpdate
driver_install_or_update_test.go:105: Skip if not darwin.
--- SKIP: TestHyperKitDriverInstallOrUpdate (0.00s)

TestHyperkitDriverSkipUpgrade (0s)

=== RUN   TestHyperkitDriverSkipUpgrade
driver_install_or_update_test.go:169: Skip if not darwin.
--- SKIP: TestHyperkitDriverSkipUpgrade (0.00s)

TestFunctional/parallel/MySQL (0s)

=== RUN   TestFunctional/parallel/MySQL
=== PAUSE TestFunctional/parallel/MySQL

=== CONT  TestFunctional/parallel/MySQL
functional_test.go:1787: arm64 is not supported by mysql. Skip the test. See https://github.com/kubernetes/minikube/issues/10144
--- SKIP: TestFunctional/parallel/MySQL (0.00s)

TestFunctional/parallel/PodmanEnv (0s)

=== RUN   TestFunctional/parallel/PodmanEnv
=== PAUSE TestFunctional/parallel/PodmanEnv

=== CONT  TestFunctional/parallel/PodmanEnv
functional_test.go:550: only validate podman env with docker container runtime, currently testing docker
--- SKIP: TestFunctional/parallel/PodmanEnv (0.00s)

TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDig (0s)

=== RUN   TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDig
functional_test_tunnel_test.go:99: DNS forwarding is only supported for Hyperkit on Darwin, skipping test DNS forwarding
--- SKIP: TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDig (0.00s)

TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDscacheutil (0s)

=== RUN   TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDscacheutil
functional_test_tunnel_test.go:99: DNS forwarding is only supported for Hyperkit on Darwin, skipping test DNS forwarding
--- SKIP: TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDscacheutil (0.00s)

TestFunctional/parallel/TunnelCmd/serial/AccessThroughDNS (0s)

=== RUN   TestFunctional/parallel/TunnelCmd/serial/AccessThroughDNS
functional_test_tunnel_test.go:99: DNS forwarding is only supported for Hyperkit on Darwin, skipping test DNS forwarding
--- SKIP: TestFunctional/parallel/TunnelCmd/serial/AccessThroughDNS (0.00s)

TestGvisorAddon (0s)

=== RUN   TestGvisorAddon
gvisor_addon_test.go:34: skipping test because --gvisor=false
--- SKIP: TestGvisorAddon (0.00s)

TestImageBuild/serial/validateImageBuildWithBuildEnv (0s)

=== RUN   TestImageBuild/serial/validateImageBuildWithBuildEnv
image_test.go:114: skipping due to https://github.com/kubernetes/minikube/issues/12431
--- SKIP: TestImageBuild/serial/validateImageBuildWithBuildEnv (0.00s)

TestChangeNoneUser (0s)

=== RUN   TestChangeNoneUser
none_test.go:38: Test requires none driver and SUDO_USER env to not be empty
--- SKIP: TestChangeNoneUser (0.00s)

TestScheduledStopWindows (0s)

=== RUN   TestScheduledStopWindows
scheduled_stop_test.go:42: test only runs on windows
--- SKIP: TestScheduledStopWindows (0.00s)

TestNetworkPlugins/group/cilium (5.6s)

=== RUN   TestNetworkPlugins/group/cilium
net_test.go:102: Skipping the test as it's interfering with other tests and is outdated
panic.go:626: 
----------------------- debugLogs start: cilium-055531 [pass: true] --------------------------------
>>> netcat: nslookup kubernetes.default:
Error in configuration: context was not found for specified context: cilium-055531

>>> netcat: nslookup debug kubernetes.default a-records:
Error in configuration: context was not found for specified context: cilium-055531

>>> netcat: dig search kubernetes.default:
Error in configuration: context was not found for specified context: cilium-055531

>>> netcat: dig @10.96.0.10 kubernetes.default.svc.cluster.local udp/53:
Error in configuration: context was not found for specified context: cilium-055531

>>> netcat: dig @10.96.0.10 kubernetes.default.svc.cluster.local tcp/53:
Error in configuration: context was not found for specified context: cilium-055531

>>> netcat: nc 10.96.0.10 udp/53:
Error in configuration: context was not found for specified context: cilium-055531

>>> netcat: nc 10.96.0.10 tcp/53:
Error in configuration: context was not found for specified context: cilium-055531

>>> netcat: /etc/nsswitch.conf:
Error in configuration: context was not found for specified context: cilium-055531

>>> netcat: /etc/hosts:
Error in configuration: context was not found for specified context: cilium-055531

>>> netcat: /etc/resolv.conf:
Error in configuration: context was not found for specified context: cilium-055531

>>> host: /etc/nsswitch.conf:
* Profile "cilium-055531" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-055531"

>>> host: /etc/hosts:
* Profile "cilium-055531" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-055531"

>>> host: /etc/resolv.conf:
* Profile "cilium-055531" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-055531"

>>> k8s: nodes, services, endpoints, daemon sets, deployments and pods, :
Error in configuration: context was not found for specified context: cilium-055531

>>> host: crictl pods:
* Profile "cilium-055531" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-055531"

>>> host: crictl containers:
* Profile "cilium-055531" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-055531"

>>> k8s: describe netcat deployment:
error: context "cilium-055531" does not exist

>>> k8s: describe netcat pod(s):
error: context "cilium-055531" does not exist

>>> k8s: netcat logs:
error: context "cilium-055531" does not exist

>>> k8s: describe coredns deployment:
error: context "cilium-055531" does not exist

>>> k8s: describe coredns pods:
error: context "cilium-055531" does not exist

>>> k8s: coredns logs:
error: context "cilium-055531" does not exist

>>> k8s: describe api server pod(s):
error: context "cilium-055531" does not exist

>>> k8s: api server logs:
error: context "cilium-055531" does not exist

>>> host: /etc/cni:
* Profile "cilium-055531" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-055531"

>>> host: ip a s:
* Profile "cilium-055531" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-055531"

>>> host: ip r s:
* Profile "cilium-055531" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-055531"

>>> host: iptables-save:
* Profile "cilium-055531" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-055531"

>>> host: iptables table nat:
* Profile "cilium-055531" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-055531"

>>> k8s: describe cilium daemon set:
Error in configuration: context was not found for specified context: cilium-055531

>>> k8s: describe cilium daemon set pod(s):
Error in configuration: context was not found for specified context: cilium-055531

>>> k8s: cilium daemon set container(s) logs (current):
error: context "cilium-055531" does not exist

>>> k8s: cilium daemon set container(s) logs (previous):
error: context "cilium-055531" does not exist

>>> k8s: describe cilium deployment:
Error in configuration: context was not found for specified context: cilium-055531

>>> k8s: describe cilium deployment pod(s):
Error in configuration: context was not found for specified context: cilium-055531

>>> k8s: cilium deployment container(s) logs (current):
error: context "cilium-055531" does not exist

>>> k8s: cilium deployment container(s) logs (previous):
error: context "cilium-055531" does not exist

>>> k8s: describe kube-proxy daemon set:
error: context "cilium-055531" does not exist

>>> k8s: describe kube-proxy pod(s):
error: context "cilium-055531" does not exist

>>> k8s: kube-proxy logs:
error: context "cilium-055531" does not exist

>>> host: kubelet daemon status:
* Profile "cilium-055531" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-055531"

>>> host: kubelet daemon config:
* Profile "cilium-055531" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-055531"

>>> k8s: kubelet logs:
* Profile "cilium-055531" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-055531"

>>> host: /etc/kubernetes/kubelet.conf:
* Profile "cilium-055531" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-055531"

>>> host: /var/lib/kubelet/config.yaml:
* Profile "cilium-055531" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-055531"

>>> k8s: kubectl config:
apiVersion: v1
clusters: null
contexts: null
current-context: ""
kind: Config
preferences: {}
users: null

>>> k8s: cms:
Error in configuration: context was not found for specified context: cilium-055531

>>> host: docker daemon status:
* Profile "cilium-055531" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-055531"

>>> host: docker daemon config:
* Profile "cilium-055531" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-055531"

>>> host: /etc/docker/daemon.json:
* Profile "cilium-055531" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-055531"

>>> host: docker system info:
* Profile "cilium-055531" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-055531"

>>> host: cri-docker daemon status:
* Profile "cilium-055531" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-055531"

>>> host: cri-docker daemon config:
* Profile "cilium-055531" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-055531"

>>> host: /etc/systemd/system/cri-docker.service.d/10-cni.conf:
* Profile "cilium-055531" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-055531"

>>> host: /usr/lib/systemd/system/cri-docker.service:
* Profile "cilium-055531" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-055531"

>>> host: cri-dockerd version:
* Profile "cilium-055531" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-055531"

>>> host: containerd daemon status:
* Profile "cilium-055531" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-055531"

>>> host: containerd daemon config:
* Profile "cilium-055531" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-055531"

>>> host: /lib/systemd/system/containerd.service:
* Profile "cilium-055531" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-055531"

>>> host: /etc/containerd/config.toml:
* Profile "cilium-055531" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-055531"

>>> host: containerd config dump:
* Profile "cilium-055531" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-055531"

>>> host: crio daemon status:
* Profile "cilium-055531" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-055531"

>>> host: crio daemon config:
* Profile "cilium-055531" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-055531"

>>> host: /etc/crio:
* Profile "cilium-055531" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-055531"

>>> host: crio config:
* Profile "cilium-055531" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-055531"

----------------------- debugLogs end: cilium-055531 [took: 5.353957708s] --------------------------------
helpers_test.go:175: Cleaning up "cilium-055531" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p cilium-055531
--- SKIP: TestNetworkPlugins/group/cilium (5.60s)

TestStartStop/group/disable-driver-mounts (0.15s)

=== RUN   TestStartStop/group/disable-driver-mounts
=== PAUSE TestStartStop/group/disable-driver-mounts

=== CONT  TestStartStop/group/disable-driver-mounts
start_stop_delete_test.go:103: skipping TestStartStop/group/disable-driver-mounts - only runs on virtualbox
helpers_test.go:175: Cleaning up "disable-driver-mounts-243736" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-arm64 delete -p disable-driver-mounts-243736
--- SKIP: TestStartStop/group/disable-driver-mounts (0.15s)