=== RUN TestStartStop/group/old-k8s-version/serial/SecondStart
start_stop_delete_test.go:256: (dbg) Run: out/minikube-linux-arm64 start -p old-k8s-version-894472 --memory=2200 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker --container-runtime=docker --kubernetes-version=v1.20.0
E0816 00:06:56.721477 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kindnet-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:06:58.933005 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/bridge-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:07:00.736599 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/enable-default-cni-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:07:02.579305 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/skaffold-624546/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:07:06.632105 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kubenet-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:07:11.226303 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:07:20.772464 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/auto-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:07:38.929482 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:07:39.894447 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/bridge-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:07:47.593569 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kubenet-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:07:48.474932 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/auto-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:07:59.075288 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/custom-flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:07:59.081858 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/custom-flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:07:59.093269 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/custom-flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:07:59.115058 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/custom-flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:07:59.156453 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/custom-flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:07:59.237844 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/custom-flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:07:59.399328 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/custom-flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:07:59.721213 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/custom-flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:08:00.362560 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/custom-flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:08:01.643956 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/custom-flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:08:04.205372 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/custom-flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:08:09.327758 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/custom-flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:08:18.345918 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/calico-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:08:18.352364 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/calico-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:08:18.363830 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/calico-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:08:18.385200 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/calico-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:08:18.426791 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/calico-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:08:18.508227 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/calico-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:08:18.669737 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/calico-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:08:18.991737 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/calico-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:08:19.569814 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/custom-flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:08:19.633157 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/calico-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:08:20.915444 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/calico-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:08:23.477800 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/calico-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:08:28.599427 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/calico-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:08:38.841468 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/calico-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:08:40.051519 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/custom-flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:08:59.323625 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/calico-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:01.815887 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/bridge-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:09.515839 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kubenet-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:12.859789 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kindnet-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:16.870102 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/enable-default-cni-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:21.012928 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/custom-flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:36.182576 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/addons-083442/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:36.765431 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/false-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:36.771775 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/false-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:36.783139 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/false-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:36.804479 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/false-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:36.846090 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/false-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:36.927585 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/false-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:37.088864 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/false-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:37.410538 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/false-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:38.052785 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/false-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:39.334345 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/false-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:40.285312 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/calico-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:40.563190 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kindnet-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:41.895903 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/false-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:44.578867 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/enable-default-cni-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:47.017932 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/false-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:09:57.259388 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/false-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:10:17.740749 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/false-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:10:39.510524 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/skaffold-624546/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:10:42.934443 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/custom-flannel-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:10:43.596401 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/functional-300199/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:10:58.702168 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/false-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:11:00.530856 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/functional-300199/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:11:02.206658 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/calico-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:11:17.956762 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/bridge-055531/client.crt: no such file or directory" logger="UnhandledError"
E0816 00:11:25.654975 2031396 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/kubenet-055531/client.crt: no such file or directory" logger="UnhandledError"
start_stop_delete_test.go:256: (dbg) Non-zero exit: out/minikube-linux-arm64 start -p old-k8s-version-894472 --memory=2200 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker --container-runtime=docker --kubernetes-version=v1.20.0: exit status 102 (6m18.124373157s)
-- stdout --
* [old-k8s-version-894472] minikube v1.33.1 on Ubuntu 20.04 (arm64)
- MINIKUBE_LOCATION=19452
- MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
- KUBECONFIG=/home/jenkins/minikube-integration/19452-2026001/kubeconfig
- MINIKUBE_HOME=/home/jenkins/minikube-integration/19452-2026001/.minikube
- MINIKUBE_BIN=out/minikube-linux-arm64
- MINIKUBE_FORCE_SYSTEMD=
* Kubernetes 1.31.0 is now available. If you would like to upgrade, specify: --kubernetes-version=v1.31.0
* Using the docker driver based on existing profile
* Starting "old-k8s-version-894472" primary control-plane node in "old-k8s-version-894472" cluster
* Pulling base image v0.0.44-1723740748-19452 ...
* Restarting existing docker container for "old-k8s-version-894472" ...
* Preparing Kubernetes v1.20.0 on Docker 27.1.2 ...
* Verifying Kubernetes components...
- Using image gcr.io/k8s-minikube/storage-provisioner:v5
- Using image fake.domain/registry.k8s.io/echoserver:1.4
- Using image docker.io/kubernetesui/dashboard:v2.7.0
- Using image registry.k8s.io/echoserver:1.4
* Some dashboard features require the metrics-server addon. To enable all features please run:
minikube -p old-k8s-version-894472 addons enable metrics-server
* Enabled addons: default-storageclass, metrics-server, storage-provisioner, dashboard
-- /stdout --
** stderr **
I0816 00:06:56.478341 2407112 out.go:345] Setting OutFile to fd 1 ...
I0816 00:06:56.478559 2407112 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0816 00:06:56.478582 2407112 out.go:358] Setting ErrFile to fd 2...
I0816 00:06:56.478601 2407112 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0816 00:06:56.478891 2407112 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19452-2026001/.minikube/bin
I0816 00:06:56.479286 2407112 out.go:352] Setting JSON to false
I0816 00:06:56.480283 2407112 start.go:129] hostinfo: {"hostname":"ip-172-31-29-130","uptime":31761,"bootTime":1723735056,"procs":208,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1067-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"36adf542-ef4f-4e2d-a0c8-6868d1383ff9"}
I0816 00:06:56.480386 2407112 start.go:139] virtualization:
I0816 00:06:56.484771 2407112 out.go:177] * [old-k8s-version-894472] minikube v1.33.1 on Ubuntu 20.04 (arm64)
I0816 00:06:56.489078 2407112 out.go:177] - MINIKUBE_LOCATION=19452
I0816 00:06:56.489146 2407112 notify.go:220] Checking for updates...
I0816 00:06:56.495252 2407112 out.go:177] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0816 00:06:56.497537 2407112 out.go:177] - KUBECONFIG=/home/jenkins/minikube-integration/19452-2026001/kubeconfig
I0816 00:06:56.499696 2407112 out.go:177] - MINIKUBE_HOME=/home/jenkins/minikube-integration/19452-2026001/.minikube
I0816 00:06:56.502149 2407112 out.go:177] - MINIKUBE_BIN=out/minikube-linux-arm64
I0816 00:06:56.503818 2407112 out.go:177] - MINIKUBE_FORCE_SYSTEMD=
I0816 00:06:56.506603 2407112 config.go:182] Loaded profile config "old-k8s-version-894472": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.20.0
I0816 00:06:56.509199 2407112 out.go:177] * Kubernetes 1.31.0 is now available. If you would like to upgrade, specify: --kubernetes-version=v1.31.0
I0816 00:06:56.510758 2407112 driver.go:392] Setting default libvirt URI to qemu:///system
I0816 00:06:56.542297 2407112 docker.go:123] docker version: linux-27.1.2:Docker Engine - Community
I0816 00:06:56.542421 2407112 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0816 00:06:56.622084 2407112 info.go:266] docker info: {ID:U5VK:ZNT5:35M3:FHLW:Q7TL:ELFX:BNAG:AV4T:UD2H:SK5L:SEJV:SJJL Containers:2 ContainersRunning:1 ContainersPaused:0 ContainersStopped:1 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:34 OomKillDisable:true NGoroutines:53 SystemTime:2024-08-16 00:06:56.612263818 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1067-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aar
ch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214900736 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-29-130 Labels:[] ExperimentalBuild:false ServerVersion:27.1.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:8fc6bcff51318944179630522a095cc9dbf9f353 Expected:8fc6bcff51318944179630522a095cc9dbf9f353} RuncCommit:{ID:v1.1.13-0-g58aa920 Expected:v1.1.13-0-g58aa920} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErro
rs:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.16.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.29.1]] Warnings:<nil>}}
I0816 00:06:56.622193 2407112 docker.go:307] overlay module found
I0816 00:06:56.624971 2407112 out.go:177] * Using the docker driver based on existing profile
I0816 00:06:56.627017 2407112 start.go:297] selected driver: docker
I0816 00:06:56.627040 2407112 start.go:901] validating driver "docker" against &{Name:old-k8s-version-894472 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.20.0 ClusterName:old-k8s-version-894472 Namespace:default APIServerHAVIP: AP
IServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin: FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.20.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountSt
ring:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0816 00:06:56.627160 2407112 start.go:912] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0816 00:06:56.627741 2407112 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0816 00:06:56.712650 2407112 info.go:266] docker info: {ID:U5VK:ZNT5:35M3:FHLW:Q7TL:ELFX:BNAG:AV4T:UD2H:SK5L:SEJV:SJJL Containers:2 ContainersRunning:1 ContainersPaused:0 ContainersStopped:1 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:34 OomKillDisable:true NGoroutines:53 SystemTime:2024-08-16 00:06:56.703145608 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1067-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aar
ch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214900736 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-29-130 Labels:[] ExperimentalBuild:false ServerVersion:27.1.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:8fc6bcff51318944179630522a095cc9dbf9f353 Expected:8fc6bcff51318944179630522a095cc9dbf9f353} RuncCommit:{ID:v1.1.13-0-g58aa920 Expected:v1.1.13-0-g58aa920} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErro
rs:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.16.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.29.1]] Warnings:<nil>}}
I0816 00:06:56.713018 2407112 start_flags.go:947] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0816 00:06:56.713048 2407112 cni.go:84] Creating CNI manager for ""
I0816 00:06:56.713068 2407112 cni.go:162] CNI unnecessary in this configuration, recommending no CNI
I0816 00:06:56.713112 2407112 start.go:340] cluster config:
{Name:old-k8s-version-894472 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.20.0 ClusterName:old-k8s-version-894472 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local
ContainerRuntime:docker CRISocket: NetworkPlugin: FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.20.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountI
P: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0816 00:06:56.716540 2407112 out.go:177] * Starting "old-k8s-version-894472" primary control-plane node in "old-k8s-version-894472" cluster
I0816 00:06:56.718269 2407112 cache.go:121] Beginning downloading kic base image for docker with docker
I0816 00:06:56.720160 2407112 out.go:177] * Pulling base image v0.0.44-1723740748-19452 ...
I0816 00:06:56.721981 2407112 preload.go:131] Checking if preload exists for k8s version v1.20.0 and runtime docker
I0816 00:06:56.722039 2407112 preload.go:146] Found local preload: /home/jenkins/minikube-integration/19452-2026001/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.20.0-docker-overlay2-arm64.tar.lz4
I0816 00:06:56.722052 2407112 cache.go:56] Caching tarball of preloaded images
I0816 00:06:56.722141 2407112 preload.go:172] Found /home/jenkins/minikube-integration/19452-2026001/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.20.0-docker-overlay2-arm64.tar.lz4 in cache, skipping download
I0816 00:06:56.722155 2407112 cache.go:59] Finished verifying existence of preloaded tar for v1.20.0 on docker
I0816 00:06:56.722340 2407112 image.go:79] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d in local docker daemon
I0816 00:06:56.722549 2407112 profile.go:143] Saving config to /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/old-k8s-version-894472/config.json ...
W0816 00:06:56.744934 2407112 image.go:95] image gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d is of wrong architecture
I0816 00:06:56.744952 2407112 cache.go:149] Downloading gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d to local cache
I0816 00:06:56.745027 2407112 image.go:63] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d in local cache directory
I0816 00:06:56.745044 2407112 image.go:66] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d in local cache directory, skipping pull
I0816 00:06:56.745049 2407112 image.go:135] gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d exists in cache, skipping pull
I0816 00:06:56.745057 2407112 cache.go:152] successfully saved gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d as a tarball
I0816 00:06:56.745063 2407112 cache.go:162] Loading gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d from local cache
I0816 00:06:56.872136 2407112 cache.go:164] successfully loaded and using gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d from cached tarball
I0816 00:06:56.872178 2407112 cache.go:194] Successfully downloaded all kic artifacts
I0816 00:06:56.872221 2407112 start.go:360] acquireMachinesLock for old-k8s-version-894472: {Name:mkc65b883f793322a5198592ea6258fdb5d12c1e Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0816 00:06:56.872296 2407112 start.go:364] duration metric: took 41.902µs to acquireMachinesLock for "old-k8s-version-894472"
I0816 00:06:56.872323 2407112 start.go:96] Skipping create...Using existing machine configuration
I0816 00:06:56.872335 2407112 fix.go:54] fixHost starting:
I0816 00:06:56.872625 2407112 cli_runner.go:164] Run: docker container inspect old-k8s-version-894472 --format={{.State.Status}}
I0816 00:06:56.889006 2407112 fix.go:112] recreateIfNeeded on old-k8s-version-894472: state=Stopped err=<nil>
W0816 00:06:56.889034 2407112 fix.go:138] unexpected machine state, will restart: <nil>
I0816 00:06:56.891153 2407112 out.go:177] * Restarting existing docker container for "old-k8s-version-894472" ...
I0816 00:06:56.893031 2407112 cli_runner.go:164] Run: docker start old-k8s-version-894472
I0816 00:06:57.203439 2407112 cli_runner.go:164] Run: docker container inspect old-k8s-version-894472 --format={{.State.Status}}
I0816 00:06:57.229792 2407112 kic.go:430] container "old-k8s-version-894472" state is running.
I0816 00:06:57.231986 2407112 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-894472
I0816 00:06:57.253055 2407112 profile.go:143] Saving config to /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/old-k8s-version-894472/config.json ...
I0816 00:06:57.253286 2407112 machine.go:93] provisionDockerMachine start ...
I0816 00:06:57.253348 2407112 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-894472
I0816 00:06:57.277267 2407112 main.go:141] libmachine: Using SSH client type: native
I0816 00:06:57.277532 2407112 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e49d0] 0x3e7230 <nil> [] 0s} 127.0.0.1 35084 <nil> <nil>}
I0816 00:06:57.277547 2407112 main.go:141] libmachine: About to run SSH command:
hostname
I0816 00:06:57.278662 2407112 main.go:141] libmachine: Error dialing TCP: ssh: handshake failed: EOF
I0816 00:07:00.417721 2407112 main.go:141] libmachine: SSH cmd err, output: <nil>: old-k8s-version-894472
I0816 00:07:00.417744 2407112 ubuntu.go:169] provisioning hostname "old-k8s-version-894472"
I0816 00:07:00.417828 2407112 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-894472
I0816 00:07:00.437465 2407112 main.go:141] libmachine: Using SSH client type: native
I0816 00:07:00.437790 2407112 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e49d0] 0x3e7230 <nil> [] 0s} 127.0.0.1 35084 <nil> <nil>}
I0816 00:07:00.437812 2407112 main.go:141] libmachine: About to run SSH command:
sudo hostname old-k8s-version-894472 && echo "old-k8s-version-894472" | sudo tee /etc/hostname
I0816 00:07:00.596905 2407112 main.go:141] libmachine: SSH cmd err, output: <nil>: old-k8s-version-894472
I0816 00:07:00.596993 2407112 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-894472
I0816 00:07:00.614217 2407112 main.go:141] libmachine: Using SSH client type: native
I0816 00:07:00.614480 2407112 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e49d0] 0x3e7230 <nil> [] 0s} 127.0.0.1 35084 <nil> <nil>}
I0816 00:07:00.614502 2407112 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\sold-k8s-version-894472' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 old-k8s-version-894472/g' /etc/hosts;
else
echo '127.0.1.1 old-k8s-version-894472' | sudo tee -a /etc/hosts;
fi
fi
I0816 00:07:00.755319 2407112 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0816 00:07:00.755351 2407112 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/19452-2026001/.minikube CaCertPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/19452-2026001/.minikube}
I0816 00:07:00.755372 2407112 ubuntu.go:177] setting up certificates
I0816 00:07:00.755382 2407112 provision.go:84] configureAuth start
I0816 00:07:00.755449 2407112 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-894472
I0816 00:07:00.774680 2407112 provision.go:143] copyHostCerts
I0816 00:07:00.774750 2407112 exec_runner.go:144] found /home/jenkins/minikube-integration/19452-2026001/.minikube/ca.pem, removing ...
I0816 00:07:00.774759 2407112 exec_runner.go:203] rm: /home/jenkins/minikube-integration/19452-2026001/.minikube/ca.pem
I0816 00:07:00.774822 2407112 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/19452-2026001/.minikube/ca.pem (1082 bytes)
I0816 00:07:00.774914 2407112 exec_runner.go:144] found /home/jenkins/minikube-integration/19452-2026001/.minikube/cert.pem, removing ...
I0816 00:07:00.774920 2407112 exec_runner.go:203] rm: /home/jenkins/minikube-integration/19452-2026001/.minikube/cert.pem
I0816 00:07:00.774939 2407112 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/19452-2026001/.minikube/cert.pem (1123 bytes)
I0816 00:07:00.774989 2407112 exec_runner.go:144] found /home/jenkins/minikube-integration/19452-2026001/.minikube/key.pem, removing ...
I0816 00:07:00.774993 2407112 exec_runner.go:203] rm: /home/jenkins/minikube-integration/19452-2026001/.minikube/key.pem
I0816 00:07:00.775011 2407112 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/19452-2026001/.minikube/key.pem (1675 bytes)
I0816 00:07:00.775055 2407112 provision.go:117] generating server cert: /home/jenkins/minikube-integration/19452-2026001/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/19452-2026001/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/19452-2026001/.minikube/certs/ca-key.pem org=jenkins.old-k8s-version-894472 san=[127.0.0.1 192.168.85.2 localhost minikube old-k8s-version-894472]
I0816 00:07:01.005265 2407112 provision.go:177] copyRemoteCerts
I0816 00:07:01.005372 2407112 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0816 00:07:01.005471 2407112 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-894472
I0816 00:07:01.025729 2407112 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35084 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/old-k8s-version-894472/id_rsa Username:docker}
I0816 00:07:01.128226 2407112 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I0816 00:07:01.157884 2407112 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/machines/server.pem --> /etc/docker/server.pem (1233 bytes)
I0816 00:07:01.188711 2407112 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I0816 00:07:01.239191 2407112 provision.go:87] duration metric: took 483.794576ms to configureAuth
I0816 00:07:01.239218 2407112 ubuntu.go:193] setting minikube options for container-runtime
I0816 00:07:01.239421 2407112 config.go:182] Loaded profile config "old-k8s-version-894472": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.20.0
I0816 00:07:01.239489 2407112 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-894472
I0816 00:07:01.258647 2407112 main.go:141] libmachine: Using SSH client type: native
I0816 00:07:01.258917 2407112 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e49d0] 0x3e7230 <nil> [] 0s} 127.0.0.1 35084 <nil> <nil>}
I0816 00:07:01.258928 2407112 main.go:141] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I0816 00:07:01.394503 2407112 main.go:141] libmachine: SSH cmd err, output: <nil>: overlay
I0816 00:07:01.394523 2407112 ubuntu.go:71] root file system type: overlay
I0816 00:07:01.394635 2407112 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I0816 00:07:01.394702 2407112 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-894472
I0816 00:07:01.421992 2407112 main.go:141] libmachine: Using SSH client type: native
I0816 00:07:01.422240 2407112 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e49d0] 0x3e7230 <nil> [] 0s} 127.0.0.1 35084 <nil> <nil>}
I0816 00:07:01.422315 2407112 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I0816 00:07:01.592994 2407112 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
I0816 00:07:01.593262 2407112 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-894472
I0816 00:07:01.632745 2407112 main.go:141] libmachine: Using SSH client type: native
I0816 00:07:01.633003 2407112 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e49d0] 0x3e7230 <nil> [] 0s} 127.0.0.1 35084 <nil> <nil>}
I0816 00:07:01.633021 2407112 main.go:141] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I0816 00:07:01.808217 2407112 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0816 00:07:01.808256 2407112 machine.go:96] duration metric: took 4.554960543s to provisionDockerMachine
I0816 00:07:01.808270 2407112 start.go:293] postStartSetup for "old-k8s-version-894472" (driver="docker")
I0816 00:07:01.808286 2407112 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0816 00:07:01.808433 2407112 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0816 00:07:01.808497 2407112 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-894472
I0816 00:07:01.829776 2407112 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35084 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/old-k8s-version-894472/id_rsa Username:docker}
I0816 00:07:01.932858 2407112 ssh_runner.go:195] Run: cat /etc/os-release
I0816 00:07:01.938971 2407112 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0816 00:07:01.939136 2407112 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0816 00:07:01.939248 2407112 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0816 00:07:01.939316 2407112 info.go:137] Remote host: Ubuntu 22.04.4 LTS
I0816 00:07:01.939351 2407112 filesync.go:126] Scanning /home/jenkins/minikube-integration/19452-2026001/.minikube/addons for local assets ...
I0816 00:07:01.939510 2407112 filesync.go:126] Scanning /home/jenkins/minikube-integration/19452-2026001/.minikube/files for local assets ...
I0816 00:07:01.939680 2407112 filesync.go:149] local asset: /home/jenkins/minikube-integration/19452-2026001/.minikube/files/etc/ssl/certs/20313962.pem -> 20313962.pem in /etc/ssl/certs
I0816 00:07:01.939860 2407112 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I0816 00:07:01.952332 2407112 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/files/etc/ssl/certs/20313962.pem --> /etc/ssl/certs/20313962.pem (1708 bytes)
I0816 00:07:01.988689 2407112 start.go:296] duration metric: took 180.394597ms for postStartSetup
I0816 00:07:01.988871 2407112 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0816 00:07:01.988958 2407112 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-894472
I0816 00:07:02.013667 2407112 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35084 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/old-k8s-version-894472/id_rsa Username:docker}
I0816 00:07:02.116008 2407112 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0816 00:07:02.124115 2407112 fix.go:56] duration metric: took 5.251770675s for fixHost
I0816 00:07:02.124196 2407112 start.go:83] releasing machines lock for "old-k8s-version-894472", held for 5.251886275s
I0816 00:07:02.124306 2407112 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-894472
I0816 00:07:02.145566 2407112 ssh_runner.go:195] Run: cat /version.json
I0816 00:07:02.145654 2407112 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-894472
I0816 00:07:02.145861 2407112 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0816 00:07:02.145940 2407112 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-894472
I0816 00:07:02.165393 2407112 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35084 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/old-k8s-version-894472/id_rsa Username:docker}
I0816 00:07:02.191438 2407112 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35084 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/old-k8s-version-894472/id_rsa Username:docker}
I0816 00:07:02.406009 2407112 ssh_runner.go:195] Run: systemctl --version
I0816 00:07:02.410660 2407112 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0816 00:07:02.415242 2407112 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0816 00:07:02.436603 2407112 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0816 00:07:02.436768 2407112 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *bridge* -not -name *podman* -not -name *.mk_disabled -printf "%p, " -exec sh -c "sudo sed -i -r -e '/"dst": ".*:.*"/d' -e 's|^(.*)"dst": (.*)[,*]$|\1"dst": \2|g' -e '/"subnet": ".*:.*"/d' -e 's|^(.*)"subnet": ".*"(.*)[,*]$|\1"subnet": "10.244.0.0/16"\2|g' {}" ;
I0816 00:07:02.460074 2407112 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *podman* -not -name *.mk_disabled -printf "%p, " -exec sh -c "sudo sed -i -r -e 's|^(.*)"subnet": ".*"(.*)$|\1"subnet": "10.244.0.0/16"\2|g' -e 's|^(.*)"gateway": ".*"(.*)$|\1"gateway": "10.244.0.1"\2|g' {}" ;
I0816 00:07:02.480751 2407112 cni.go:308] configured [/etc/cni/net.d/100-crio-bridge.conf, /etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
I0816 00:07:02.480823 2407112 start.go:495] detecting cgroup driver to use...
I0816 00:07:02.480871 2407112 detect.go:187] detected "cgroupfs" cgroup driver on host os
I0816 00:07:02.480999 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0816 00:07:02.503673 2407112 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.2"|' /etc/containerd/config.toml"
I0816 00:07:02.515774 2407112 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0816 00:07:02.527763 2407112 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I0816 00:07:02.527880 2407112 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I0816 00:07:02.540061 2407112 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0816 00:07:02.551481 2407112 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0816 00:07:02.562500 2407112 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0816 00:07:02.574034 2407112 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0816 00:07:02.586375 2407112 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0816 00:07:02.598054 2407112 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0816 00:07:02.608932 2407112 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0816 00:07:02.622182 2407112 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0816 00:07:02.742535 2407112 ssh_runner.go:195] Run: sudo systemctl restart containerd
I0816 00:07:02.903514 2407112 start.go:495] detecting cgroup driver to use...
I0816 00:07:02.903563 2407112 detect.go:187] detected "cgroupfs" cgroup driver on host os
I0816 00:07:02.903623 2407112 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I0816 00:07:02.938058 2407112 cruntime.go:279] skipping containerd shutdown because we are bound to it
I0816 00:07:02.938150 2407112 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0816 00:07:02.955198 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/dockershim.sock
" | sudo tee /etc/crictl.yaml"
I0816 00:07:02.985653 2407112 ssh_runner.go:195] Run: which cri-dockerd
I0816 00:07:02.996916 2407112 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I0816 00:07:03.017552 2407112 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (189 bytes)
I0816 00:07:03.051778 2407112 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I0816 00:07:03.213417 2407112 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I0816 00:07:03.398809 2407112 docker.go:574] configuring docker to use "cgroupfs" as cgroup driver...
I0816 00:07:03.398972 2407112 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
I0816 00:07:03.430030 2407112 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0816 00:07:03.595160 2407112 ssh_runner.go:195] Run: sudo systemctl restart docker
I0816 00:07:04.243426 2407112 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0816 00:07:04.274355 2407112 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0816 00:07:04.314372 2407112 out.go:235] * Preparing Kubernetes v1.20.0 on Docker 27.1.2 ...
I0816 00:07:04.314465 2407112 cli_runner.go:164] Run: docker network inspect old-k8s-version-894472 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0816 00:07:04.338351 2407112 ssh_runner.go:195] Run: grep 192.168.85.1 host.minikube.internal$ /etc/hosts
I0816 00:07:04.342439 2407112 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.85.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0816 00:07:04.354667 2407112 kubeadm.go:883] updating cluster {Name:old-k8s-version-894472 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.20.0 ClusterName:old-k8s-version-894472 Namespace:default APIServerHAVIP: APIServerName:minik
ubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin: FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.20.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkin
s:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I0816 00:07:04.354805 2407112 preload.go:131] Checking if preload exists for k8s version v1.20.0 and runtime docker
I0816 00:07:04.354861 2407112 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0816 00:07:04.374314 2407112 docker.go:685] Got preloaded images: -- stdout --
gcr.io/k8s-minikube/storage-provisioner:v5
k8s.gcr.io/kube-proxy:v1.20.0
registry.k8s.io/kube-proxy:v1.20.0
k8s.gcr.io/kube-controller-manager:v1.20.0
registry.k8s.io/kube-controller-manager:v1.20.0
k8s.gcr.io/kube-apiserver:v1.20.0
registry.k8s.io/kube-apiserver:v1.20.0
k8s.gcr.io/kube-scheduler:v1.20.0
registry.k8s.io/kube-scheduler:v1.20.0
k8s.gcr.io/etcd:3.4.13-0
registry.k8s.io/etcd:3.4.13-0
k8s.gcr.io/coredns:1.7.0
registry.k8s.io/coredns:1.7.0
k8s.gcr.io/pause:3.2
registry.k8s.io/pause:3.2
gcr.io/k8s-minikube/busybox:1.28.4-glibc
-- /stdout --
I0816 00:07:04.374337 2407112 docker.go:615] Images already preloaded, skipping extraction
I0816 00:07:04.374398 2407112 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0816 00:07:04.393054 2407112 docker.go:685] Got preloaded images: -- stdout --
gcr.io/k8s-minikube/storage-provisioner:v5
k8s.gcr.io/kube-proxy:v1.20.0
registry.k8s.io/kube-proxy:v1.20.0
k8s.gcr.io/kube-apiserver:v1.20.0
registry.k8s.io/kube-apiserver:v1.20.0
k8s.gcr.io/kube-controller-manager:v1.20.0
registry.k8s.io/kube-controller-manager:v1.20.0
k8s.gcr.io/kube-scheduler:v1.20.0
registry.k8s.io/kube-scheduler:v1.20.0
k8s.gcr.io/etcd:3.4.13-0
registry.k8s.io/etcd:3.4.13-0
k8s.gcr.io/coredns:1.7.0
registry.k8s.io/coredns:1.7.0
k8s.gcr.io/pause:3.2
registry.k8s.io/pause:3.2
gcr.io/k8s-minikube/busybox:1.28.4-glibc
-- /stdout --
I0816 00:07:04.393081 2407112 cache_images.go:84] Images are preloaded, skipping loading
I0816 00:07:04.393093 2407112 kubeadm.go:934] updating node { 192.168.85.2 8443 v1.20.0 docker true true} ...
I0816 00:07:04.393221 2407112 kubeadm.go:946] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.20.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --container-runtime=docker --hostname-override=old-k8s-version-894472 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.85.2
[Install]
config:
{KubernetesVersion:v1.20.0 ClusterName:old-k8s-version-894472 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin: FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I0816 00:07:04.393302 2407112 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I0816 00:07:04.488139 2407112 cni.go:84] Creating CNI manager for ""
I0816 00:07:04.488170 2407112 cni.go:162] CNI unnecessary in this configuration, recommending no CNI
I0816 00:07:04.488179 2407112 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I0816 00:07:04.488199 2407112 kubeadm.go:181] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.85.2 APIServerPort:8443 KubernetesVersion:v1.20.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:old-k8s-version-894472 NodeName:old-k8s-version-894472 DNSDomain:cluster.local CRISocket:/var/run/dockershim.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.85.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.85.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticP
odPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:false}
I0816 00:07:04.488353 2407112 kubeadm.go:187] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.85.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: /var/run/dockershim.sock
name: "old-k8s-version-894472"
kubeletExtraArgs:
node-ip: 192.168.85.2
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.85.2"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
dns:
type: CoreDNS
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.20.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
I0816 00:07:04.488426 2407112 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.20.0
I0816 00:07:04.502377 2407112 binaries.go:44] Found k8s binaries, skipping transfer
I0816 00:07:04.502449 2407112 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I0816 00:07:04.512344 2407112 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (348 bytes)
I0816 00:07:04.534450 2407112 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0816 00:07:04.559341 2407112 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2118 bytes)
I0816 00:07:04.581498 2407112 ssh_runner.go:195] Run: grep 192.168.85.2 control-plane.minikube.internal$ /etc/hosts
I0816 00:07:04.585190 2407112 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.85.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0816 00:07:04.596978 2407112 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0816 00:07:04.712358 2407112 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0816 00:07:04.728930 2407112 certs.go:68] Setting up /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/old-k8s-version-894472 for IP: 192.168.85.2
I0816 00:07:04.728956 2407112 certs.go:194] generating shared ca certs ...
I0816 00:07:04.728973 2407112 certs.go:226] acquiring lock for ca certs: {Name:mkddf294a5c2bc6874920ab9b3e5ac4767302c25 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0816 00:07:04.729115 2407112 certs.go:235] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/19452-2026001/.minikube/ca.key
I0816 00:07:04.729163 2407112 certs.go:235] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/19452-2026001/.minikube/proxy-client-ca.key
I0816 00:07:04.729174 2407112 certs.go:256] generating profile certs ...
I0816 00:07:04.729258 2407112 certs.go:359] skipping valid signed profile cert regeneration for "minikube-user": /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/old-k8s-version-894472/client.key
I0816 00:07:04.729331 2407112 certs.go:359] skipping valid signed profile cert regeneration for "minikube": /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/old-k8s-version-894472/apiserver.key.8284e299
I0816 00:07:04.729376 2407112 certs.go:359] skipping valid signed profile cert regeneration for "aggregator": /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/old-k8s-version-894472/proxy-client.key
I0816 00:07:04.729502 2407112 certs.go:484] found cert: /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/2031396.pem (1338 bytes)
W0816 00:07:04.729537 2407112 certs.go:480] ignoring /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/2031396_empty.pem, impossibly tiny 0 bytes
I0816 00:07:04.729550 2407112 certs.go:484] found cert: /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/ca-key.pem (1679 bytes)
I0816 00:07:04.729576 2407112 certs.go:484] found cert: /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/ca.pem (1082 bytes)
I0816 00:07:04.729685 2407112 certs.go:484] found cert: /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/cert.pem (1123 bytes)
I0816 00:07:04.729715 2407112 certs.go:484] found cert: /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/key.pem (1675 bytes)
I0816 00:07:04.729767 2407112 certs.go:484] found cert: /home/jenkins/minikube-integration/19452-2026001/.minikube/files/etc/ssl/certs/20313962.pem (1708 bytes)
I0816 00:07:04.730391 2407112 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0816 00:07:04.790545 2407112 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I0816 00:07:04.872879 2407112 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0816 00:07:04.962973 2407112 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I0816 00:07:05.054747 2407112 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/old-k8s-version-894472/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1432 bytes)
I0816 00:07:05.189932 2407112 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/old-k8s-version-894472/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I0816 00:07:05.279413 2407112 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/old-k8s-version-894472/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0816 00:07:05.312601 2407112 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/old-k8s-version-894472/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I0816 00:07:05.348127 2407112 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0816 00:07:05.383332 2407112 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/2031396.pem --> /usr/share/ca-certificates/2031396.pem (1338 bytes)
I0816 00:07:05.416757 2407112 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/files/etc/ssl/certs/20313962.pem --> /usr/share/ca-certificates/20313962.pem (1708 bytes)
I0816 00:07:05.456103 2407112 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I0816 00:07:05.491819 2407112 ssh_runner.go:195] Run: openssl version
I0816 00:07:05.498791 2407112 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0816 00:07:05.514242 2407112 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0816 00:07:05.520526 2407112 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Aug 15 23:06 /usr/share/ca-certificates/minikubeCA.pem
I0816 00:07:05.520604 2407112 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0816 00:07:05.535660 2407112 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I0816 00:07:05.551416 2407112 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/2031396.pem && ln -fs /usr/share/ca-certificates/2031396.pem /etc/ssl/certs/2031396.pem"
I0816 00:07:05.562633 2407112 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/2031396.pem
I0816 00:07:05.572066 2407112 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Aug 15 23:13 /usr/share/ca-certificates/2031396.pem
I0816 00:07:05.572134 2407112 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/2031396.pem
I0816 00:07:05.582682 2407112 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/2031396.pem /etc/ssl/certs/51391683.0"
I0816 00:07:05.593525 2407112 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/20313962.pem && ln -fs /usr/share/ca-certificates/20313962.pem /etc/ssl/certs/20313962.pem"
I0816 00:07:05.607678 2407112 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/20313962.pem
I0816 00:07:05.611743 2407112 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Aug 15 23:13 /usr/share/ca-certificates/20313962.pem
I0816 00:07:05.611810 2407112 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/20313962.pem
I0816 00:07:05.624432 2407112 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/20313962.pem /etc/ssl/certs/3ec20f2e.0"
I0816 00:07:05.638734 2407112 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0816 00:07:05.642770 2407112 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-etcd-client.crt -checkend 86400
I0816 00:07:05.651056 2407112 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-kubelet-client.crt -checkend 86400
I0816 00:07:05.661346 2407112 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/server.crt -checkend 86400
I0816 00:07:05.670401 2407112 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/healthcheck-client.crt -checkend 86400
I0816 00:07:05.680326 2407112 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/peer.crt -checkend 86400
I0816 00:07:05.691198 2407112 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/front-proxy-client.crt -checkend 86400
I0816 00:07:05.703145 2407112 kubeadm.go:392] StartCluster: {Name:old-k8s-version-894472 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.20.0 ClusterName:old-k8s-version-894472 Namespace:default APIServerHAVIP: APIServerName:minikube
CA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin: FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.20.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/
minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0816 00:07:05.703342 2407112 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I0816 00:07:05.738014 2407112 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0816 00:07:05.748400 2407112 kubeadm.go:408] found existing configuration files, will attempt cluster restart
I0816 00:07:05.748423 2407112 kubeadm.go:593] restartPrimaryControlPlane start ...
I0816 00:07:05.748492 2407112 ssh_runner.go:195] Run: sudo test -d /data/minikube
I0816 00:07:05.761251 2407112 kubeadm.go:130] /data/minikube skipping compat symlinks: sudo test -d /data/minikube: Process exited with status 1
stdout:
stderr:
I0816 00:07:05.761923 2407112 kubeconfig.go:47] verify endpoint returned: get endpoint: "old-k8s-version-894472" does not appear in /home/jenkins/minikube-integration/19452-2026001/kubeconfig
I0816 00:07:05.762213 2407112 kubeconfig.go:62] /home/jenkins/minikube-integration/19452-2026001/kubeconfig needs updating (will repair): [kubeconfig missing "old-k8s-version-894472" cluster setting kubeconfig missing "old-k8s-version-894472" context setting]
I0816 00:07:05.762733 2407112 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19452-2026001/kubeconfig: {Name:mkb1a4d12f06c0f193e7cb7c118eeb997c3969bc Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0816 00:07:05.764582 2407112 ssh_runner.go:195] Run: sudo diff -u /var/tmp/minikube/kubeadm.yaml /var/tmp/minikube/kubeadm.yaml.new
I0816 00:07:05.775566 2407112 kubeadm.go:630] The running cluster does not require reconfiguration: 192.168.85.2
I0816 00:07:05.775599 2407112 kubeadm.go:597] duration metric: took 27.169035ms to restartPrimaryControlPlane
I0816 00:07:05.775608 2407112 kubeadm.go:394] duration metric: took 72.496933ms to StartCluster
I0816 00:07:05.775631 2407112 settings.go:142] acquiring lock: {Name:mkd932093f6b6db884e5d5f97d2ea9be134ab309 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0816 00:07:05.775691 2407112 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/19452-2026001/kubeconfig
I0816 00:07:05.776766 2407112 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19452-2026001/kubeconfig: {Name:mkb1a4d12f06c0f193e7cb7c118eeb997c3969bc Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0816 00:07:05.776976 2407112 start.go:235] Will wait 6m0s for node &{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.20.0 ContainerRuntime:docker ControlPlane:true Worker:true}
I0816 00:07:05.777314 2407112 addons.go:507] enable addons start: toEnable=map[ambassador:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:true default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false helm-tiller:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:true nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I0816 00:07:05.777409 2407112 addons.go:69] Setting storage-provisioner=true in profile "old-k8s-version-894472"
I0816 00:07:05.777436 2407112 addons.go:234] Setting addon storage-provisioner=true in "old-k8s-version-894472"
W0816 00:07:05.777444 2407112 addons.go:243] addon storage-provisioner should already be in state true
I0816 00:07:05.777468 2407112 host.go:66] Checking if "old-k8s-version-894472" exists ...
I0816 00:07:05.778242 2407112 cli_runner.go:164] Run: docker container inspect old-k8s-version-894472 --format={{.State.Status}}
I0816 00:07:05.778448 2407112 config.go:182] Loaded profile config "old-k8s-version-894472": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.20.0
I0816 00:07:05.778494 2407112 addons.go:69] Setting default-storageclass=true in profile "old-k8s-version-894472"
I0816 00:07:05.778527 2407112 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "old-k8s-version-894472"
I0816 00:07:05.778768 2407112 cli_runner.go:164] Run: docker container inspect old-k8s-version-894472 --format={{.State.Status}}
I0816 00:07:05.779142 2407112 addons.go:69] Setting metrics-server=true in profile "old-k8s-version-894472"
I0816 00:07:05.779169 2407112 addons.go:234] Setting addon metrics-server=true in "old-k8s-version-894472"
W0816 00:07:05.779178 2407112 addons.go:243] addon metrics-server should already be in state true
I0816 00:07:05.779199 2407112 host.go:66] Checking if "old-k8s-version-894472" exists ...
I0816 00:07:05.779588 2407112 cli_runner.go:164] Run: docker container inspect old-k8s-version-894472 --format={{.State.Status}}
I0816 00:07:05.780076 2407112 addons.go:69] Setting dashboard=true in profile "old-k8s-version-894472"
I0816 00:07:05.780103 2407112 addons.go:234] Setting addon dashboard=true in "old-k8s-version-894472"
W0816 00:07:05.780110 2407112 addons.go:243] addon dashboard should already be in state true
I0816 00:07:05.780132 2407112 host.go:66] Checking if "old-k8s-version-894472" exists ...
I0816 00:07:05.780521 2407112 cli_runner.go:164] Run: docker container inspect old-k8s-version-894472 --format={{.State.Status}}
I0816 00:07:05.781340 2407112 out.go:177] * Verifying Kubernetes components...
I0816 00:07:05.786896 2407112 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0816 00:07:05.846736 2407112 out.go:177] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I0816 00:07:05.848963 2407112 addons.go:431] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0816 00:07:05.848982 2407112 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I0816 00:07:05.849044 2407112 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-894472
I0816 00:07:05.849811 2407112 out.go:177] - Using image fake.domain/registry.k8s.io/echoserver:1.4
I0816 00:07:05.852051 2407112 out.go:177] - Using image docker.io/kubernetesui/dashboard:v2.7.0
I0816 00:07:05.854285 2407112 out.go:177] - Using image registry.k8s.io/echoserver:1.4
I0816 00:07:05.854342 2407112 addons.go:431] installing /etc/kubernetes/addons/metrics-apiservice.yaml
I0816 00:07:05.854362 2407112 ssh_runner.go:362] scp metrics-server/metrics-apiservice.yaml --> /etc/kubernetes/addons/metrics-apiservice.yaml (424 bytes)
I0816 00:07:05.854426 2407112 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-894472
I0816 00:07:05.855926 2407112 addons.go:431] installing /etc/kubernetes/addons/dashboard-ns.yaml
I0816 00:07:05.855950 2407112 ssh_runner.go:362] scp dashboard/dashboard-ns.yaml --> /etc/kubernetes/addons/dashboard-ns.yaml (759 bytes)
I0816 00:07:05.856011 2407112 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-894472
I0816 00:07:05.862793 2407112 addons.go:234] Setting addon default-storageclass=true in "old-k8s-version-894472"
W0816 00:07:05.862818 2407112 addons.go:243] addon default-storageclass should already be in state true
I0816 00:07:05.862845 2407112 host.go:66] Checking if "old-k8s-version-894472" exists ...
I0816 00:07:05.863265 2407112 cli_runner.go:164] Run: docker container inspect old-k8s-version-894472 --format={{.State.Status}}
I0816 00:07:05.937765 2407112 addons.go:431] installing /etc/kubernetes/addons/storageclass.yaml
I0816 00:07:05.937786 2407112 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0816 00:07:05.937849 2407112 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-894472
I0816 00:07:05.938903 2407112 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35084 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/old-k8s-version-894472/id_rsa Username:docker}
I0816 00:07:05.939799 2407112 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35084 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/old-k8s-version-894472/id_rsa Username:docker}
I0816 00:07:05.949373 2407112 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35084 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/old-k8s-version-894472/id_rsa Username:docker}
I0816 00:07:05.981218 2407112 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35084 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/old-k8s-version-894472/id_rsa Username:docker}
I0816 00:07:06.080695 2407112 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0816 00:07:06.150247 2407112 node_ready.go:35] waiting up to 6m0s for node "old-k8s-version-894472" to be "Ready" ...
I0816 00:07:06.191728 2407112 addons.go:431] installing /etc/kubernetes/addons/metrics-server-deployment.yaml
I0816 00:07:06.191762 2407112 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-deployment.yaml (1825 bytes)
I0816 00:07:06.239892 2407112 addons.go:431] installing /etc/kubernetes/addons/dashboard-clusterrole.yaml
I0816 00:07:06.239922 2407112 ssh_runner.go:362] scp dashboard/dashboard-clusterrole.yaml --> /etc/kubernetes/addons/dashboard-clusterrole.yaml (1001 bytes)
I0816 00:07:06.258370 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0816 00:07:06.270746 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0816 00:07:06.301914 2407112 addons.go:431] installing /etc/kubernetes/addons/metrics-server-rbac.yaml
I0816 00:07:06.301941 2407112 ssh_runner.go:362] scp metrics-server/metrics-server-rbac.yaml --> /etc/kubernetes/addons/metrics-server-rbac.yaml (2175 bytes)
I0816 00:07:06.363677 2407112 addons.go:431] installing /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml
I0816 00:07:06.363704 2407112 ssh_runner.go:362] scp dashboard/dashboard-clusterrolebinding.yaml --> /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml (1018 bytes)
I0816 00:07:06.391527 2407112 addons.go:431] installing /etc/kubernetes/addons/metrics-server-service.yaml
I0816 00:07:06.391554 2407112 ssh_runner.go:362] scp metrics-server/metrics-server-service.yaml --> /etc/kubernetes/addons/metrics-server-service.yaml (446 bytes)
I0816 00:07:06.484518 2407112 addons.go:431] installing /etc/kubernetes/addons/dashboard-configmap.yaml
I0816 00:07:06.484554 2407112 ssh_runner.go:362] scp dashboard/dashboard-configmap.yaml --> /etc/kubernetes/addons/dashboard-configmap.yaml (837 bytes)
I0816 00:07:06.548418 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
I0816 00:07:06.627810 2407112 addons.go:431] installing /etc/kubernetes/addons/dashboard-dp.yaml
I0816 00:07:06.627839 2407112 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/dashboard-dp.yaml (4201 bytes)
W0816 00:07:06.641267 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I0816 00:07:06.641316 2407112 retry.go:31] will retry after 127.065326ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
W0816 00:07:06.641386 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I0816 00:07:06.641400 2407112 retry.go:31] will retry after 277.783153ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I0816 00:07:06.701939 2407112 addons.go:431] installing /etc/kubernetes/addons/dashboard-role.yaml
I0816 00:07:06.701977 2407112 ssh_runner.go:362] scp dashboard/dashboard-role.yaml --> /etc/kubernetes/addons/dashboard-role.yaml (1724 bytes)
I0816 00:07:06.750955 2407112 addons.go:431] installing /etc/kubernetes/addons/dashboard-rolebinding.yaml
I0816 00:07:06.750997 2407112 ssh_runner.go:362] scp dashboard/dashboard-rolebinding.yaml --> /etc/kubernetes/addons/dashboard-rolebinding.yaml (1046 bytes)
I0816 00:07:06.769342 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml
W0816 00:07:06.813658 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I0816 00:07:06.813693 2407112 retry.go:31] will retry after 153.642197ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I0816 00:07:06.836149 2407112 addons.go:431] installing /etc/kubernetes/addons/dashboard-sa.yaml
I0816 00:07:06.836175 2407112 ssh_runner.go:362] scp dashboard/dashboard-sa.yaml --> /etc/kubernetes/addons/dashboard-sa.yaml (837 bytes)
I0816 00:07:06.911117 2407112 addons.go:431] installing /etc/kubernetes/addons/dashboard-secret.yaml
I0816 00:07:06.911159 2407112 ssh_runner.go:362] scp dashboard/dashboard-secret.yaml --> /etc/kubernetes/addons/dashboard-secret.yaml (1389 bytes)
I0816 00:07:06.919341 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml
I0816 00:07:06.967699 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
I0816 00:07:07.025039 2407112 addons.go:431] installing /etc/kubernetes/addons/dashboard-svc.yaml
I0816 00:07:07.025069 2407112 ssh_runner.go:362] scp dashboard/dashboard-svc.yaml --> /etc/kubernetes/addons/dashboard-svc.yaml (1294 bytes)
W0816 00:07:07.029064 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I0816 00:07:07.029102 2407112 retry.go:31] will retry after 509.569838ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I0816 00:07:07.088653 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
W0816 00:07:07.308033 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I0816 00:07:07.308110 2407112 retry.go:31] will retry after 539.39958ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
W0816 00:07:07.373234 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I0816 00:07:07.373320 2407112 retry.go:31] will retry after 400.225326ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
W0816 00:07:07.402944 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I0816 00:07:07.403044 2407112 retry.go:31] will retry after 342.699121ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I0816 00:07:07.539189 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml
W0816 00:07:07.655104 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I0816 00:07:07.655181 2407112 retry.go:31] will retry after 696.953315ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I0816 00:07:07.746537 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
I0816 00:07:07.773936 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
I0816 00:07:07.848437 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml
W0816 00:07:07.926149 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I0816 00:07:07.926231 2407112 retry.go:31] will retry after 332.717558ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
W0816 00:07:08.022145 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I0816 00:07:08.022230 2407112 retry.go:31] will retry after 610.563639ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
W0816 00:07:08.082574 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I0816 00:07:08.082667 2407112 retry.go:31] will retry after 378.792115ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I0816 00:07:08.151166 2407112 node_ready.go:53] error getting node "old-k8s-version-894472": Get "https://192.168.85.2:8443/api/v1/nodes/old-k8s-version-894472": dial tcp 192.168.85.2:8443: connect: connection refused
I0816 00:07:08.259415 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
I0816 00:07:08.352929 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml
W0816 00:07:08.404103 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I0816 00:07:08.404189 2407112 retry.go:31] will retry after 403.549215ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I0816 00:07:08.462482 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml
W0816 00:07:08.522695 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I0816 00:07:08.522775 2407112 retry.go:31] will retry after 935.790118ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I0816 00:07:08.633975 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
W0816 00:07:08.775232 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I0816 00:07:08.775313 2407112 retry.go:31] will retry after 819.876638ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I0816 00:07:08.808618 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
W0816 00:07:09.055251 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I0816 00:07:09.055334 2407112 retry.go:31] will retry after 1.191679901s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
W0816 00:07:09.175621 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I0816 00:07:09.175714 2407112 retry.go:31] will retry after 632.221449ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I0816 00:07:09.459073 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml
I0816 00:07:09.595794 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml
W0816 00:07:09.645174 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I0816 00:07:09.645258 2407112 retry.go:31] will retry after 932.634043ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I0816 00:07:09.808967 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
W0816 00:07:09.950456 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I0816 00:07:09.950487 2407112 retry.go:31] will retry after 1.49646314s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
W0816 00:07:10.197148 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I0816 00:07:10.197180 2407112 retry.go:31] will retry after 1.403935272s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I0816 00:07:10.247495 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
W0816 00:07:10.555903 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I0816 00:07:10.555934 2407112 retry.go:31] will retry after 1.661575233s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I0816 00:07:10.578252 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml
I0816 00:07:10.650868 2407112 node_ready.go:53] error getting node "old-k8s-version-894472": Get "https://192.168.85.2:8443/api/v1/nodes/old-k8s-version-894472": dial tcp 192.168.85.2:8443: connect: connection refused
W0816 00:07:10.893554 2407112 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I0816 00:07:10.893582 2407112 retry.go:31] will retry after 2.548067216s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I0816 00:07:11.447619 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml
I0816 00:07:11.601312 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
I0816 00:07:12.218552 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
I0816 00:07:13.442147 2407112 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml
I0816 00:07:21.651926 2407112 node_ready.go:53] error getting node "old-k8s-version-894472": Get "https://192.168.85.2:8443/api/v1/nodes/old-k8s-version-894472": net/http: TLS handshake timeout
I0816 00:07:22.623851 2407112 node_ready.go:49] node "old-k8s-version-894472" has status "Ready":"True"
I0816 00:07:22.623879 2407112 node_ready.go:38] duration metric: took 16.473596765s for node "old-k8s-version-894472" to be "Ready" ...
I0816 00:07:22.623890 2407112 pod_ready.go:36] extra waiting up to 6m0s for all system-critical pods including labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0816 00:07:22.762212 2407112 pod_ready.go:79] waiting up to 6m0s for pod "coredns-74ff55c5b-9l4p8" in "kube-system" namespace to be "Ready" ...
I0816 00:07:23.293536 2407112 pod_ready.go:98] error getting pod "coredns-74ff55c5b-9l4p8" in "kube-system" namespace (skipping!): pods "coredns-74ff55c5b-9l4p8" not found
I0816 00:07:23.293634 2407112 pod_ready.go:82] duration metric: took 531.341411ms for pod "coredns-74ff55c5b-9l4p8" in "kube-system" namespace to be "Ready" ...
E0816 00:07:23.293662 2407112 pod_ready.go:67] WaitExtra: waitPodCondition: error getting pod "coredns-74ff55c5b-9l4p8" in "kube-system" namespace (skipping!): pods "coredns-74ff55c5b-9l4p8" not found
I0816 00:07:23.293703 2407112 pod_ready.go:79] waiting up to 6m0s for pod "coredns-74ff55c5b-jrtq9" in "kube-system" namespace to be "Ready" ...
I0816 00:07:24.108036 2407112 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: (12.660384019s)
I0816 00:07:24.481961 2407112 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: (12.263370982s)
I0816 00:07:24.482049 2407112 addons.go:475] Verifying addon metrics-server=true in "old-k8s-version-894472"
I0816 00:07:24.482056 2407112 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: (12.880707038s)
I0816 00:07:24.482026 2407112 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: (11.039847175s)
I0816 00:07:24.484468 2407112 out.go:177] * Some dashboard features require the metrics-server addon. To enable all features please run:
minikube -p old-k8s-version-894472 addons enable metrics-server
I0816 00:07:24.486656 2407112 out.go:177] * Enabled addons: default-storageclass, metrics-server, storage-provisioner, dashboard
I0816 00:07:24.488770 2407112 addons.go:510] duration metric: took 18.711448659s for enable addons: enabled=[default-storageclass metrics-server storage-provisioner dashboard]
I0816 00:07:25.310415 2407112 pod_ready.go:103] pod "coredns-74ff55c5b-jrtq9" in "kube-system" namespace has status "Ready":"False"
I0816 00:07:27.799471 2407112 pod_ready.go:103] pod "coredns-74ff55c5b-jrtq9" in "kube-system" namespace has status "Ready":"False"
I0816 00:07:29.799679 2407112 pod_ready.go:103] pod "coredns-74ff55c5b-jrtq9" in "kube-system" namespace has status "Ready":"False"
I0816 00:07:31.800888 2407112 pod_ready.go:103] pod "coredns-74ff55c5b-jrtq9" in "kube-system" namespace has status "Ready":"False"
I0816 00:07:33.799943 2407112 pod_ready.go:93] pod "coredns-74ff55c5b-jrtq9" in "kube-system" namespace has status "Ready":"True"
I0816 00:07:33.799972 2407112 pod_ready.go:82] duration metric: took 10.506239661s for pod "coredns-74ff55c5b-jrtq9" in "kube-system" namespace to be "Ready" ...
I0816 00:07:33.799984 2407112 pod_ready.go:79] waiting up to 6m0s for pod "etcd-old-k8s-version-894472" in "kube-system" namespace to be "Ready" ...
I0816 00:07:35.806471 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
I0816 00:07:37.806695 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
I0816 00:07:40.306706 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
I0816 00:07:42.357347 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
I0816 00:07:44.814869 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
I0816 00:07:47.307295 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
I0816 00:07:49.307405 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
I0816 00:07:51.806545 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
I0816 00:07:54.308419 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
I0816 00:07:56.805976 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
I0816 00:07:58.806677 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
I0816 00:08:00.806924 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
I0816 00:08:03.307232 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
I0816 00:08:05.806416 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
I0816 00:08:07.806957 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
I0816 00:08:10.317170 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
I0816 00:08:12.806238 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
I0816 00:08:14.806345 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
I0816 00:08:16.806766 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
I0816 00:08:18.807296 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
I0816 00:08:21.306239 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
I0816 00:08:23.307345 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
I0816 00:08:25.806390 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
I0816 00:08:28.306821 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
I0816 00:08:30.307617 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
I0816 00:08:32.806490 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
I0816 00:08:34.807202 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
I0816 00:08:37.305527 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
I0816 00:08:39.306419 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
I0816 00:08:41.306562 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
I0816 00:08:43.306649 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
I0816 00:08:45.806375 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
I0816 00:08:47.810884 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
I0816 00:08:50.307666 2407112 pod_ready.go:103] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"False"
I0816 00:08:51.306835 2407112 pod_ready.go:93] pod "etcd-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"True"
I0816 00:08:51.306858 2407112 pod_ready.go:82] duration metric: took 1m17.506866593s for pod "etcd-old-k8s-version-894472" in "kube-system" namespace to be "Ready" ...
I0816 00:08:51.306869 2407112 pod_ready.go:79] waiting up to 6m0s for pod "kube-apiserver-old-k8s-version-894472" in "kube-system" namespace to be "Ready" ...
I0816 00:08:51.312013 2407112 pod_ready.go:93] pod "kube-apiserver-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"True"
I0816 00:08:51.312039 2407112 pod_ready.go:82] duration metric: took 5.161937ms for pod "kube-apiserver-old-k8s-version-894472" in "kube-system" namespace to be "Ready" ...
I0816 00:08:51.312050 2407112 pod_ready.go:79] waiting up to 6m0s for pod "kube-controller-manager-old-k8s-version-894472" in "kube-system" namespace to be "Ready" ...
I0816 00:08:51.317635 2407112 pod_ready.go:93] pod "kube-controller-manager-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"True"
I0816 00:08:51.317709 2407112 pod_ready.go:82] duration metric: took 5.650024ms for pod "kube-controller-manager-old-k8s-version-894472" in "kube-system" namespace to be "Ready" ...
I0816 00:08:51.317737 2407112 pod_ready.go:79] waiting up to 6m0s for pod "kube-proxy-4n8ls" in "kube-system" namespace to be "Ready" ...
I0816 00:08:51.324382 2407112 pod_ready.go:93] pod "kube-proxy-4n8ls" in "kube-system" namespace has status "Ready":"True"
I0816 00:08:51.324409 2407112 pod_ready.go:82] duration metric: took 6.647801ms for pod "kube-proxy-4n8ls" in "kube-system" namespace to be "Ready" ...
I0816 00:08:51.324422 2407112 pod_ready.go:79] waiting up to 6m0s for pod "kube-scheduler-old-k8s-version-894472" in "kube-system" namespace to be "Ready" ...
I0816 00:08:52.332136 2407112 pod_ready.go:93] pod "kube-scheduler-old-k8s-version-894472" in "kube-system" namespace has status "Ready":"True"
I0816 00:08:52.332161 2407112 pod_ready.go:82] duration metric: took 1.007730969s for pod "kube-scheduler-old-k8s-version-894472" in "kube-system" namespace to be "Ready" ...
I0816 00:08:52.332173 2407112 pod_ready.go:79] waiting up to 6m0s for pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace to be "Ready" ...
I0816 00:08:54.338045 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:08:56.839222 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:08:59.339056 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:09:01.842610 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:09:04.338232 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:09:06.338527 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:09:08.839007 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:09:10.843561 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:09:13.338661 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:09:15.339174 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:09:17.340960 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:09:19.838262 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:09:21.839210 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:09:24.340073 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:09:26.839863 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:09:29.337558 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:09:31.338827 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:09:33.340313 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:09:35.838874 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:09:38.338237 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:09:40.840120 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:09:43.337461 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:09:45.339730 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:09:47.838705 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:09:50.345545 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:09:52.838982 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:09:55.338624 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:09:57.838361 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:09:59.839809 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:10:02.339011 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:10:04.838809 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:10:07.337842 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:10:09.340016 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:10:11.839360 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:10:13.843229 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:10:16.338914 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:10:18.340058 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:10:20.837772 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:10:22.838039 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:10:24.838979 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:10:27.337589 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:10:29.337725 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:10:31.338409 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:10:33.339134 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:10:35.838881 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:10:37.842939 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:10:40.338883 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:10:42.837905 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:10:44.842032 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:10:47.338504 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:10:49.839334 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:10:52.339057 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:10:54.838761 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:10:57.338096 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:10:59.338957 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:11:01.839308 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:11:03.841056 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:11:06.338040 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:11:08.339556 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:11:10.839459 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:11:13.338749 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:11:15.839574 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:11:18.337929 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:11:20.338790 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:11:22.339116 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:11:24.840378 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:11:27.338241 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:11:29.852230 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:11:32.338799 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:11:34.838985 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:11:37.345046 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:11:39.838474 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:11:42.338170 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:11:44.362656 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:11:46.838875 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:11:48.841654 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:11:51.338832 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:11:53.343372 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:11:55.838560 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:11:57.838907 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:11:59.839494 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:01.841503 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:04.338013 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:06.340432 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:08.847475 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:11.339958 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:13.341238 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:15.358241 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:17.842997 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:20.343831 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:22.839620 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:25.338275 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:27.338503 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:29.338983 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:31.839009 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:34.338847 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:36.339743 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:38.839318 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:41.337738 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:43.338933 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:45.838487 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:47.840406 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:50.337729 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:52.337948 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:52.337979 2407112 pod_ready.go:82] duration metric: took 4m0.00579824s for pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace to be "Ready" ...
E0816 00:12:52.337989 2407112 pod_ready.go:67] WaitExtra: waitPodCondition: context deadline exceeded
I0816 00:12:52.337996 2407112 pod_ready.go:39] duration metric: took 5m29.714095578s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0816 00:12:52.338015 2407112 api_server.go:52] waiting for apiserver process to appear ...
I0816 00:12:52.338092 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-apiserver --format={{.ID}}
I0816 00:12:52.356755 2407112 logs.go:276] 2 containers: [682baec10b08 3d14903eaff5]
I0816 00:12:52.356831 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_etcd --format={{.ID}}
I0816 00:12:52.374920 2407112 logs.go:276] 2 containers: [5aacba0afc73 15f34ed96b2b]
I0816 00:12:52.375012 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_coredns --format={{.ID}}
I0816 00:12:52.394135 2407112 logs.go:276] 2 containers: [0646646e2348 f583e4715841]
I0816 00:12:52.394219 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-scheduler --format={{.ID}}
I0816 00:12:52.411860 2407112 logs.go:276] 2 containers: [67be7ec054c6 003fa784026a]
I0816 00:12:52.411939 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-proxy --format={{.ID}}
I0816 00:12:52.430627 2407112 logs.go:276] 2 containers: [c5eeddd51e95 1e79f4e5d490]
I0816 00:12:52.430718 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-controller-manager --format={{.ID}}
I0816 00:12:52.448774 2407112 logs.go:276] 2 containers: [cc3ceefdfcf9 821653363c67]
I0816 00:12:52.448883 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kindnet --format={{.ID}}
I0816 00:12:52.467313 2407112 logs.go:276] 0 containers: []
W0816 00:12:52.467337 2407112 logs.go:278] No container was found matching "kindnet"
I0816 00:12:52.467406 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_storage-provisioner --format={{.ID}}
I0816 00:12:52.488664 2407112 logs.go:276] 2 containers: [a6efcfb5cb17 de096650c620]
I0816 00:12:52.488759 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kubernetes-dashboard --format={{.ID}}
I0816 00:12:52.508042 2407112 logs.go:276] 1 containers: [3ef1e388df06]
I0816 00:12:52.508089 2407112 logs.go:123] Gathering logs for kube-apiserver [3d14903eaff5] ...
I0816 00:12:52.508101 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 3d14903eaff5"
I0816 00:12:52.575393 2407112 logs.go:123] Gathering logs for etcd [15f34ed96b2b] ...
I0816 00:12:52.575429 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 15f34ed96b2b"
I0816 00:12:52.609501 2407112 logs.go:123] Gathering logs for kube-proxy [1e79f4e5d490] ...
I0816 00:12:52.609527 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 1e79f4e5d490"
I0816 00:12:52.632173 2407112 logs.go:123] Gathering logs for kube-controller-manager [cc3ceefdfcf9] ...
I0816 00:12:52.632251 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 cc3ceefdfcf9"
I0816 00:12:52.680493 2407112 logs.go:123] Gathering logs for Docker ...
I0816 00:12:52.680527 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u docker -u cri-docker -n 400"
I0816 00:12:52.708209 2407112 logs.go:123] Gathering logs for dmesg ...
I0816 00:12:52.708242 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I0816 00:12:52.725031 2407112 logs.go:123] Gathering logs for describe nodes ...
I0816 00:12:52.725068 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.20.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
I0816 00:12:52.888699 2407112 logs.go:123] Gathering logs for kube-apiserver [682baec10b08] ...
I0816 00:12:52.888730 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 682baec10b08"
I0816 00:12:52.947378 2407112 logs.go:123] Gathering logs for etcd [5aacba0afc73] ...
I0816 00:12:52.947415 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 5aacba0afc73"
I0816 00:12:52.984633 2407112 logs.go:123] Gathering logs for kube-controller-manager [821653363c67] ...
I0816 00:12:52.984685 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 821653363c67"
I0816 00:12:53.041565 2407112 logs.go:123] Gathering logs for kubernetes-dashboard [3ef1e388df06] ...
I0816 00:12:53.041610 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 3ef1e388df06"
I0816 00:12:53.063527 2407112 logs.go:123] Gathering logs for kubelet ...
I0816 00:12:53.063556 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
W0816 00:12:53.119952 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.595590 1361 reflector.go:138] object-"kube-system"/"kube-proxy-token-7vfmt": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "kube-proxy-token-7vfmt" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
W0816 00:12:53.120210 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.596830 1361 reflector.go:138] object-"kube-system"/"coredns": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "coredns" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
W0816 00:12:53.120436 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.597148 1361 reflector.go:138] object-"kube-system"/"coredns-token-xzs4d": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "coredns-token-xzs4d" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
W0816 00:12:53.120668 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.597346 1361 reflector.go:138] object-"kube-system"/"metrics-server-token-545hd": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "metrics-server-token-545hd" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
W0816 00:12:53.120885 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.597537 1361 reflector.go:138] object-"kube-system"/"kube-proxy": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "kube-proxy" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
W0816 00:12:53.121122 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.597780 1361 reflector.go:138] object-"kube-system"/"storage-provisioner-token-7rcl5": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "storage-provisioner-token-7rcl5" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
W0816 00:12:53.121338 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.597976 1361 reflector.go:138] object-"default"/"default-token-zv2bb": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "default-token-zv2bb" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "default": no relationship found between node 'old-k8s-version-894472' and this object
W0816 00:12:53.131353 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:25 old-k8s-version-894472 kubelet[1361]: E0816 00:07:25.865188 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
W0816 00:12:53.131864 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:25 old-k8s-version-894472 kubelet[1361]: E0816 00:07:25.961596 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.132568 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:27 old-k8s-version-894472 kubelet[1361]: E0816 00:07:27.069241 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.135723 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:40 old-k8s-version-894472 kubelet[1361]: E0816 00:07:40.936997 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
W0816 00:12:53.136063 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:41 old-k8s-version-894472 kubelet[1361]: E0816 00:07:41.174078 1361 reflector.go:138] object-"kubernetes-dashboard"/"kubernetes-dashboard-token-2w5nt": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "kubernetes-dashboard-token-2w5nt" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "kubernetes-dashboard": no relationship found between node 'old-k8s-version-894472' and this object
W0816 00:12:53.140916 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:48 old-k8s-version-894472 kubelet[1361]: E0816 00:07:48.480988 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
W0816 00:12:53.141483 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:49 old-k8s-version-894472 kubelet[1361]: E0816 00:07:49.497915 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.141711 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:52 old-k8s-version-894472 kubelet[1361]: E0816 00:07:52.902604 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.142184 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:56 old-k8s-version-894472 kubelet[1361]: E0816 00:07:56.570944 1361 pod_workers.go:191] Error syncing pod 3c69c2c8-274b-42e9-83f4-e56b1a377a84 ("storage-provisioner_kube-system(3c69c2c8-274b-42e9-83f4-e56b1a377a84)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 10s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(3c69c2c8-274b-42e9-83f4-e56b1a377a84)"
W0816 00:12:53.144872 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:04 old-k8s-version-894472 kubelet[1361]: E0816 00:08:04.540574 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
W0816 00:12:53.147012 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:04 old-k8s-version-894472 kubelet[1361]: E0816 00:08:04.937109 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
W0816 00:12:53.147341 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:18 old-k8s-version-894472 kubelet[1361]: E0816 00:08:18.906978 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.147546 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:18 old-k8s-version-894472 kubelet[1361]: E0816 00:08:18.907490 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.147738 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:33 old-k8s-version-894472 kubelet[1361]: E0816 00:08:33.905951 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.150049 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:34 old-k8s-version-894472 kubelet[1361]: E0816 00:08:34.587830 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
W0816 00:12:53.150259 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:48 old-k8s-version-894472 kubelet[1361]: E0816 00:08:48.913192 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.152418 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:48 old-k8s-version-894472 kubelet[1361]: E0816 00:08:48.930752 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
W0816 00:12:53.152621 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:59 old-k8s-version-894472 kubelet[1361]: E0816 00:08:59.902173 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.152813 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:01 old-k8s-version-894472 kubelet[1361]: E0816 00:09:01.906082 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.153018 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:10 old-k8s-version-894472 kubelet[1361]: E0816 00:09:10.903130 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.153211 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:13 old-k8s-version-894472 kubelet[1361]: E0816 00:09:13.910937 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.155518 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:22 old-k8s-version-894472 kubelet[1361]: E0816 00:09:22.525085 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
W0816 00:12:53.155709 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:28 old-k8s-version-894472 kubelet[1361]: E0816 00:09:28.902818 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.155918 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:36 old-k8s-version-894472 kubelet[1361]: E0816 00:09:36.902337 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.156108 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:42 old-k8s-version-894472 kubelet[1361]: E0816 00:09:42.908410 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.156310 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:50 old-k8s-version-894472 kubelet[1361]: E0816 00:09:50.917752 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.156500 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:53 old-k8s-version-894472 kubelet[1361]: E0816 00:09:53.902385 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.156706 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:02 old-k8s-version-894472 kubelet[1361]: E0816 00:10:02.902250 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.156909 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:07 old-k8s-version-894472 kubelet[1361]: E0816 00:10:07.902036 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.157115 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:13 old-k8s-version-894472 kubelet[1361]: E0816 00:10:13.908849 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.159233 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:22 old-k8s-version-894472 kubelet[1361]: E0816 00:10:22.924390 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
W0816 00:12:53.159438 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:25 old-k8s-version-894472 kubelet[1361]: E0816 00:10:25.902239 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.159639 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:37 old-k8s-version-894472 kubelet[1361]: E0816 00:10:37.903386 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.159827 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:37 old-k8s-version-894472 kubelet[1361]: E0816 00:10:37.904171 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.160016 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:51 old-k8s-version-894472 kubelet[1361]: E0816 00:10:51.902209 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.162349 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:53 old-k8s-version-894472 kubelet[1361]: E0816 00:10:53.458905 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
W0816 00:12:53.162553 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:03 old-k8s-version-894472 kubelet[1361]: E0816 00:11:03.902219 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.162745 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:03 old-k8s-version-894472 kubelet[1361]: E0816 00:11:03.905343 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.162934 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:16 old-k8s-version-894472 kubelet[1361]: E0816 00:11:16.908006 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.163136 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:17 old-k8s-version-894472 kubelet[1361]: E0816 00:11:17.912657 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.163324 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:31 old-k8s-version-894472 kubelet[1361]: E0816 00:11:31.902307 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.163546 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:32 old-k8s-version-894472 kubelet[1361]: E0816 00:11:32.902285 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.163735 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:43 old-k8s-version-894472 kubelet[1361]: E0816 00:11:43.902913 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.163937 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:47 old-k8s-version-894472 kubelet[1361]: E0816 00:11:47.902276 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.164127 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:56 old-k8s-version-894472 kubelet[1361]: E0816 00:11:56.907938 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.164331 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:01 old-k8s-version-894472 kubelet[1361]: E0816 00:12:01.910769 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.164519 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:07 old-k8s-version-894472 kubelet[1361]: E0816 00:12:07.902409 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.164729 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:12 old-k8s-version-894472 kubelet[1361]: E0816 00:12:12.925048 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.164919 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:18 old-k8s-version-894472 kubelet[1361]: E0816 00:12:18.903513 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.165121 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:23 old-k8s-version-894472 kubelet[1361]: E0816 00:12:23.902176 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.165309 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:33 old-k8s-version-894472 kubelet[1361]: E0816 00:12:33.902261 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.165511 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:34 old-k8s-version-894472 kubelet[1361]: E0816 00:12:34.910967 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.165721 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.902827 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.165909 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.903329 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
I0816 00:12:53.165921 2407112 logs.go:123] Gathering logs for coredns [f583e4715841] ...
I0816 00:12:53.165936 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 f583e4715841"
I0816 00:12:53.191198 2407112 logs.go:123] Gathering logs for container status ...
I0816 00:12:53.191227 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I0816 00:12:53.281304 2407112 logs.go:123] Gathering logs for coredns [0646646e2348] ...
I0816 00:12:53.281343 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 0646646e2348"
I0816 00:12:53.302996 2407112 logs.go:123] Gathering logs for kube-scheduler [003fa784026a] ...
I0816 00:12:53.303072 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 003fa784026a"
I0816 00:12:53.328690 2407112 logs.go:123] Gathering logs for kube-proxy [c5eeddd51e95] ...
I0816 00:12:53.328718 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 c5eeddd51e95"
I0816 00:12:53.351111 2407112 logs.go:123] Gathering logs for storage-provisioner [a6efcfb5cb17] ...
I0816 00:12:53.351142 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 a6efcfb5cb17"
I0816 00:12:53.372302 2407112 logs.go:123] Gathering logs for storage-provisioner [de096650c620] ...
I0816 00:12:53.372331 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 de096650c620"
I0816 00:12:53.392435 2407112 logs.go:123] Gathering logs for kube-scheduler [67be7ec054c6] ...
I0816 00:12:53.392464 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 67be7ec054c6"
I0816 00:12:53.414931 2407112 out.go:358] Setting ErrFile to fd 2...
I0816 00:12:53.414956 2407112 out.go:392] TERM=,COLORTERM=, which probably does not support color
W0816 00:12:53.415035 2407112 out.go:270] X Problems detected in kubelet:
X Problems detected in kubelet:
W0816 00:12:53.415081 2407112 out.go:270] Aug 16 00:12:23 old-k8s-version-894472 kubelet[1361]: E0816 00:12:23.902176 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
Aug 16 00:12:23 old-k8s-version-894472 kubelet[1361]: E0816 00:12:23.902176 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.415095 2407112 out.go:270] Aug 16 00:12:33 old-k8s-version-894472 kubelet[1361]: E0816 00:12:33.902261 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
Aug 16 00:12:33 old-k8s-version-894472 kubelet[1361]: E0816 00:12:33.902261 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.415117 2407112 out.go:270] Aug 16 00:12:34 old-k8s-version-894472 kubelet[1361]: E0816 00:12:34.910967 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
Aug 16 00:12:34 old-k8s-version-894472 kubelet[1361]: E0816 00:12:34.910967 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.415131 2407112 out.go:270] Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.902827 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.902827 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.415139 2407112 out.go:270] Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.903329 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.903329 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
I0816 00:12:53.415178 2407112 out.go:358] Setting ErrFile to fd 2...
I0816 00:12:53.415187 2407112 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0816 00:13:03.417016 2407112 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0816 00:13:03.428859 2407112 api_server.go:72] duration metric: took 5m57.651838639s to wait for apiserver process to appear ...
I0816 00:13:03.428887 2407112 api_server.go:88] waiting for apiserver healthz status ...
I0816 00:13:03.428962 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-apiserver --format={{.ID}}
I0816 00:13:03.446552 2407112 logs.go:276] 2 containers: [682baec10b08 3d14903eaff5]
I0816 00:13:03.446627 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_etcd --format={{.ID}}
I0816 00:13:03.463688 2407112 logs.go:276] 2 containers: [5aacba0afc73 15f34ed96b2b]
I0816 00:13:03.463770 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_coredns --format={{.ID}}
I0816 00:13:03.485222 2407112 logs.go:276] 2 containers: [0646646e2348 f583e4715841]
I0816 00:13:03.485299 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-scheduler --format={{.ID}}
I0816 00:13:03.503320 2407112 logs.go:276] 2 containers: [67be7ec054c6 003fa784026a]
I0816 00:13:03.503399 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-proxy --format={{.ID}}
I0816 00:13:03.522597 2407112 logs.go:276] 2 containers: [c5eeddd51e95 1e79f4e5d490]
I0816 00:13:03.522681 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-controller-manager --format={{.ID}}
I0816 00:13:03.542588 2407112 logs.go:276] 2 containers: [cc3ceefdfcf9 821653363c67]
I0816 00:13:03.542672 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kindnet --format={{.ID}}
I0816 00:13:03.566735 2407112 logs.go:276] 0 containers: []
W0816 00:13:03.566759 2407112 logs.go:278] No container was found matching "kindnet"
I0816 00:13:03.566817 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kubernetes-dashboard --format={{.ID}}
I0816 00:13:03.585575 2407112 logs.go:276] 1 containers: [3ef1e388df06]
I0816 00:13:03.585709 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_storage-provisioner --format={{.ID}}
I0816 00:13:03.604527 2407112 logs.go:276] 2 containers: [a6efcfb5cb17 de096650c620]
I0816 00:13:03.604570 2407112 logs.go:123] Gathering logs for container status ...
I0816 00:13:03.604588 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I0816 00:13:03.669008 2407112 logs.go:123] Gathering logs for kube-apiserver [682baec10b08] ...
I0816 00:13:03.669037 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 682baec10b08"
I0816 00:13:03.712566 2407112 logs.go:123] Gathering logs for etcd [5aacba0afc73] ...
I0816 00:13:03.712600 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 5aacba0afc73"
I0816 00:13:03.737934 2407112 logs.go:123] Gathering logs for etcd [15f34ed96b2b] ...
I0816 00:13:03.738002 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 15f34ed96b2b"
I0816 00:13:03.771409 2407112 logs.go:123] Gathering logs for coredns [f583e4715841] ...
I0816 00:13:03.771482 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 f583e4715841"
I0816 00:13:03.797096 2407112 logs.go:123] Gathering logs for kube-scheduler [67be7ec054c6] ...
I0816 00:13:03.797128 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 67be7ec054c6"
I0816 00:13:03.826332 2407112 logs.go:123] Gathering logs for kube-controller-manager [cc3ceefdfcf9] ...
I0816 00:13:03.826367 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 cc3ceefdfcf9"
I0816 00:13:03.865698 2407112 logs.go:123] Gathering logs for Docker ...
I0816 00:13:03.865729 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u docker -u cri-docker -n 400"
I0816 00:13:03.892350 2407112 logs.go:123] Gathering logs for kubelet ...
I0816 00:13:03.892378 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
W0816 00:13:03.954017 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.595590 1361 reflector.go:138] object-"kube-system"/"kube-proxy-token-7vfmt": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "kube-proxy-token-7vfmt" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
W0816 00:13:03.954368 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.596830 1361 reflector.go:138] object-"kube-system"/"coredns": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "coredns" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
W0816 00:13:03.954651 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.597148 1361 reflector.go:138] object-"kube-system"/"coredns-token-xzs4d": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "coredns-token-xzs4d" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
W0816 00:13:03.954880 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.597346 1361 reflector.go:138] object-"kube-system"/"metrics-server-token-545hd": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "metrics-server-token-545hd" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
W0816 00:13:03.955093 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.597537 1361 reflector.go:138] object-"kube-system"/"kube-proxy": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "kube-proxy" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
W0816 00:13:03.955328 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.597780 1361 reflector.go:138] object-"kube-system"/"storage-provisioner-token-7rcl5": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "storage-provisioner-token-7rcl5" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
W0816 00:13:03.955543 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.597976 1361 reflector.go:138] object-"default"/"default-token-zv2bb": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "default-token-zv2bb" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "default": no relationship found between node 'old-k8s-version-894472' and this object
W0816 00:13:03.966447 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:25 old-k8s-version-894472 kubelet[1361]: E0816 00:07:25.865188 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
W0816 00:13:03.967038 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:25 old-k8s-version-894472 kubelet[1361]: E0816 00:07:25.961596 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.967719 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:27 old-k8s-version-894472 kubelet[1361]: E0816 00:07:27.069241 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.970902 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:40 old-k8s-version-894472 kubelet[1361]: E0816 00:07:40.936997 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
W0816 00:13:03.971269 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:41 old-k8s-version-894472 kubelet[1361]: E0816 00:07:41.174078 1361 reflector.go:138] object-"kubernetes-dashboard"/"kubernetes-dashboard-token-2w5nt": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "kubernetes-dashboard-token-2w5nt" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "kubernetes-dashboard": no relationship found between node 'old-k8s-version-894472' and this object
W0816 00:13:03.975634 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:48 old-k8s-version-894472 kubelet[1361]: E0816 00:07:48.480988 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
W0816 00:13:03.976189 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:49 old-k8s-version-894472 kubelet[1361]: E0816 00:07:49.497915 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.976380 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:52 old-k8s-version-894472 kubelet[1361]: E0816 00:07:52.902604 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.976839 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:56 old-k8s-version-894472 kubelet[1361]: E0816 00:07:56.570944 1361 pod_workers.go:191] Error syncing pod 3c69c2c8-274b-42e9-83f4-e56b1a377a84 ("storage-provisioner_kube-system(3c69c2c8-274b-42e9-83f4-e56b1a377a84)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 10s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(3c69c2c8-274b-42e9-83f4-e56b1a377a84)"
W0816 00:13:03.979485 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:04 old-k8s-version-894472 kubelet[1361]: E0816 00:08:04.540574 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
W0816 00:13:03.981633 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:04 old-k8s-version-894472 kubelet[1361]: E0816 00:08:04.937109 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
W0816 00:13:03.981960 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:18 old-k8s-version-894472 kubelet[1361]: E0816 00:08:18.906978 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.982163 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:18 old-k8s-version-894472 kubelet[1361]: E0816 00:08:18.907490 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.982356 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:33 old-k8s-version-894472 kubelet[1361]: E0816 00:08:33.905951 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.984654 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:34 old-k8s-version-894472 kubelet[1361]: E0816 00:08:34.587830 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
W0816 00:13:03.984859 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:48 old-k8s-version-894472 kubelet[1361]: E0816 00:08:48.913192 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.986993 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:48 old-k8s-version-894472 kubelet[1361]: E0816 00:08:48.930752 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
W0816 00:13:03.987196 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:59 old-k8s-version-894472 kubelet[1361]: E0816 00:08:59.902173 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.987384 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:01 old-k8s-version-894472 kubelet[1361]: E0816 00:09:01.906082 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.987586 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:10 old-k8s-version-894472 kubelet[1361]: E0816 00:09:10.903130 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.987776 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:13 old-k8s-version-894472 kubelet[1361]: E0816 00:09:13.910937 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.990067 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:22 old-k8s-version-894472 kubelet[1361]: E0816 00:09:22.525085 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
W0816 00:13:03.990260 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:28 old-k8s-version-894472 kubelet[1361]: E0816 00:09:28.902818 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.990464 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:36 old-k8s-version-894472 kubelet[1361]: E0816 00:09:36.902337 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.990653 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:42 old-k8s-version-894472 kubelet[1361]: E0816 00:09:42.908410 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.990862 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:50 old-k8s-version-894472 kubelet[1361]: E0816 00:09:50.917752 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.991053 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:53 old-k8s-version-894472 kubelet[1361]: E0816 00:09:53.902385 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.991255 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:02 old-k8s-version-894472 kubelet[1361]: E0816 00:10:02.902250 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.991445 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:07 old-k8s-version-894472 kubelet[1361]: E0816 00:10:07.902036 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.991646 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:13 old-k8s-version-894472 kubelet[1361]: E0816 00:10:13.908849 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.993804 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:22 old-k8s-version-894472 kubelet[1361]: E0816 00:10:22.924390 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
W0816 00:13:03.994007 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:25 old-k8s-version-894472 kubelet[1361]: E0816 00:10:25.902239 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.994210 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:37 old-k8s-version-894472 kubelet[1361]: E0816 00:10:37.903386 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.994398 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:37 old-k8s-version-894472 kubelet[1361]: E0816 00:10:37.904171 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.994591 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:51 old-k8s-version-894472 kubelet[1361]: E0816 00:10:51.902209 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.996873 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:53 old-k8s-version-894472 kubelet[1361]: E0816 00:10:53.458905 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
W0816 00:13:03.997075 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:03 old-k8s-version-894472 kubelet[1361]: E0816 00:11:03.902219 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.997263 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:03 old-k8s-version-894472 kubelet[1361]: E0816 00:11:03.905343 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.997451 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:16 old-k8s-version-894472 kubelet[1361]: E0816 00:11:16.908006 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.997659 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:17 old-k8s-version-894472 kubelet[1361]: E0816 00:11:17.912657 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.997848 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:31 old-k8s-version-894472 kubelet[1361]: E0816 00:11:31.902307 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.998049 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:32 old-k8s-version-894472 kubelet[1361]: E0816 00:11:32.902285 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.998237 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:43 old-k8s-version-894472 kubelet[1361]: E0816 00:11:43.902913 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.998439 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:47 old-k8s-version-894472 kubelet[1361]: E0816 00:11:47.902276 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.998629 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:56 old-k8s-version-894472 kubelet[1361]: E0816 00:11:56.907938 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.998831 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:01 old-k8s-version-894472 kubelet[1361]: E0816 00:12:01.910769 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.999022 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:07 old-k8s-version-894472 kubelet[1361]: E0816 00:12:07.902409 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.999225 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:12 old-k8s-version-894472 kubelet[1361]: E0816 00:12:12.925048 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.999414 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:18 old-k8s-version-894472 kubelet[1361]: E0816 00:12:18.903513 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.999616 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:23 old-k8s-version-894472 kubelet[1361]: E0816 00:12:23.902176 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.999804 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:33 old-k8s-version-894472 kubelet[1361]: E0816 00:12:33.902261 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:04.000006 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:34 old-k8s-version-894472 kubelet[1361]: E0816 00:12:34.910967 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:04.000207 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.902827 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:04.000397 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.903329 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:04.000599 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:58 old-k8s-version-894472 kubelet[1361]: E0816 00:12:58.902545 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:04.000789 2407112 logs.go:138] Found kubelet problem: Aug 16 00:13:02 old-k8s-version-894472 kubelet[1361]: E0816 00:13:02.903209 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
I0816 00:13:04.000801 2407112 logs.go:123] Gathering logs for dmesg ...
I0816 00:13:04.000816 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I0816 00:13:04.020061 2407112 logs.go:123] Gathering logs for kubernetes-dashboard [3ef1e388df06] ...
I0816 00:13:04.020091 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 3ef1e388df06"
I0816 00:13:04.044867 2407112 logs.go:123] Gathering logs for storage-provisioner [a6efcfb5cb17] ...
I0816 00:13:04.044946 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 a6efcfb5cb17"
I0816 00:13:04.076966 2407112 logs.go:123] Gathering logs for describe nodes ...
I0816 00:13:04.077044 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.20.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
I0816 00:13:04.233528 2407112 logs.go:123] Gathering logs for coredns [0646646e2348] ...
I0816 00:13:04.233554 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 0646646e2348"
I0816 00:13:04.259485 2407112 logs.go:123] Gathering logs for kube-proxy [1e79f4e5d490] ...
I0816 00:13:04.259515 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 1e79f4e5d490"
I0816 00:13:04.286001 2407112 logs.go:123] Gathering logs for kube-apiserver [3d14903eaff5] ...
I0816 00:13:04.286028 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 3d14903eaff5"
I0816 00:13:04.367586 2407112 logs.go:123] Gathering logs for kube-scheduler [003fa784026a] ...
I0816 00:13:04.367621 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 003fa784026a"
I0816 00:13:04.398199 2407112 logs.go:123] Gathering logs for kube-proxy [c5eeddd51e95] ...
I0816 00:13:04.398227 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 c5eeddd51e95"
I0816 00:13:04.420433 2407112 logs.go:123] Gathering logs for kube-controller-manager [821653363c67] ...
I0816 00:13:04.420463 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 821653363c67"
I0816 00:13:04.480676 2407112 logs.go:123] Gathering logs for storage-provisioner [de096650c620] ...
I0816 00:13:04.480722 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 de096650c620"
I0816 00:13:04.501321 2407112 out.go:358] Setting ErrFile to fd 2...
I0816 00:13:04.501350 2407112 out.go:392] TERM=,COLORTERM=, which probably does not support color
W0816 00:13:04.501418 2407112 out.go:270] X Problems detected in kubelet:
X Problems detected in kubelet:
W0816 00:13:04.501441 2407112 out.go:270] Aug 16 00:12:34 old-k8s-version-894472 kubelet[1361]: E0816 00:12:34.910967 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
Aug 16 00:12:34 old-k8s-version-894472 kubelet[1361]: E0816 00:12:34.910967 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:04.501458 2407112 out.go:270] Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.902827 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.902827 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:04.501468 2407112 out.go:270] Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.903329 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.903329 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:04.501473 2407112 out.go:270] Aug 16 00:12:58 old-k8s-version-894472 kubelet[1361]: E0816 00:12:58.902545 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
Aug 16 00:12:58 old-k8s-version-894472 kubelet[1361]: E0816 00:12:58.902545 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:04.501479 2407112 out.go:270] Aug 16 00:13:02 old-k8s-version-894472 kubelet[1361]: E0816 00:13:02.903209 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
Aug 16 00:13:02 old-k8s-version-894472 kubelet[1361]: E0816 00:13:02.903209 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
I0816 00:13:04.501496 2407112 out.go:358] Setting ErrFile to fd 2...
I0816 00:13:04.501501 2407112 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0816 00:13:14.507900 2407112 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
I0816 00:13:14.517839 2407112 api_server.go:279] https://192.168.85.2:8443/healthz returned 200:
ok
I0816 00:13:14.520292 2407112 out.go:201]
W0816 00:13:14.522384 2407112 out.go:270] X Exiting due to K8S_UNHEALTHY_CONTROL_PLANE: wait 6m0s for node: wait for healthy API server: controlPlane never updated to v1.20.0
X Exiting due to K8S_UNHEALTHY_CONTROL_PLANE: wait 6m0s for node: wait for healthy API server: controlPlane never updated to v1.20.0
W0816 00:13:14.522420 2407112 out.go:270] * Suggestion: Control Plane could not update, try minikube delete --all --purge
* Suggestion: Control Plane could not update, try minikube delete --all --purge
W0816 00:13:14.522438 2407112 out.go:270] * Related issue: https://github.com/kubernetes/minikube/issues/11417
* Related issue: https://github.com/kubernetes/minikube/issues/11417
W0816 00:13:14.522444 2407112 out.go:270] *
*
W0816 00:13:14.523417 2407112 out.go:293] ╭─────────────────────────────────────────────────────────────────────────────────────────────╮
│ │
│ * If the above advice does not help, please let us know: │
│ https://github.com/kubernetes/minikube/issues/new/choose │
│ │
│ * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue. │
│ │
╰─────────────────────────────────────────────────────────────────────────────────────────────╯
╭─────────────────────────────────────────────────────────────────────────────────────────────╮
│ │
│ * If the above advice does not help, please let us know: │
│ https://github.com/kubernetes/minikube/issues/new/choose │
│ │
│ * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue. │
│ │
╰─────────────────────────────────────────────────────────────────────────────────────────────╯
I0816 00:13:14.525402 2407112 out.go:201]
** /stderr **
start_stop_delete_test.go:259: failed to start minikube post-stop. args "out/minikube-linux-arm64 start -p old-k8s-version-894472 --memory=2200 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker --container-runtime=docker --kubernetes-version=v1.20.0": exit status 102
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:230: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/SecondStart]: docker inspect <======
helpers_test.go:231: (dbg) Run: docker inspect old-k8s-version-894472
helpers_test.go:235: (dbg) docker inspect old-k8s-version-894472:
-- stdout --
[
{
"Id": "64c0bba19cec9659a7141ba86ce4ac16804f2bad1bc8890b1de204ed24b4df5f",
"Created": "2024-08-16T00:04:09.217242083Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 2407535,
"ExitCode": 0,
"Error": "",
"StartedAt": "2024-08-16T00:06:57.018774687Z",
"FinishedAt": "2024-08-16T00:06:55.82218271Z"
},
"Image": "sha256:decdd59746a9dba10062a73f6cd4b910c7b4e60613660b1022f8357747681c4d",
"ResolvConfPath": "/var/lib/docker/containers/64c0bba19cec9659a7141ba86ce4ac16804f2bad1bc8890b1de204ed24b4df5f/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/64c0bba19cec9659a7141ba86ce4ac16804f2bad1bc8890b1de204ed24b4df5f/hostname",
"HostsPath": "/var/lib/docker/containers/64c0bba19cec9659a7141ba86ce4ac16804f2bad1bc8890b1de204ed24b4df5f/hosts",
"LogPath": "/var/lib/docker/containers/64c0bba19cec9659a7141ba86ce4ac16804f2bad1bc8890b1de204ed24b4df5f/64c0bba19cec9659a7141ba86ce4ac16804f2bad1bc8890b1de204ed24b4df5f-json.log",
"Name": "/old-k8s-version-894472",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"old-k8s-version-894472:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {}
},
"NetworkMode": "old-k8s-version-894472",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 2306867200,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 4613734400,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"LowerDir": "/var/lib/docker/overlay2/a218c1210234467c18f43392ece19787d17d411370e1157a3e6c33511b377fc6-init/diff:/var/lib/docker/overlay2/6ed902a04c22dd3041d65f8183926fcc1f46fb9c240ed2c4472a750ce633e7fc/diff",
"MergedDir": "/var/lib/docker/overlay2/a218c1210234467c18f43392ece19787d17d411370e1157a3e6c33511b377fc6/merged",
"UpperDir": "/var/lib/docker/overlay2/a218c1210234467c18f43392ece19787d17d411370e1157a3e6c33511b377fc6/diff",
"WorkDir": "/var/lib/docker/overlay2/a218c1210234467c18f43392ece19787d17d411370e1157a3e6c33511b377fc6/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "old-k8s-version-894472",
"Source": "/var/lib/docker/volumes/old-k8s-version-894472/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "old-k8s-version-894472",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "old-k8s-version-894472",
"name.minikube.sigs.k8s.io": "old-k8s-version-894472",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "f305d36a01d1a564e6e37e3d35aaa4195e813b0f7ae24251a638945f693fd8e0",
"SandboxKey": "/var/run/docker/netns/f305d36a01d1",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "35084"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "35085"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "35088"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "35086"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "35087"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"old-k8s-version-894472": {
"IPAMConfig": {
"IPv4Address": "192.168.85.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "02:42:c0:a8:55:02",
"DriverOpts": null,
"NetworkID": "e8b3ff14cbdfc2a1286db62206966a0ebe57a37d2a08f526b564e5256ad6450c",
"EndpointID": "3150bec38809672b11b5a7c1b22bc2073898b23a50eb852356ccc39b9bae4fba",
"Gateway": "192.168.85.1",
"IPAddress": "192.168.85.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"old-k8s-version-894472",
"64c0bba19cec"
]
}
}
}
}
]
-- /stdout --
helpers_test.go:239: (dbg) Run: out/minikube-linux-arm64 status --format={{.Host}} -p old-k8s-version-894472 -n old-k8s-version-894472
helpers_test.go:244: <<< TestStartStop/group/old-k8s-version/serial/SecondStart FAILED: start of post-mortem logs <<<
helpers_test.go:245: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/SecondStart]: minikube logs <======
helpers_test.go:247: (dbg) Run: out/minikube-linux-arm64 -p old-k8s-version-894472 logs -n 25
helpers_test.go:247: (dbg) Done: out/minikube-linux-arm64 -p old-k8s-version-894472 logs -n 25: (1.303128489s)
helpers_test.go:252: TestStartStop/group/old-k8s-version/serial/SecondStart logs:
-- stdout --
==> Audit <==
|---------|--------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
| Command | Args | Profile | User | Version | Start Time | End Time |
|---------|--------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
| ssh | -p false-055531 sudo systemctl | false-055531 | jenkins | v1.33.1 | 16 Aug 24 00:05 UTC | 16 Aug 24 00:05 UTC |
| | status containerd --all --full | | | | | |
| | --no-pager | | | | | |
| ssh | -p false-055531 sudo systemctl | false-055531 | jenkins | v1.33.1 | 16 Aug 24 00:05 UTC | 16 Aug 24 00:05 UTC |
| | cat containerd --no-pager | | | | | |
| ssh | -p false-055531 sudo cat | false-055531 | jenkins | v1.33.1 | 16 Aug 24 00:05 UTC | 16 Aug 24 00:05 UTC |
| | /lib/systemd/system/containerd.service | | | | | |
| ssh | -p false-055531 sudo cat | false-055531 | jenkins | v1.33.1 | 16 Aug 24 00:05 UTC | 16 Aug 24 00:05 UTC |
| | /etc/containerd/config.toml | | | | | |
| ssh | -p false-055531 sudo | false-055531 | jenkins | v1.33.1 | 16 Aug 24 00:05 UTC | 16 Aug 24 00:05 UTC |
| | containerd config dump | | | | | |
| ssh | -p false-055531 sudo systemctl | false-055531 | jenkins | v1.33.1 | 16 Aug 24 00:05 UTC | |
| | status crio --all --full | | | | | |
| | --no-pager | | | | | |
| ssh | -p false-055531 sudo systemctl | false-055531 | jenkins | v1.33.1 | 16 Aug 24 00:05 UTC | 16 Aug 24 00:05 UTC |
| | cat crio --no-pager | | | | | |
| ssh | -p false-055531 sudo find | false-055531 | jenkins | v1.33.1 | 16 Aug 24 00:05 UTC | 16 Aug 24 00:05 UTC |
| | /etc/crio -type f -exec sh -c | | | | | |
| | 'echo {}; cat {}' \; | | | | | |
| ssh | -p false-055531 sudo crio | false-055531 | jenkins | v1.33.1 | 16 Aug 24 00:05 UTC | 16 Aug 24 00:05 UTC |
| | config | | | | | |
| delete | -p false-055531 | false-055531 | jenkins | v1.33.1 | 16 Aug 24 00:05 UTC | 16 Aug 24 00:05 UTC |
| start | -p no-preload-158739 | no-preload-158739 | jenkins | v1.33.1 | 16 Aug 24 00:05 UTC | 16 Aug 24 00:06 UTC |
| | --memory=2200 | | | | | |
| | --alsologtostderr | | | | | |
| | --wait=true --preload=false | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| | --kubernetes-version=v1.31.0 | | | | | |
| addons | enable metrics-server -p no-preload-158739 | no-preload-158739 | jenkins | v1.33.1 | 16 Aug 24 00:06 UTC | 16 Aug 24 00:06 UTC |
| | --images=MetricsServer=registry.k8s.io/echoserver:1.4 | | | | | |
| | --registries=MetricsServer=fake.domain | | | | | |
| addons | enable metrics-server -p old-k8s-version-894472 | old-k8s-version-894472 | jenkins | v1.33.1 | 16 Aug 24 00:06 UTC | 16 Aug 24 00:06 UTC |
| | --images=MetricsServer=registry.k8s.io/echoserver:1.4 | | | | | |
| | --registries=MetricsServer=fake.domain | | | | | |
| stop | -p no-preload-158739 | no-preload-158739 | jenkins | v1.33.1 | 16 Aug 24 00:06 UTC | 16 Aug 24 00:06 UTC |
| | --alsologtostderr -v=3 | | | | | |
| stop | -p old-k8s-version-894472 | old-k8s-version-894472 | jenkins | v1.33.1 | 16 Aug 24 00:06 UTC | 16 Aug 24 00:06 UTC |
| | --alsologtostderr -v=3 | | | | | |
| addons | enable dashboard -p no-preload-158739 | no-preload-158739 | jenkins | v1.33.1 | 16 Aug 24 00:06 UTC | 16 Aug 24 00:06 UTC |
| | --images=MetricsScraper=registry.k8s.io/echoserver:1.4 | | | | | |
| start | -p no-preload-158739 | no-preload-158739 | jenkins | v1.33.1 | 16 Aug 24 00:06 UTC | 16 Aug 24 00:11 UTC |
| | --memory=2200 | | | | | |
| | --alsologtostderr | | | | | |
| | --wait=true --preload=false | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| | --kubernetes-version=v1.31.0 | | | | | |
| addons | enable dashboard -p old-k8s-version-894472 | old-k8s-version-894472 | jenkins | v1.33.1 | 16 Aug 24 00:06 UTC | 16 Aug 24 00:06 UTC |
| | --images=MetricsScraper=registry.k8s.io/echoserver:1.4 | | | | | |
| start | -p old-k8s-version-894472 | old-k8s-version-894472 | jenkins | v1.33.1 | 16 Aug 24 00:06 UTC | |
| | --memory=2200 | | | | | |
| | --alsologtostderr --wait=true | | | | | |
| | --kvm-network=default | | | | | |
| | --kvm-qemu-uri=qemu:///system | | | | | |
| | --disable-driver-mounts | | | | | |
| | --keep-context=false | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| | --kubernetes-version=v1.20.0 | | | | | |
| image | no-preload-158739 image list | no-preload-158739 | jenkins | v1.33.1 | 16 Aug 24 00:11 UTC | 16 Aug 24 00:11 UTC |
| | --format=json | | | | | |
| pause | -p no-preload-158739 | no-preload-158739 | jenkins | v1.33.1 | 16 Aug 24 00:11 UTC | 16 Aug 24 00:11 UTC |
| | --alsologtostderr -v=1 | | | | | |
| unpause | -p no-preload-158739 | no-preload-158739 | jenkins | v1.33.1 | 16 Aug 24 00:11 UTC | 16 Aug 24 00:11 UTC |
| | --alsologtostderr -v=1 | | | | | |
| delete | -p no-preload-158739 | no-preload-158739 | jenkins | v1.33.1 | 16 Aug 24 00:11 UTC | 16 Aug 24 00:11 UTC |
| delete | -p no-preload-158739 | no-preload-158739 | jenkins | v1.33.1 | 16 Aug 24 00:11 UTC | 16 Aug 24 00:11 UTC |
| start | -p embed-certs-951478 | embed-certs-951478 | jenkins | v1.33.1 | 16 Aug 24 00:11 UTC | 16 Aug 24 00:13 UTC |
| | --memory=2200 | | | | | |
| | --alsologtostderr --wait=true | | | | | |
| | --embed-certs --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| | --kubernetes-version=v1.31.0 | | | | | |
|---------|--------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
==> Last Start <==
Log file created at: 2024/08/16 00:11:52
Running on machine: ip-172-31-29-130
Binary: Built with gc go1.22.5 for linux/arm64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0816 00:11:52.858524 2418220 out.go:345] Setting OutFile to fd 1 ...
I0816 00:11:52.858706 2418220 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0816 00:11:52.858720 2418220 out.go:358] Setting ErrFile to fd 2...
I0816 00:11:52.858725 2418220 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0816 00:11:52.859019 2418220 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19452-2026001/.minikube/bin
I0816 00:11:52.859473 2418220 out.go:352] Setting JSON to false
I0816 00:11:52.860683 2418220 start.go:129] hostinfo: {"hostname":"ip-172-31-29-130","uptime":32057,"bootTime":1723735056,"procs":245,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1067-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"36adf542-ef4f-4e2d-a0c8-6868d1383ff9"}
I0816 00:11:52.860750 2418220 start.go:139] virtualization:
I0816 00:11:52.864102 2418220 out.go:177] * [embed-certs-951478] minikube v1.33.1 on Ubuntu 20.04 (arm64)
I0816 00:11:52.865856 2418220 out.go:177] - MINIKUBE_LOCATION=19452
I0816 00:11:52.865967 2418220 notify.go:220] Checking for updates...
I0816 00:11:52.869669 2418220 out.go:177] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0816 00:11:52.871415 2418220 out.go:177] - KUBECONFIG=/home/jenkins/minikube-integration/19452-2026001/kubeconfig
I0816 00:11:52.873079 2418220 out.go:177] - MINIKUBE_HOME=/home/jenkins/minikube-integration/19452-2026001/.minikube
I0816 00:11:52.875043 2418220 out.go:177] - MINIKUBE_BIN=out/minikube-linux-arm64
I0816 00:11:52.876891 2418220 out.go:177] - MINIKUBE_FORCE_SYSTEMD=
I0816 00:11:52.879499 2418220 config.go:182] Loaded profile config "old-k8s-version-894472": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.20.0
I0816 00:11:52.879638 2418220 driver.go:392] Setting default libvirt URI to qemu:///system
I0816 00:11:52.913834 2418220 docker.go:123] docker version: linux-27.1.2:Docker Engine - Community
I0816 00:11:52.913949 2418220 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0816 00:11:52.975933 2418220 info.go:266] docker info: {ID:U5VK:ZNT5:35M3:FHLW:Q7TL:ELFX:BNAG:AV4T:UD2H:SK5L:SEJV:SJJL Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:35 OomKillDisable:true NGoroutines:53 SystemTime:2024-08-16 00:11:52.966452542 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1067-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aar
ch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214900736 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-29-130 Labels:[] ExperimentalBuild:false ServerVersion:27.1.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:8fc6bcff51318944179630522a095cc9dbf9f353 Expected:8fc6bcff51318944179630522a095cc9dbf9f353} RuncCommit:{ID:v1.1.13-0-g58aa920 Expected:v1.1.13-0-g58aa920} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErro
rs:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.16.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.29.1]] Warnings:<nil>}}
I0816 00:11:52.976041 2418220 docker.go:307] overlay module found
I0816 00:11:52.979038 2418220 out.go:177] * Using the docker driver based on user configuration
I0816 00:11:52.981027 2418220 start.go:297] selected driver: docker
I0816 00:11:52.981047 2418220 start.go:901] validating driver "docker" against <nil>
I0816 00:11:52.981062 2418220 start.go:912] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0816 00:11:52.981806 2418220 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0816 00:11:53.036894 2418220 info.go:266] docker info: {ID:U5VK:ZNT5:35M3:FHLW:Q7TL:ELFX:BNAG:AV4T:UD2H:SK5L:SEJV:SJJL Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:35 OomKillDisable:true NGoroutines:53 SystemTime:2024-08-16 00:11:53.027304915 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1067-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aar
ch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214900736 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-29-130 Labels:[] ExperimentalBuild:false ServerVersion:27.1.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:8fc6bcff51318944179630522a095cc9dbf9f353 Expected:8fc6bcff51318944179630522a095cc9dbf9f353} RuncCommit:{ID:v1.1.13-0-g58aa920 Expected:v1.1.13-0-g58aa920} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErro
rs:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.16.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.29.1]] Warnings:<nil>}}
I0816 00:11:53.037059 2418220 start_flags.go:310] no existing cluster config was found, will generate one from the flags
I0816 00:11:53.037279 2418220 start_flags.go:947] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0816 00:11:53.039368 2418220 out.go:177] * Using Docker driver with root privileges
I0816 00:11:53.041492 2418220 cni.go:84] Creating CNI manager for ""
I0816 00:11:53.041534 2418220 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0816 00:11:53.041550 2418220 start_flags.go:319] Found "bridge CNI" CNI - setting NetworkPlugin=cni
I0816 00:11:53.041819 2418220 start.go:340] cluster config:
{Name:embed-certs-951478 KeepContext:false EmbedCerts:true MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.0 ClusterName:embed-certs-951478 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local Contain
erRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.31.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSH
AuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0816 00:11:53.044920 2418220 out.go:177] * Starting "embed-certs-951478" primary control-plane node in "embed-certs-951478" cluster
I0816 00:11:53.046799 2418220 cache.go:121] Beginning downloading kic base image for docker with docker
I0816 00:11:53.049143 2418220 out.go:177] * Pulling base image v0.0.44-1723740748-19452 ...
I0816 00:11:53.052054 2418220 preload.go:131] Checking if preload exists for k8s version v1.31.0 and runtime docker
I0816 00:11:53.052115 2418220 preload.go:146] Found local preload: /home/jenkins/minikube-integration/19452-2026001/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.0-docker-overlay2-arm64.tar.lz4
I0816 00:11:53.052127 2418220 cache.go:56] Caching tarball of preloaded images
I0816 00:11:53.052139 2418220 image.go:79] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d in local docker daemon
I0816 00:11:53.052221 2418220 preload.go:172] Found /home/jenkins/minikube-integration/19452-2026001/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.0-docker-overlay2-arm64.tar.lz4 in cache, skipping download
I0816 00:11:53.052234 2418220 cache.go:59] Finished verifying existence of preloaded tar for v1.31.0 on docker
I0816 00:11:53.052340 2418220 profile.go:143] Saving config to /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/config.json ...
I0816 00:11:53.052360 2418220 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/config.json: {Name:mk82451302fc99f46e813a7aceca107dbdcfa5ea Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
W0816 00:11:53.071643 2418220 image.go:95] image gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d is of wrong architecture
I0816 00:11:53.071665 2418220 cache.go:149] Downloading gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d to local cache
I0816 00:11:53.071743 2418220 image.go:63] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d in local cache directory
I0816 00:11:53.071766 2418220 image.go:66] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d in local cache directory, skipping pull
I0816 00:11:53.071775 2418220 image.go:135] gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d exists in cache, skipping pull
I0816 00:11:53.071783 2418220 cache.go:152] successfully saved gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d as a tarball
I0816 00:11:53.071789 2418220 cache.go:162] Loading gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d from local cache
I0816 00:11:53.246602 2418220 cache.go:164] successfully loaded and using gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d from cached tarball
I0816 00:11:53.246657 2418220 cache.go:194] Successfully downloaded all kic artifacts
I0816 00:11:53.246699 2418220 start.go:360] acquireMachinesLock for embed-certs-951478: {Name:mkaf3d425cc498fe588fa88c10fbce082b5cf19b Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0816 00:11:53.247157 2418220 start.go:364] duration metric: took 429.955µs to acquireMachinesLock for "embed-certs-951478"
I0816 00:11:53.247202 2418220 start.go:93] Provisioning new machine with config: &{Name:embed-certs-951478 KeepContext:false EmbedCerts:true MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.0 ClusterName:embed-certs-951478 Namespace:default APIServerHAVIP: APIServe
rName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.31.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:f
alse CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.31.0 ContainerRuntime:docker ControlPlane:true Worker:true}
I0816 00:11:53.247292 2418220 start.go:125] createHost starting for "" (driver="docker")
I0816 00:11:53.343372 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:11:55.838560 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:11:53.251535 2418220 out.go:235] * Creating docker container (CPUs=2, Memory=2200MB) ...
I0816 00:11:53.251794 2418220 start.go:159] libmachine.API.Create for "embed-certs-951478" (driver="docker")
I0816 00:11:53.251830 2418220 client.go:168] LocalClient.Create starting
I0816 00:11:53.251896 2418220 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/ca.pem
I0816 00:11:53.251935 2418220 main.go:141] libmachine: Decoding PEM data...
I0816 00:11:53.251951 2418220 main.go:141] libmachine: Parsing certificate...
I0816 00:11:53.252005 2418220 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/cert.pem
I0816 00:11:53.252025 2418220 main.go:141] libmachine: Decoding PEM data...
I0816 00:11:53.252039 2418220 main.go:141] libmachine: Parsing certificate...
I0816 00:11:53.252431 2418220 cli_runner.go:164] Run: docker network inspect embed-certs-951478 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W0816 00:11:53.268756 2418220 cli_runner.go:211] docker network inspect embed-certs-951478 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I0816 00:11:53.268848 2418220 network_create.go:284] running [docker network inspect embed-certs-951478] to gather additional debugging logs...
I0816 00:11:53.268870 2418220 cli_runner.go:164] Run: docker network inspect embed-certs-951478
W0816 00:11:53.287655 2418220 cli_runner.go:211] docker network inspect embed-certs-951478 returned with exit code 1
I0816 00:11:53.287691 2418220 network_create.go:287] error running [docker network inspect embed-certs-951478]: docker network inspect embed-certs-951478: exit status 1
stdout:
[]
stderr:
Error response from daemon: network embed-certs-951478 not found
I0816 00:11:53.287704 2418220 network_create.go:289] output of [docker network inspect embed-certs-951478]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network embed-certs-951478 not found
** /stderr **
I0816 00:11:53.287858 2418220 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0816 00:11:53.304647 2418220 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-63fa5ee8e5ef IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:02:42:bd:d4:ba:4c} reservation:<nil>}
I0816 00:11:53.305080 2418220 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-159047428ed5 IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:02:42:a7:bc:29:e1} reservation:<nil>}
I0816 00:11:53.305518 2418220 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-701a52de9afc IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:02:42:8a:56:38:33} reservation:<nil>}
I0816 00:11:53.306111 2418220 network.go:206] using free private subnet 192.168.76.0/24: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0x40018aa0b0}
I0816 00:11:53.306149 2418220 network_create.go:124] attempt to create docker network embed-certs-951478 192.168.76.0/24 with gateway 192.168.76.1 and MTU of 1500 ...
I0816 00:11:53.306217 2418220 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.76.0/24 --gateway=192.168.76.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=embed-certs-951478 embed-certs-951478
I0816 00:11:53.394923 2418220 network_create.go:108] docker network embed-certs-951478 192.168.76.0/24 created
I0816 00:11:53.394958 2418220 kic.go:121] calculated static IP "192.168.76.2" for the "embed-certs-951478" container
I0816 00:11:53.395050 2418220 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0816 00:11:53.416557 2418220 cli_runner.go:164] Run: docker volume create embed-certs-951478 --label name.minikube.sigs.k8s.io=embed-certs-951478 --label created_by.minikube.sigs.k8s.io=true
I0816 00:11:53.434689 2418220 oci.go:103] Successfully created a docker volume embed-certs-951478
I0816 00:11:53.434782 2418220 cli_runner.go:164] Run: docker run --rm --name embed-certs-951478-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=embed-certs-951478 --entrypoint /usr/bin/test -v embed-certs-951478:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d -d /var/lib
I0816 00:11:54.095026 2418220 oci.go:107] Successfully prepared a docker volume embed-certs-951478
I0816 00:11:54.095082 2418220 preload.go:131] Checking if preload exists for k8s version v1.31.0 and runtime docker
I0816 00:11:54.095102 2418220 kic.go:194] Starting extracting preloaded images to volume ...
I0816 00:11:54.095185 2418220 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/19452-2026001/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.0-docker-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v embed-certs-951478:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d -I lz4 -xf /preloaded.tar -C /extractDir
I0816 00:11:57.838907 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:11:59.839494 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:11:58.044105 2418220 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/19452-2026001/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.0-docker-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v embed-certs-951478:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d -I lz4 -xf /preloaded.tar -C /extractDir: (3.948870819s)
I0816 00:11:58.044135 2418220 kic.go:203] duration metric: took 3.94902846s to extract preloaded images to volume ...
W0816 00:11:58.044296 2418220 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I0816 00:11:58.044407 2418220 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0816 00:11:58.097979 2418220 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname embed-certs-951478 --name embed-certs-951478 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=embed-certs-951478 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=embed-certs-951478 --network embed-certs-951478 --ip 192.168.76.2 --volume embed-certs-951478:/var --security-opt apparmor=unconfined --memory=2200mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d
I0816 00:11:58.453583 2418220 cli_runner.go:164] Run: docker container inspect embed-certs-951478 --format={{.State.Running}}
I0816 00:11:58.476619 2418220 cli_runner.go:164] Run: docker container inspect embed-certs-951478 --format={{.State.Status}}
I0816 00:11:58.500432 2418220 cli_runner.go:164] Run: docker exec embed-certs-951478 stat /var/lib/dpkg/alternatives/iptables
I0816 00:11:58.557486 2418220 oci.go:144] the created container "embed-certs-951478" has a running status.
I0816 00:11:58.557513 2418220 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/19452-2026001/.minikube/machines/embed-certs-951478/id_rsa...
I0816 00:11:59.396695 2418220 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/19452-2026001/.minikube/machines/embed-certs-951478/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0816 00:11:59.421830 2418220 cli_runner.go:164] Run: docker container inspect embed-certs-951478 --format={{.State.Status}}
I0816 00:11:59.449883 2418220 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0816 00:11:59.449905 2418220 kic_runner.go:114] Args: [docker exec --privileged embed-certs-951478 chown docker:docker /home/docker/.ssh/authorized_keys]
I0816 00:11:59.510542 2418220 cli_runner.go:164] Run: docker container inspect embed-certs-951478 --format={{.State.Status}}
I0816 00:11:59.551991 2418220 machine.go:93] provisionDockerMachine start ...
I0816 00:11:59.552082 2418220 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-951478
I0816 00:11:59.574972 2418220 main.go:141] libmachine: Using SSH client type: native
I0816 00:11:59.575247 2418220 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e49d0] 0x3e7230 <nil> [] 0s} 127.0.0.1 35089 <nil> <nil>}
I0816 00:11:59.575257 2418220 main.go:141] libmachine: About to run SSH command:
hostname
I0816 00:11:59.717314 2418220 main.go:141] libmachine: SSH cmd err, output: <nil>: embed-certs-951478
I0816 00:11:59.717336 2418220 ubuntu.go:169] provisioning hostname "embed-certs-951478"
I0816 00:11:59.717410 2418220 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-951478
I0816 00:11:59.738696 2418220 main.go:141] libmachine: Using SSH client type: native
I0816 00:11:59.739041 2418220 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e49d0] 0x3e7230 <nil> [] 0s} 127.0.0.1 35089 <nil> <nil>}
I0816 00:11:59.739058 2418220 main.go:141] libmachine: About to run SSH command:
sudo hostname embed-certs-951478 && echo "embed-certs-951478" | sudo tee /etc/hostname
I0816 00:11:59.897922 2418220 main.go:141] libmachine: SSH cmd err, output: <nil>: embed-certs-951478
I0816 00:11:59.898047 2418220 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-951478
I0816 00:11:59.917185 2418220 main.go:141] libmachine: Using SSH client type: native
I0816 00:11:59.917440 2418220 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e49d0] 0x3e7230 <nil> [] 0s} 127.0.0.1 35089 <nil> <nil>}
I0816 00:11:59.917459 2418220 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\sembed-certs-951478' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 embed-certs-951478/g' /etc/hosts;
else
echo '127.0.1.1 embed-certs-951478' | sudo tee -a /etc/hosts;
fi
fi
I0816 00:12:00.079149 2418220 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0816 00:12:00.079240 2418220 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/19452-2026001/.minikube CaCertPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/19452-2026001/.minikube}
I0816 00:12:00.079317 2418220 ubuntu.go:177] setting up certificates
I0816 00:12:00.079358 2418220 provision.go:84] configureAuth start
I0816 00:12:00.079470 2418220 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" embed-certs-951478
I0816 00:12:00.131755 2418220 provision.go:143] copyHostCerts
I0816 00:12:00.131839 2418220 exec_runner.go:144] found /home/jenkins/minikube-integration/19452-2026001/.minikube/ca.pem, removing ...
I0816 00:12:00.131850 2418220 exec_runner.go:203] rm: /home/jenkins/minikube-integration/19452-2026001/.minikube/ca.pem
I0816 00:12:00.131943 2418220 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/19452-2026001/.minikube/ca.pem (1082 bytes)
I0816 00:12:00.132342 2418220 exec_runner.go:144] found /home/jenkins/minikube-integration/19452-2026001/.minikube/cert.pem, removing ...
I0816 00:12:00.132368 2418220 exec_runner.go:203] rm: /home/jenkins/minikube-integration/19452-2026001/.minikube/cert.pem
I0816 00:12:00.132425 2418220 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/19452-2026001/.minikube/cert.pem (1123 bytes)
I0816 00:12:00.132560 2418220 exec_runner.go:144] found /home/jenkins/minikube-integration/19452-2026001/.minikube/key.pem, removing ...
I0816 00:12:00.132568 2418220 exec_runner.go:203] rm: /home/jenkins/minikube-integration/19452-2026001/.minikube/key.pem
I0816 00:12:00.132603 2418220 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/19452-2026001/.minikube/key.pem (1675 bytes)
I0816 00:12:00.132684 2418220 provision.go:117] generating server cert: /home/jenkins/minikube-integration/19452-2026001/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/19452-2026001/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/19452-2026001/.minikube/certs/ca-key.pem org=jenkins.embed-certs-951478 san=[127.0.0.1 192.168.76.2 embed-certs-951478 localhost minikube]
I0816 00:12:00.660182 2418220 provision.go:177] copyRemoteCerts
I0816 00:12:00.660293 2418220 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0816 00:12:00.660342 2418220 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-951478
I0816 00:12:00.677903 2418220 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35089 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/embed-certs-951478/id_rsa Username:docker}
I0816 00:12:00.770741 2418220 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I0816 00:12:00.795035 2418220 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/machines/server.pem --> /etc/docker/server.pem (1224 bytes)
I0816 00:12:00.820111 2418220 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I0816 00:12:00.846492 2418220 provision.go:87] duration metric: took 767.106659ms to configureAuth
I0816 00:12:00.846517 2418220 ubuntu.go:193] setting minikube options for container-runtime
I0816 00:12:00.846698 2418220 config.go:182] Loaded profile config "embed-certs-951478": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.0
I0816 00:12:00.846753 2418220 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-951478
I0816 00:12:00.863080 2418220 main.go:141] libmachine: Using SSH client type: native
I0816 00:12:00.863335 2418220 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e49d0] 0x3e7230 <nil> [] 0s} 127.0.0.1 35089 <nil> <nil>}
I0816 00:12:00.863352 2418220 main.go:141] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I0816 00:12:01.006556 2418220 main.go:141] libmachine: SSH cmd err, output: <nil>: overlay
I0816 00:12:01.006582 2418220 ubuntu.go:71] root file system type: overlay
I0816 00:12:01.006719 2418220 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I0816 00:12:01.006801 2418220 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-951478
I0816 00:12:01.026029 2418220 main.go:141] libmachine: Using SSH client type: native
I0816 00:12:01.026278 2418220 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e49d0] 0x3e7230 <nil> [] 0s} 127.0.0.1 35089 <nil> <nil>}
I0816 00:12:01.026365 2418220 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I0816 00:12:01.172381 2418220 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
I0816 00:12:01.172570 2418220 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-951478
I0816 00:12:01.193164 2418220 main.go:141] libmachine: Using SSH client type: native
I0816 00:12:01.193510 2418220 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e49d0] 0x3e7230 <nil> [] 0s} 127.0.0.1 35089 <nil> <nil>}
I0816 00:12:01.193534 2418220 main.go:141] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I0816 00:12:02.064806 2418220 main.go:141] libmachine: SSH cmd err, output: <nil>: --- /lib/systemd/system/docker.service 2024-08-12 11:49:05.000000000 +0000
+++ /lib/systemd/system/docker.service.new 2024-08-16 00:12:01.169480883 +0000
@@ -1,46 +1,49 @@
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
-After=network-online.target docker.socket firewalld.service containerd.service time-set.target
-Wants=network-online.target containerd.service
+BindsTo=containerd.service
+After=network-online.target firewalld.service containerd.service
+Wants=network-online.target
Requires=docker.socket
+StartLimitBurst=3
+StartLimitIntervalSec=60
[Service]
Type=notify
-# the default is not to use systemd for cgroups because the delegate issues still
-# exists and systemd currently does not support the cgroup feature set required
-# for containers run by docker
-ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
-ExecReload=/bin/kill -s HUP $MAINPID
-TimeoutStartSec=0
-RestartSec=2
-Restart=always
+Restart=on-failure
-# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
-# Both the old, and new location are accepted by systemd 229 and up, so using the old location
-# to make them work for either version of systemd.
-StartLimitBurst=3
-# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
-# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
-# this option work for either version of systemd.
-StartLimitInterval=60s
+
+# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
+# The base configuration already specifies an 'ExecStart=...' command. The first directive
+# here is to clear out that command inherited from the base configuration. Without this,
+# the command from the base configuration and the command specified here are treated as
+# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
+# will catch this invalid input and refuse to start the service with an error like:
+# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
+
+# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
+# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
+ExecStart=
+ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
+ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
-# Comment TasksMax if your systemd version does not support it.
-# Only systemd 226 and above support this option.
+# Comment out TasksMax if your systemd version does not support it.
+# Only systemd 226 and above support this option.
TasksMax=infinity
+TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
-OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable docker
I0816 00:12:02.064847 2418220 machine.go:96] duration metric: took 2.512837254s to provisionDockerMachine
I0816 00:12:02.064858 2418220 client.go:171] duration metric: took 8.813017466s to LocalClient.Create
I0816 00:12:02.064873 2418220 start.go:167] duration metric: took 8.813079372s to libmachine.API.Create "embed-certs-951478"
I0816 00:12:02.064881 2418220 start.go:293] postStartSetup for "embed-certs-951478" (driver="docker")
I0816 00:12:02.064894 2418220 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0816 00:12:02.064975 2418220 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0816 00:12:02.065019 2418220 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-951478
I0816 00:12:02.083025 2418220 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35089 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/embed-certs-951478/id_rsa Username:docker}
I0816 00:12:02.183115 2418220 ssh_runner.go:195] Run: cat /etc/os-release
I0816 00:12:02.186672 2418220 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0816 00:12:02.186709 2418220 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0816 00:12:02.186734 2418220 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0816 00:12:02.186742 2418220 info.go:137] Remote host: Ubuntu 22.04.4 LTS
I0816 00:12:02.186752 2418220 filesync.go:126] Scanning /home/jenkins/minikube-integration/19452-2026001/.minikube/addons for local assets ...
I0816 00:12:02.186812 2418220 filesync.go:126] Scanning /home/jenkins/minikube-integration/19452-2026001/.minikube/files for local assets ...
I0816 00:12:02.186893 2418220 filesync.go:149] local asset: /home/jenkins/minikube-integration/19452-2026001/.minikube/files/etc/ssl/certs/20313962.pem -> 20313962.pem in /etc/ssl/certs
I0816 00:12:02.186997 2418220 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I0816 00:12:02.196047 2418220 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/files/etc/ssl/certs/20313962.pem --> /etc/ssl/certs/20313962.pem (1708 bytes)
I0816 00:12:02.221475 2418220 start.go:296] duration metric: took 156.577962ms for postStartSetup
I0816 00:12:02.221860 2418220 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" embed-certs-951478
I0816 00:12:02.243604 2418220 profile.go:143] Saving config to /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/config.json ...
I0816 00:12:02.243915 2418220 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0816 00:12:02.243967 2418220 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-951478
I0816 00:12:02.260757 2418220 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35089 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/embed-certs-951478/id_rsa Username:docker}
I0816 00:12:02.354609 2418220 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0816 00:12:02.359646 2418220 start.go:128] duration metric: took 9.112328697s to createHost
I0816 00:12:02.359682 2418220 start.go:83] releasing machines lock for "embed-certs-951478", held for 9.112502288s
I0816 00:12:02.359788 2418220 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" embed-certs-951478
I0816 00:12:02.376769 2418220 ssh_runner.go:195] Run: cat /version.json
I0816 00:12:02.376851 2418220 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-951478
I0816 00:12:02.377162 2418220 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0816 00:12:02.377230 2418220 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-951478
I0816 00:12:02.394583 2418220 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35089 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/embed-certs-951478/id_rsa Username:docker}
I0816 00:12:02.415353 2418220 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35089 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/embed-certs-951478/id_rsa Username:docker}
I0816 00:12:02.485217 2418220 ssh_runner.go:195] Run: systemctl --version
I0816 00:12:02.628573 2418220 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0816 00:12:02.632897 2418220 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0816 00:12:02.658493 2418220 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0816 00:12:02.658614 2418220 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0816 00:12:02.688844 2418220 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
I0816 00:12:02.688873 2418220 start.go:495] detecting cgroup driver to use...
I0816 00:12:02.688909 2418220 detect.go:187] detected "cgroupfs" cgroup driver on host os
I0816 00:12:02.689026 2418220 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0816 00:12:02.706512 2418220 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10"|' /etc/containerd/config.toml"
I0816 00:12:02.718130 2418220 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0816 00:12:02.729240 2418220 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I0816 00:12:02.729305 2418220 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I0816 00:12:02.739565 2418220 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0816 00:12:02.751443 2418220 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0816 00:12:02.761866 2418220 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0816 00:12:02.772032 2418220 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0816 00:12:02.782187 2418220 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0816 00:12:02.792161 2418220 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0816 00:12:02.802920 2418220 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I0816 00:12:02.814194 2418220 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0816 00:12:02.823612 2418220 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0816 00:12:02.832302 2418220 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0816 00:12:02.934319 2418220 ssh_runner.go:195] Run: sudo systemctl restart containerd
I0816 00:12:03.044008 2418220 start.go:495] detecting cgroup driver to use...
I0816 00:12:03.044110 2418220 detect.go:187] detected "cgroupfs" cgroup driver on host os
I0816 00:12:03.044203 2418220 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I0816 00:12:03.065219 2418220 cruntime.go:279] skipping containerd shutdown because we are bound to it
I0816 00:12:03.065352 2418220 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0816 00:12:03.079776 2418220 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I0816 00:12:03.099909 2418220 ssh_runner.go:195] Run: which cri-dockerd
I0816 00:12:03.104978 2418220 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I0816 00:12:03.116382 2418220 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (190 bytes)
I0816 00:12:03.158620 2418220 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I0816 00:12:03.288059 2418220 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I0816 00:12:03.399961 2418220 docker.go:574] configuring docker to use "cgroupfs" as cgroup driver...
I0816 00:12:03.400176 2418220 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
I0816 00:12:03.420911 2418220 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0816 00:12:03.527133 2418220 ssh_runner.go:195] Run: sudo systemctl restart docker
I0816 00:12:03.828027 2418220 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I0816 00:12:03.847150 2418220 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0816 00:12:03.861253 2418220 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I0816 00:12:03.960159 2418220 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I0816 00:12:04.051165 2418220 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0816 00:12:04.149394 2418220 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I0816 00:12:04.164191 2418220 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0816 00:12:04.176914 2418220 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0816 00:12:04.279586 2418220 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I0816 00:12:04.371250 2418220 start.go:542] Will wait 60s for socket path /var/run/cri-dockerd.sock
I0816 00:12:04.371347 2418220 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I0816 00:12:04.375488 2418220 start.go:563] Will wait 60s for crictl version
I0816 00:12:04.375573 2418220 ssh_runner.go:195] Run: which crictl
I0816 00:12:04.379030 2418220 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0816 00:12:04.426385 2418220 start.go:579] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 27.1.2
RuntimeApiVersion: v1
I0816 00:12:04.426483 2418220 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0816 00:12:04.449167 2418220 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0816 00:12:01.841503 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:04.338013 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:06.340432 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:04.498171 2418220 out.go:235] * Preparing Kubernetes v1.31.0 on Docker 27.1.2 ...
I0816 00:12:04.498273 2418220 cli_runner.go:164] Run: docker network inspect embed-certs-951478 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0816 00:12:04.518125 2418220 ssh_runner.go:195] Run: grep 192.168.76.1 host.minikube.internal$ /etc/hosts
I0816 00:12:04.522596 2418220 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.76.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0816 00:12:04.536643 2418220 kubeadm.go:883] updating cluster {Name:embed-certs-951478 KeepContext:false EmbedCerts:true MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.0 ClusterName:embed-certs-951478 Namespace:default APIServerHAVIP: APIServerName:minikubeCA AP
IServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.31.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false Cu
stomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I0816 00:12:04.536784 2418220 preload.go:131] Checking if preload exists for k8s version v1.31.0 and runtime docker
I0816 00:12:04.536848 2418220 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0816 00:12:04.559266 2418220 docker.go:685] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.31.0
registry.k8s.io/kube-scheduler:v1.31.0
registry.k8s.io/kube-controller-manager:v1.31.0
registry.k8s.io/kube-proxy:v1.31.0
registry.k8s.io/etcd:3.5.15-0
registry.k8s.io/pause:3.10
registry.k8s.io/coredns/coredns:v1.11.1
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I0816 00:12:04.559291 2418220 docker.go:615] Images already preloaded, skipping extraction
I0816 00:12:04.559361 2418220 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0816 00:12:04.584452 2418220 docker.go:685] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.31.0
registry.k8s.io/kube-scheduler:v1.31.0
registry.k8s.io/kube-controller-manager:v1.31.0
registry.k8s.io/kube-proxy:v1.31.0
registry.k8s.io/etcd:3.5.15-0
registry.k8s.io/pause:3.10
registry.k8s.io/coredns/coredns:v1.11.1
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I0816 00:12:04.584478 2418220 cache_images.go:84] Images are preloaded, skipping loading
I0816 00:12:04.584497 2418220 kubeadm.go:934] updating node { 192.168.76.2 8443 v1.31.0 docker true true} ...
I0816 00:12:04.584595 2418220 kubeadm.go:946] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.31.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=embed-certs-951478 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.76.2
[Install]
config:
{KubernetesVersion:v1.31.0 ClusterName:embed-certs-951478 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I0816 00:12:04.584668 2418220 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I0816 00:12:04.651013 2418220 cni.go:84] Creating CNI manager for ""
I0816 00:12:04.651044 2418220 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0816 00:12:04.651066 2418220 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I0816 00:12:04.651086 2418220 kubeadm.go:181] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.76.2 APIServerPort:8443 KubernetesVersion:v1.31.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:embed-certs-951478 NodeName:embed-certs-951478 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.76.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.76.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:
/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I0816 00:12:04.651251 2418220 kubeadm.go:187] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.76.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///var/run/cri-dockerd.sock
name: "embed-certs-951478"
kubeletExtraArgs:
node-ip: 192.168.76.2
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.76.2"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.31.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
I0816 00:12:04.651326 2418220 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.31.0
I0816 00:12:04.661002 2418220 binaries.go:44] Found k8s binaries, skipping transfer
I0816 00:12:04.661128 2418220 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I0816 00:12:04.670347 2418220 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (317 bytes)
I0816 00:12:04.689568 2418220 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0816 00:12:04.709361 2418220 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2160 bytes)
I0816 00:12:04.728114 2418220 ssh_runner.go:195] Run: grep 192.168.76.2 control-plane.minikube.internal$ /etc/hosts
I0816 00:12:04.731663 2418220 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.76.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0816 00:12:04.744044 2418220 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0816 00:12:04.842165 2418220 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0816 00:12:04.859515 2418220 certs.go:68] Setting up /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478 for IP: 192.168.76.2
I0816 00:12:04.859538 2418220 certs.go:194] generating shared ca certs ...
I0816 00:12:04.859555 2418220 certs.go:226] acquiring lock for ca certs: {Name:mkddf294a5c2bc6874920ab9b3e5ac4767302c25 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0816 00:12:04.859688 2418220 certs.go:235] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/19452-2026001/.minikube/ca.key
I0816 00:12:04.859736 2418220 certs.go:235] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/19452-2026001/.minikube/proxy-client-ca.key
I0816 00:12:04.859749 2418220 certs.go:256] generating profile certs ...
I0816 00:12:04.859804 2418220 certs.go:363] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/client.key
I0816 00:12:04.859821 2418220 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/client.crt with IP's: []
I0816 00:12:05.337591 2418220 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/client.crt ...
I0816 00:12:05.337657 2418220 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/client.crt: {Name:mk66acae4dba4c0e6f8b3691682ccc08e20ed8fb Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0816 00:12:05.338293 2418220 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/client.key ...
I0816 00:12:05.338317 2418220 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/client.key: {Name:mk56dcbe25ac76068b8b8c268c775b98a6f606ab Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0816 00:12:05.338479 2418220 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/apiserver.key.742a43b5
I0816 00:12:05.338503 2418220 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/apiserver.crt.742a43b5 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.76.2]
I0816 00:12:05.959980 2418220 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/apiserver.crt.742a43b5 ...
I0816 00:12:05.960017 2418220 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/apiserver.crt.742a43b5: {Name:mkd19119812e3979aba84a5f28384010afcbe451 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0816 00:12:05.960605 2418220 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/apiserver.key.742a43b5 ...
I0816 00:12:05.960626 2418220 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/apiserver.key.742a43b5: {Name:mkb2bc172500f8845b5c37850b85cb33307854d4 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0816 00:12:05.961261 2418220 certs.go:381] copying /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/apiserver.crt.742a43b5 -> /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/apiserver.crt
I0816 00:12:05.961361 2418220 certs.go:385] copying /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/apiserver.key.742a43b5 -> /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/apiserver.key
I0816 00:12:05.961438 2418220 certs.go:363] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/proxy-client.key
I0816 00:12:05.961461 2418220 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/proxy-client.crt with IP's: []
I0816 00:12:06.500714 2418220 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/proxy-client.crt ...
I0816 00:12:06.500747 2418220 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/proxy-client.crt: {Name:mkbd1b9d32ab9b584765ac042a8beb09a4272123 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0816 00:12:06.500929 2418220 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/proxy-client.key ...
I0816 00:12:06.500947 2418220 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/proxy-client.key: {Name:mk4e167089caf226b28f1353facbccc7b07f9235 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0816 00:12:06.501141 2418220 certs.go:484] found cert: /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/2031396.pem (1338 bytes)
W0816 00:12:06.501188 2418220 certs.go:480] ignoring /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/2031396_empty.pem, impossibly tiny 0 bytes
I0816 00:12:06.501202 2418220 certs.go:484] found cert: /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/ca-key.pem (1679 bytes)
I0816 00:12:06.501230 2418220 certs.go:484] found cert: /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/ca.pem (1082 bytes)
I0816 00:12:06.501259 2418220 certs.go:484] found cert: /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/cert.pem (1123 bytes)
I0816 00:12:06.501289 2418220 certs.go:484] found cert: /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/key.pem (1675 bytes)
I0816 00:12:06.501336 2418220 certs.go:484] found cert: /home/jenkins/minikube-integration/19452-2026001/.minikube/files/etc/ssl/certs/20313962.pem (1708 bytes)
I0816 00:12:06.502082 2418220 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0816 00:12:06.527666 2418220 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I0816 00:12:06.553368 2418220 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0816 00:12:06.579782 2418220 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I0816 00:12:06.605806 2418220 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1428 bytes)
I0816 00:12:06.631485 2418220 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I0816 00:12:06.657393 2418220 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0816 00:12:06.682949 2418220 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/profiles/embed-certs-951478/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I0816 00:12:06.707769 2418220 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/certs/2031396.pem --> /usr/share/ca-certificates/2031396.pem (1338 bytes)
I0816 00:12:06.733548 2418220 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/files/etc/ssl/certs/20313962.pem --> /usr/share/ca-certificates/20313962.pem (1708 bytes)
I0816 00:12:06.759041 2418220 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19452-2026001/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0816 00:12:06.784180 2418220 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I0816 00:12:06.802964 2418220 ssh_runner.go:195] Run: openssl version
I0816 00:12:06.808736 2418220 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/2031396.pem && ln -fs /usr/share/ca-certificates/2031396.pem /etc/ssl/certs/2031396.pem"
I0816 00:12:06.818641 2418220 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/2031396.pem
I0816 00:12:06.822668 2418220 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Aug 15 23:13 /usr/share/ca-certificates/2031396.pem
I0816 00:12:06.822733 2418220 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/2031396.pem
I0816 00:12:06.829847 2418220 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/2031396.pem /etc/ssl/certs/51391683.0"
I0816 00:12:06.841351 2418220 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/20313962.pem && ln -fs /usr/share/ca-certificates/20313962.pem /etc/ssl/certs/20313962.pem"
I0816 00:12:06.851576 2418220 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/20313962.pem
I0816 00:12:06.855441 2418220 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Aug 15 23:13 /usr/share/ca-certificates/20313962.pem
I0816 00:12:06.855504 2418220 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/20313962.pem
I0816 00:12:06.862508 2418220 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/20313962.pem /etc/ssl/certs/3ec20f2e.0"
I0816 00:12:06.871943 2418220 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0816 00:12:06.885183 2418220 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0816 00:12:06.888730 2418220 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Aug 15 23:06 /usr/share/ca-certificates/minikubeCA.pem
I0816 00:12:06.888795 2418220 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0816 00:12:06.895795 2418220 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I0816 00:12:06.907234 2418220 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0816 00:12:06.911424 2418220 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I0816 00:12:06.911519 2418220 kubeadm.go:392] StartCluster: {Name:embed-certs-951478 KeepContext:false EmbedCerts:true MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.0 ClusterName:embed-certs-951478 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.31.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0816 00:12:06.911654 2418220 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I0816 00:12:06.931600 2418220 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0816 00:12:06.940701 2418220 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0816 00:12:06.950675 2418220 kubeadm.go:214] ignoring SystemVerification for kubeadm because of docker driver
I0816 00:12:06.950751 2418220 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0816 00:12:06.960103 2418220 kubeadm.go:155] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0816 00:12:06.960125 2418220 kubeadm.go:157] found existing configuration files:
I0816 00:12:06.960198 2418220 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I0816 00:12:06.969241 2418220 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I0816 00:12:06.969308 2418220 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I0816 00:12:06.978006 2418220 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I0816 00:12:06.987765 2418220 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I0816 00:12:06.987836 2418220 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I0816 00:12:06.996521 2418220 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I0816 00:12:07.006173 2418220 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I0816 00:12:07.006308 2418220 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I0816 00:12:07.015686 2418220 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I0816 00:12:07.025031 2418220 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I0816 00:12:07.025096 2418220 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I0816 00:12:07.033894 2418220 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.31.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I0816 00:12:07.074916 2418220 kubeadm.go:310] [init] Using Kubernetes version: v1.31.0
I0816 00:12:07.075009 2418220 kubeadm.go:310] [preflight] Running pre-flight checks
I0816 00:12:07.097390 2418220 kubeadm.go:310] [preflight] The system verification failed. Printing the output from the verification:
I0816 00:12:07.097477 2418220 kubeadm.go:310] KERNEL_VERSION: 5.15.0-1067-aws
I0816 00:12:07.097542 2418220 kubeadm.go:310] OS: Linux
I0816 00:12:07.097599 2418220 kubeadm.go:310] CGROUPS_CPU: enabled
I0816 00:12:07.097660 2418220 kubeadm.go:310] CGROUPS_CPUACCT: enabled
I0816 00:12:07.097719 2418220 kubeadm.go:310] CGROUPS_CPUSET: enabled
I0816 00:12:07.097784 2418220 kubeadm.go:310] CGROUPS_DEVICES: enabled
I0816 00:12:07.097840 2418220 kubeadm.go:310] CGROUPS_FREEZER: enabled
I0816 00:12:07.097910 2418220 kubeadm.go:310] CGROUPS_MEMORY: enabled
I0816 00:12:07.097972 2418220 kubeadm.go:310] CGROUPS_PIDS: enabled
I0816 00:12:07.098037 2418220 kubeadm.go:310] CGROUPS_HUGETLB: enabled
I0816 00:12:07.098103 2418220 kubeadm.go:310] CGROUPS_BLKIO: enabled
I0816 00:12:07.161709 2418220 kubeadm.go:310] [preflight] Pulling images required for setting up a Kubernetes cluster
I0816 00:12:07.161839 2418220 kubeadm.go:310] [preflight] This might take a minute or two, depending on the speed of your internet connection
I0816 00:12:07.161937 2418220 kubeadm.go:310] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I0816 00:12:07.184891 2418220 kubeadm.go:310] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I0816 00:12:07.189145 2418220 out.go:235] - Generating certificates and keys ...
I0816 00:12:07.189249 2418220 kubeadm.go:310] [certs] Using existing ca certificate authority
I0816 00:12:07.189320 2418220 kubeadm.go:310] [certs] Using existing apiserver certificate and key on disk
I0816 00:12:07.387551 2418220 kubeadm.go:310] [certs] Generating "apiserver-kubelet-client" certificate and key
I0816 00:12:08.847475 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:11.339958 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:08.117701 2418220 kubeadm.go:310] [certs] Generating "front-proxy-ca" certificate and key
I0816 00:12:08.527623 2418220 kubeadm.go:310] [certs] Generating "front-proxy-client" certificate and key
I0816 00:12:09.032530 2418220 kubeadm.go:310] [certs] Generating "etcd/ca" certificate and key
I0816 00:12:09.377734 2418220 kubeadm.go:310] [certs] Generating "etcd/server" certificate and key
I0816 00:12:09.378068 2418220 kubeadm.go:310] [certs] etcd/server serving cert is signed for DNS names [embed-certs-951478 localhost] and IPs [192.168.76.2 127.0.0.1 ::1]
I0816 00:12:10.081288 2418220 kubeadm.go:310] [certs] Generating "etcd/peer" certificate and key
I0816 00:12:10.081663 2418220 kubeadm.go:310] [certs] etcd/peer serving cert is signed for DNS names [embed-certs-951478 localhost] and IPs [192.168.76.2 127.0.0.1 ::1]
I0816 00:12:10.535653 2418220 kubeadm.go:310] [certs] Generating "etcd/healthcheck-client" certificate and key
I0816 00:12:10.808876 2418220 kubeadm.go:310] [certs] Generating "apiserver-etcd-client" certificate and key
I0816 00:12:11.320771 2418220 kubeadm.go:310] [certs] Generating "sa" key and public key
I0816 00:12:11.321096 2418220 kubeadm.go:310] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I0816 00:12:11.805389 2418220 kubeadm.go:310] [kubeconfig] Writing "admin.conf" kubeconfig file
I0816 00:12:12.158137 2418220 kubeadm.go:310] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I0816 00:12:12.427642 2418220 kubeadm.go:310] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I0816 00:12:12.707148 2418220 kubeadm.go:310] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I0816 00:12:13.032553 2418220 kubeadm.go:310] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I0816 00:12:13.033346 2418220 kubeadm.go:310] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I0816 00:12:13.036465 2418220 kubeadm.go:310] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I0816 00:12:13.341238 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:15.358241 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:13.038843 2418220 out.go:235] - Booting up control plane ...
I0816 00:12:13.038945 2418220 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-apiserver"
I0816 00:12:13.039020 2418220 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I0816 00:12:13.039732 2418220 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-scheduler"
I0816 00:12:13.051965 2418220 kubeadm.go:310] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I0816 00:12:13.060044 2418220 kubeadm.go:310] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I0816 00:12:13.060100 2418220 kubeadm.go:310] [kubelet-start] Starting the kubelet
I0816 00:12:13.169045 2418220 kubeadm.go:310] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I0816 00:12:13.169162 2418220 kubeadm.go:310] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I0816 00:12:14.174832 2418220 kubeadm.go:310] [kubelet-check] The kubelet is healthy after 1.005676671s
I0816 00:12:14.174939 2418220 kubeadm.go:310] [api-check] Waiting for a healthy API server. This can take up to 4m0s
I0816 00:12:20.676135 2418220 kubeadm.go:310] [api-check] The API server is healthy after 6.501484628s
I0816 00:12:20.696164 2418220 kubeadm.go:310] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I0816 00:12:20.709659 2418220 kubeadm.go:310] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I0816 00:12:20.745600 2418220 kubeadm.go:310] [upload-certs] Skipping phase. Please see --upload-certs
I0816 00:12:20.745828 2418220 kubeadm.go:310] [mark-control-plane] Marking the node embed-certs-951478 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I0816 00:12:20.758460 2418220 kubeadm.go:310] [bootstrap-token] Using token: bdnnnv.yz6t0ov6x5s6xvn8
I0816 00:12:17.842997 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:20.343831 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:20.760813 2418220 out.go:235] - Configuring RBAC rules ...
I0816 00:12:20.761032 2418220 kubeadm.go:310] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I0816 00:12:20.767109 2418220 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I0816 00:12:20.776820 2418220 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I0816 00:12:20.781967 2418220 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I0816 00:12:20.801745 2418220 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I0816 00:12:20.807596 2418220 kubeadm.go:310] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I0816 00:12:21.083960 2418220 kubeadm.go:310] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I0816 00:12:21.523187 2418220 kubeadm.go:310] [addons] Applied essential addon: CoreDNS
I0816 00:12:22.083227 2418220 kubeadm.go:310] [addons] Applied essential addon: kube-proxy
I0816 00:12:22.084458 2418220 kubeadm.go:310]
I0816 00:12:22.084543 2418220 kubeadm.go:310] Your Kubernetes control-plane has initialized successfully!
I0816 00:12:22.084559 2418220 kubeadm.go:310]
I0816 00:12:22.084655 2418220 kubeadm.go:310] To start using your cluster, you need to run the following as a regular user:
I0816 00:12:22.084672 2418220 kubeadm.go:310]
I0816 00:12:22.084699 2418220 kubeadm.go:310] mkdir -p $HOME/.kube
I0816 00:12:22.084770 2418220 kubeadm.go:310] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I0816 00:12:22.084825 2418220 kubeadm.go:310] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I0816 00:12:22.084834 2418220 kubeadm.go:310]
I0816 00:12:22.084887 2418220 kubeadm.go:310] Alternatively, if you are the root user, you can run:
I0816 00:12:22.084902 2418220 kubeadm.go:310]
I0816 00:12:22.084955 2418220 kubeadm.go:310] export KUBECONFIG=/etc/kubernetes/admin.conf
I0816 00:12:22.084963 2418220 kubeadm.go:310]
I0816 00:12:22.085014 2418220 kubeadm.go:310] You should now deploy a pod network to the cluster.
I0816 00:12:22.085095 2418220 kubeadm.go:310] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I0816 00:12:22.085167 2418220 kubeadm.go:310] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I0816 00:12:22.085176 2418220 kubeadm.go:310]
I0816 00:12:22.085257 2418220 kubeadm.go:310] You can now join any number of control-plane nodes by copying certificate authorities
I0816 00:12:22.085335 2418220 kubeadm.go:310] and service account keys on each node and then running the following as root:
I0816 00:12:22.085344 2418220 kubeadm.go:310]
I0816 00:12:22.085456 2418220 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token bdnnnv.yz6t0ov6x5s6xvn8 \
I0816 00:12:22.085575 2418220 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:4ca757b3a2e756ea9f20bac9790b7eeeaad243a2d641f2fcc3157bb9ecd2082f \
I0816 00:12:22.085638 2418220 kubeadm.go:310] --control-plane
I0816 00:12:22.085653 2418220 kubeadm.go:310]
I0816 00:12:22.085738 2418220 kubeadm.go:310] Then you can join any number of worker nodes by running the following on each as root:
I0816 00:12:22.085754 2418220 kubeadm.go:310]
I0816 00:12:22.085840 2418220 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token bdnnnv.yz6t0ov6x5s6xvn8 \
I0816 00:12:22.085951 2418220 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:4ca757b3a2e756ea9f20bac9790b7eeeaad243a2d641f2fcc3157bb9ecd2082f
I0816 00:12:22.091855 2418220 kubeadm.go:310] W0816 00:12:07.071548 1841 common.go:101] your configuration file uses a deprecated API spec: "kubeadm.k8s.io/v1beta3" (kind: "ClusterConfiguration"). Please use 'kubeadm config migrate --old-config old.yaml --new-config new.yaml', which will write the new, similar spec using a newer API version.
I0816 00:12:22.092233 2418220 kubeadm.go:310] W0816 00:12:07.072512 1841 common.go:101] your configuration file uses a deprecated API spec: "kubeadm.k8s.io/v1beta3" (kind: "InitConfiguration"). Please use 'kubeadm config migrate --old-config old.yaml --new-config new.yaml', which will write the new, similar spec using a newer API version.
I0816 00:12:22.092496 2418220 kubeadm.go:310] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1067-aws\n", err: exit status 1
I0816 00:12:22.092797 2418220 kubeadm.go:310] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I0816 00:12:22.092860 2418220 cni.go:84] Creating CNI manager for ""
I0816 00:12:22.092883 2418220 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0816 00:12:22.094886 2418220 out.go:177] * Configuring bridge CNI (Container Networking Interface) ...
I0816 00:12:22.096640 2418220 ssh_runner.go:195] Run: sudo mkdir -p /etc/cni/net.d
I0816 00:12:22.105691 2418220 ssh_runner.go:362] scp memory --> /etc/cni/net.d/1-k8s.conflist (496 bytes)
I0816 00:12:22.127353 2418220 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I0816 00:12:22.127478 2418220 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I0816 00:12:22.127563 2418220 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes embed-certs-951478 minikube.k8s.io/updated_at=2024_08_16T00_12_22_0700 minikube.k8s.io/version=v1.33.1 minikube.k8s.io/commit=fe9c1d9e27059a205b0df8e5e482803b65ef8774 minikube.k8s.io/name=embed-certs-951478 minikube.k8s.io/primary=true
I0816 00:12:22.296677 2418220 ops.go:34] apiserver oom_adj: -16
I0816 00:12:22.296777 2418220 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0816 00:12:22.796981 2418220 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0816 00:12:23.296930 2418220 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0816 00:12:23.796873 2418220 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0816 00:12:24.296831 2418220 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0816 00:12:24.797739 2418220 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0816 00:12:25.296897 2418220 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0816 00:12:25.797876 2418220 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0816 00:12:26.297402 2418220 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0816 00:12:26.402965 2418220 kubeadm.go:1113] duration metric: took 4.275532331s to wait for elevateKubeSystemPrivileges
I0816 00:12:26.402997 2418220 kubeadm.go:394] duration metric: took 19.491487864s to StartCluster
I0816 00:12:26.403016 2418220 settings.go:142] acquiring lock: {Name:mkd932093f6b6db884e5d5f97d2ea9be134ab309 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0816 00:12:26.403084 2418220 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/19452-2026001/kubeconfig
I0816 00:12:26.404476 2418220 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19452-2026001/kubeconfig: {Name:mkb1a4d12f06c0f193e7cb7c118eeb997c3969bc Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0816 00:12:26.404725 2418220 start.go:235] Will wait 6m0s for node &{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.31.0 ContainerRuntime:docker ControlPlane:true Worker:true}
I0816 00:12:26.404852 2418220 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I0816 00:12:26.405125 2418220 config.go:182] Loaded profile config "embed-certs-951478": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.0
I0816 00:12:26.405167 2418220 addons.go:507] enable addons start: toEnable=map[ambassador:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false helm-tiller:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I0816 00:12:26.405231 2418220 addons.go:69] Setting storage-provisioner=true in profile "embed-certs-951478"
I0816 00:12:26.405252 2418220 addons.go:234] Setting addon storage-provisioner=true in "embed-certs-951478"
I0816 00:12:26.405277 2418220 host.go:66] Checking if "embed-certs-951478" exists ...
I0816 00:12:26.406088 2418220 addons.go:69] Setting default-storageclass=true in profile "embed-certs-951478"
I0816 00:12:26.406117 2418220 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "embed-certs-951478"
I0816 00:12:26.406218 2418220 cli_runner.go:164] Run: docker container inspect embed-certs-951478 --format={{.State.Status}}
I0816 00:12:26.406379 2418220 cli_runner.go:164] Run: docker container inspect embed-certs-951478 --format={{.State.Status}}
I0816 00:12:26.407352 2418220 out.go:177] * Verifying Kubernetes components...
I0816 00:12:26.409961 2418220 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0816 00:12:26.440984 2418220 out.go:177] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I0816 00:12:22.839620 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:25.338275 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:26.442928 2418220 addons.go:431] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0816 00:12:26.442946 2418220 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I0816 00:12:26.443011 2418220 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-951478
I0816 00:12:26.459378 2418220 addons.go:234] Setting addon default-storageclass=true in "embed-certs-951478"
I0816 00:12:26.459423 2418220 host.go:66] Checking if "embed-certs-951478" exists ...
I0816 00:12:26.459849 2418220 cli_runner.go:164] Run: docker container inspect embed-certs-951478 --format={{.State.Status}}
I0816 00:12:26.488953 2418220 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35089 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/embed-certs-951478/id_rsa Username:docker}
I0816 00:12:26.502024 2418220 addons.go:431] installing /etc/kubernetes/addons/storageclass.yaml
I0816 00:12:26.502044 2418220 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0816 00:12:26.502112 2418220 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-951478
I0816 00:12:26.532453 2418220 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35089 SSHKeyPath:/home/jenkins/minikube-integration/19452-2026001/.minikube/machines/embed-certs-951478/id_rsa Username:docker}
I0816 00:12:26.772261 2418220 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.76.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.31.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I0816 00:12:26.772420 2418220 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0816 00:12:26.803521 2418220 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0816 00:12:26.825115 2418220 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0816 00:12:27.696330 2418220 node_ready.go:35] waiting up to 6m0s for node "embed-certs-951478" to be "Ready" ...
I0816 00:12:27.696426 2418220 start.go:971] {"host.minikube.internal": 192.168.76.1} host record injected into CoreDNS's ConfigMap
I0816 00:12:27.719736 2418220 node_ready.go:49] node "embed-certs-951478" has status "Ready":"True"
I0816 00:12:27.719759 2418220 node_ready.go:38] duration metric: took 23.306081ms for node "embed-certs-951478" to be "Ready" ...
I0816 00:12:27.719768 2418220 pod_ready.go:36] extra waiting up to 6m0s for all system-critical pods including labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0816 00:12:27.740423 2418220 pod_ready.go:79] waiting up to 6m0s for pod "coredns-6f6b679f8f-gx79d" in "kube-system" namespace to be "Ready" ...
I0816 00:12:27.973570 2418220 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (1.169970424s)
I0816 00:12:27.973756 2418220 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: (1.148610282s)
I0816 00:12:27.984090 2418220 out.go:177] * Enabled addons: storage-provisioner, default-storageclass
I0816 00:12:27.338503 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:29.338983 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:27.985790 2418220 addons.go:510] duration metric: took 1.580612175s for enable addons: enabled=[storage-provisioner default-storageclass]
I0816 00:12:28.201157 2418220 kapi.go:214] "coredns" deployment in "kube-system" namespace and "embed-certs-951478" context rescaled to 1 replicas
I0816 00:12:29.747298 2418220 pod_ready.go:103] pod "coredns-6f6b679f8f-gx79d" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:32.246288 2418220 pod_ready.go:103] pod "coredns-6f6b679f8f-gx79d" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:31.839009 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:34.338847 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:36.339743 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:34.246388 2418220 pod_ready.go:103] pod "coredns-6f6b679f8f-gx79d" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:36.246902 2418220 pod_ready.go:103] pod "coredns-6f6b679f8f-gx79d" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:38.839318 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:41.337738 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:38.247610 2418220 pod_ready.go:103] pod "coredns-6f6b679f8f-gx79d" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:38.747518 2418220 pod_ready.go:98] pod "coredns-6f6b679f8f-gx79d" in "kube-system" namespace has status phase "Succeeded" (skipping!): {Phase:Succeeded Conditions:[{Type:PodReadyToStartContainers Status:False LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-08-16 00:12:38 +0000 UTC Reason: Message:} {Type:Initialized Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-08-16 00:12:26 +0000 UTC Reason:PodCompleted Message:} {Type:Ready Status:False LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-08-16 00:12:26 +0000 UTC Reason:PodCompleted Message:} {Type:ContainersReady Status:False LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-08-16 00:12:26 +0000 UTC Reason:PodCompleted Message:} {Type:PodScheduled Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-08-16 00:12:26 +0000 UTC Reason: Message:}] Message: Reason: NominatedNodeName: HostIP:192.168.76.2 HostIPs:[{IP:192.168.76.2
}] PodIP:10.244.0.2 PodIPs:[{IP:10.244.0.2}] StartTime:2024-08-16 00:12:26 +0000 UTC InitContainerStatuses:[] ContainerStatuses:[{Name:coredns State:{Waiting:nil Running:nil Terminated:&ContainerStateTerminated{ExitCode:0,Signal:0,Reason:Completed,Message:,StartedAt:2024-08-16 00:12:27 +0000 UTC,FinishedAt:2024-08-16 00:12:38 +0000 UTC,ContainerID:docker://b746a62a75218259e51590ac7adc43053cf61855a3c65759fb80d271c15c076a,}} LastTerminationState:{Waiting:nil Running:nil Terminated:nil} Ready:false RestartCount:0 Image:registry.k8s.io/coredns/coredns:v1.11.1 ImageID:docker-pullable://registry.k8s.io/coredns/coredns@sha256:1eeb4c7316bacb1d4c8ead65571cd92dd21e27359f0d4917f1a5822a73b75db1 ContainerID:docker://b746a62a75218259e51590ac7adc43053cf61855a3c65759fb80d271c15c076a Started:0x4001e6ee00 AllocatedResources:map[] Resources:nil VolumeMounts:[{Name:config-volume MountPath:/etc/coredns ReadOnly:true RecursiveReadOnly:0x4001e45900} {Name:kube-api-access-jsvj4 MountPath:/var/run/secrets/kubernetes.io/serviceaccount
ReadOnly:true RecursiveReadOnly:0x4001e45910}] User:nil AllocatedResourcesStatus:[]}] QOSClass:Burstable EphemeralContainerStatuses:[] Resize: ResourceClaimStatuses:[]}
I0816 00:12:38.747550 2418220 pod_ready.go:82] duration metric: took 11.007037537s for pod "coredns-6f6b679f8f-gx79d" in "kube-system" namespace to be "Ready" ...
E0816 00:12:38.747563 2418220 pod_ready.go:67] WaitExtra: waitPodCondition: pod "coredns-6f6b679f8f-gx79d" in "kube-system" namespace has status phase "Succeeded" (skipping!): {Phase:Succeeded Conditions:[{Type:PodReadyToStartContainers Status:False LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-08-16 00:12:38 +0000 UTC Reason: Message:} {Type:Initialized Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-08-16 00:12:26 +0000 UTC Reason:PodCompleted Message:} {Type:Ready Status:False LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-08-16 00:12:26 +0000 UTC Reason:PodCompleted Message:} {Type:ContainersReady Status:False LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-08-16 00:12:26 +0000 UTC Reason:PodCompleted Message:} {Type:PodScheduled Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-08-16 00:12:26 +0000 UTC Reason: Message:}] Message: Reason: NominatedNodeName: HostIP:192.168.7
6.2 HostIPs:[{IP:192.168.76.2}] PodIP:10.244.0.2 PodIPs:[{IP:10.244.0.2}] StartTime:2024-08-16 00:12:26 +0000 UTC InitContainerStatuses:[] ContainerStatuses:[{Name:coredns State:{Waiting:nil Running:nil Terminated:&ContainerStateTerminated{ExitCode:0,Signal:0,Reason:Completed,Message:,StartedAt:2024-08-16 00:12:27 +0000 UTC,FinishedAt:2024-08-16 00:12:38 +0000 UTC,ContainerID:docker://b746a62a75218259e51590ac7adc43053cf61855a3c65759fb80d271c15c076a,}} LastTerminationState:{Waiting:nil Running:nil Terminated:nil} Ready:false RestartCount:0 Image:registry.k8s.io/coredns/coredns:v1.11.1 ImageID:docker-pullable://registry.k8s.io/coredns/coredns@sha256:1eeb4c7316bacb1d4c8ead65571cd92dd21e27359f0d4917f1a5822a73b75db1 ContainerID:docker://b746a62a75218259e51590ac7adc43053cf61855a3c65759fb80d271c15c076a Started:0x4001e6ee00 AllocatedResources:map[] Resources:nil VolumeMounts:[{Name:config-volume MountPath:/etc/coredns ReadOnly:true RecursiveReadOnly:0x4001e45900} {Name:kube-api-access-jsvj4 MountPath:/var/run/secrets
/kubernetes.io/serviceaccount ReadOnly:true RecursiveReadOnly:0x4001e45910}] User:nil AllocatedResourcesStatus:[]}] QOSClass:Burstable EphemeralContainerStatuses:[] Resize: ResourceClaimStatuses:[]}
I0816 00:12:38.747571 2418220 pod_ready.go:79] waiting up to 6m0s for pod "coredns-6f6b679f8f-s552c" in "kube-system" namespace to be "Ready" ...
I0816 00:12:40.753844 2418220 pod_ready.go:103] pod "coredns-6f6b679f8f-s552c" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:42.754578 2418220 pod_ready.go:103] pod "coredns-6f6b679f8f-s552c" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:43.338933 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:45.838487 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:45.255150 2418220 pod_ready.go:103] pod "coredns-6f6b679f8f-s552c" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:47.754713 2418220 pod_ready.go:103] pod "coredns-6f6b679f8f-s552c" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:47.840406 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:50.337729 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:50.254382 2418220 pod_ready.go:103] pod "coredns-6f6b679f8f-s552c" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:52.754042 2418220 pod_ready.go:103] pod "coredns-6f6b679f8f-s552c" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:52.337948 2407112 pod_ready.go:103] pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:52.337979 2407112 pod_ready.go:82] duration metric: took 4m0.00579824s for pod "metrics-server-9975d5f86-tt4kd" in "kube-system" namespace to be "Ready" ...
E0816 00:12:52.337989 2407112 pod_ready.go:67] WaitExtra: waitPodCondition: context deadline exceeded
I0816 00:12:52.337996 2407112 pod_ready.go:39] duration metric: took 5m29.714095578s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0816 00:12:52.338015 2407112 api_server.go:52] waiting for apiserver process to appear ...
I0816 00:12:52.338092 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-apiserver --format={{.ID}}
I0816 00:12:52.356755 2407112 logs.go:276] 2 containers: [682baec10b08 3d14903eaff5]
I0816 00:12:52.356831 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_etcd --format={{.ID}}
I0816 00:12:52.374920 2407112 logs.go:276] 2 containers: [5aacba0afc73 15f34ed96b2b]
I0816 00:12:52.375012 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_coredns --format={{.ID}}
I0816 00:12:52.394135 2407112 logs.go:276] 2 containers: [0646646e2348 f583e4715841]
I0816 00:12:52.394219 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-scheduler --format={{.ID}}
I0816 00:12:52.411860 2407112 logs.go:276] 2 containers: [67be7ec054c6 003fa784026a]
I0816 00:12:52.411939 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-proxy --format={{.ID}}
I0816 00:12:52.430627 2407112 logs.go:276] 2 containers: [c5eeddd51e95 1e79f4e5d490]
I0816 00:12:52.430718 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-controller-manager --format={{.ID}}
I0816 00:12:52.448774 2407112 logs.go:276] 2 containers: [cc3ceefdfcf9 821653363c67]
I0816 00:12:52.448883 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kindnet --format={{.ID}}
I0816 00:12:52.467313 2407112 logs.go:276] 0 containers: []
W0816 00:12:52.467337 2407112 logs.go:278] No container was found matching "kindnet"
I0816 00:12:52.467406 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_storage-provisioner --format={{.ID}}
I0816 00:12:52.488664 2407112 logs.go:276] 2 containers: [a6efcfb5cb17 de096650c620]
I0816 00:12:52.488759 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kubernetes-dashboard --format={{.ID}}
I0816 00:12:52.508042 2407112 logs.go:276] 1 containers: [3ef1e388df06]
I0816 00:12:52.508089 2407112 logs.go:123] Gathering logs for kube-apiserver [3d14903eaff5] ...
I0816 00:12:52.508101 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 3d14903eaff5"
I0816 00:12:52.575393 2407112 logs.go:123] Gathering logs for etcd [15f34ed96b2b] ...
I0816 00:12:52.575429 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 15f34ed96b2b"
I0816 00:12:52.609501 2407112 logs.go:123] Gathering logs for kube-proxy [1e79f4e5d490] ...
I0816 00:12:52.609527 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 1e79f4e5d490"
I0816 00:12:52.632173 2407112 logs.go:123] Gathering logs for kube-controller-manager [cc3ceefdfcf9] ...
I0816 00:12:52.632251 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 cc3ceefdfcf9"
I0816 00:12:52.680493 2407112 logs.go:123] Gathering logs for Docker ...
I0816 00:12:52.680527 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u docker -u cri-docker -n 400"
I0816 00:12:52.708209 2407112 logs.go:123] Gathering logs for dmesg ...
I0816 00:12:52.708242 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I0816 00:12:52.725031 2407112 logs.go:123] Gathering logs for describe nodes ...
I0816 00:12:52.725068 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.20.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
I0816 00:12:52.888699 2407112 logs.go:123] Gathering logs for kube-apiserver [682baec10b08] ...
I0816 00:12:52.888730 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 682baec10b08"
I0816 00:12:52.947378 2407112 logs.go:123] Gathering logs for etcd [5aacba0afc73] ...
I0816 00:12:52.947415 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 5aacba0afc73"
I0816 00:12:52.984633 2407112 logs.go:123] Gathering logs for kube-controller-manager [821653363c67] ...
I0816 00:12:52.984685 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 821653363c67"
I0816 00:12:53.041565 2407112 logs.go:123] Gathering logs for kubernetes-dashboard [3ef1e388df06] ...
I0816 00:12:53.041610 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 3ef1e388df06"
I0816 00:12:53.063527 2407112 logs.go:123] Gathering logs for kubelet ...
I0816 00:12:53.063556 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
W0816 00:12:53.119952 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.595590 1361 reflector.go:138] object-"kube-system"/"kube-proxy-token-7vfmt": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "kube-proxy-token-7vfmt" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
W0816 00:12:53.120210 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.596830 1361 reflector.go:138] object-"kube-system"/"coredns": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "coredns" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
W0816 00:12:53.120436 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.597148 1361 reflector.go:138] object-"kube-system"/"coredns-token-xzs4d": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "coredns-token-xzs4d" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
W0816 00:12:53.120668 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.597346 1361 reflector.go:138] object-"kube-system"/"metrics-server-token-545hd": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "metrics-server-token-545hd" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
W0816 00:12:53.120885 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.597537 1361 reflector.go:138] object-"kube-system"/"kube-proxy": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "kube-proxy" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
W0816 00:12:53.121122 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.597780 1361 reflector.go:138] object-"kube-system"/"storage-provisioner-token-7rcl5": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "storage-provisioner-token-7rcl5" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
W0816 00:12:53.121338 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.597976 1361 reflector.go:138] object-"default"/"default-token-zv2bb": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "default-token-zv2bb" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "default": no relationship found between node 'old-k8s-version-894472' and this object
W0816 00:12:53.131353 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:25 old-k8s-version-894472 kubelet[1361]: E0816 00:07:25.865188 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
W0816 00:12:53.131864 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:25 old-k8s-version-894472 kubelet[1361]: E0816 00:07:25.961596 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.132568 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:27 old-k8s-version-894472 kubelet[1361]: E0816 00:07:27.069241 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.135723 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:40 old-k8s-version-894472 kubelet[1361]: E0816 00:07:40.936997 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
W0816 00:12:53.136063 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:41 old-k8s-version-894472 kubelet[1361]: E0816 00:07:41.174078 1361 reflector.go:138] object-"kubernetes-dashboard"/"kubernetes-dashboard-token-2w5nt": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "kubernetes-dashboard-token-2w5nt" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "kubernetes-dashboard": no relationship found between node 'old-k8s-version-894472' and this object
W0816 00:12:53.140916 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:48 old-k8s-version-894472 kubelet[1361]: E0816 00:07:48.480988 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
W0816 00:12:53.141483 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:49 old-k8s-version-894472 kubelet[1361]: E0816 00:07:49.497915 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.141711 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:52 old-k8s-version-894472 kubelet[1361]: E0816 00:07:52.902604 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.142184 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:56 old-k8s-version-894472 kubelet[1361]: E0816 00:07:56.570944 1361 pod_workers.go:191] Error syncing pod 3c69c2c8-274b-42e9-83f4-e56b1a377a84 ("storage-provisioner_kube-system(3c69c2c8-274b-42e9-83f4-e56b1a377a84)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 10s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(3c69c2c8-274b-42e9-83f4-e56b1a377a84)"
W0816 00:12:53.144872 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:04 old-k8s-version-894472 kubelet[1361]: E0816 00:08:04.540574 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
W0816 00:12:53.147012 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:04 old-k8s-version-894472 kubelet[1361]: E0816 00:08:04.937109 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
W0816 00:12:53.147341 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:18 old-k8s-version-894472 kubelet[1361]: E0816 00:08:18.906978 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.147546 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:18 old-k8s-version-894472 kubelet[1361]: E0816 00:08:18.907490 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.147738 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:33 old-k8s-version-894472 kubelet[1361]: E0816 00:08:33.905951 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.150049 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:34 old-k8s-version-894472 kubelet[1361]: E0816 00:08:34.587830 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
W0816 00:12:53.150259 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:48 old-k8s-version-894472 kubelet[1361]: E0816 00:08:48.913192 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.152418 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:48 old-k8s-version-894472 kubelet[1361]: E0816 00:08:48.930752 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
W0816 00:12:53.152621 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:59 old-k8s-version-894472 kubelet[1361]: E0816 00:08:59.902173 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.152813 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:01 old-k8s-version-894472 kubelet[1361]: E0816 00:09:01.906082 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.153018 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:10 old-k8s-version-894472 kubelet[1361]: E0816 00:09:10.903130 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.153211 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:13 old-k8s-version-894472 kubelet[1361]: E0816 00:09:13.910937 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.155518 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:22 old-k8s-version-894472 kubelet[1361]: E0816 00:09:22.525085 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
W0816 00:12:53.155709 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:28 old-k8s-version-894472 kubelet[1361]: E0816 00:09:28.902818 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.155918 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:36 old-k8s-version-894472 kubelet[1361]: E0816 00:09:36.902337 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.156108 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:42 old-k8s-version-894472 kubelet[1361]: E0816 00:09:42.908410 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.156310 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:50 old-k8s-version-894472 kubelet[1361]: E0816 00:09:50.917752 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.156500 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:53 old-k8s-version-894472 kubelet[1361]: E0816 00:09:53.902385 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.156706 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:02 old-k8s-version-894472 kubelet[1361]: E0816 00:10:02.902250 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.156909 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:07 old-k8s-version-894472 kubelet[1361]: E0816 00:10:07.902036 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.157115 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:13 old-k8s-version-894472 kubelet[1361]: E0816 00:10:13.908849 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.159233 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:22 old-k8s-version-894472 kubelet[1361]: E0816 00:10:22.924390 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
W0816 00:12:53.159438 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:25 old-k8s-version-894472 kubelet[1361]: E0816 00:10:25.902239 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.159639 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:37 old-k8s-version-894472 kubelet[1361]: E0816 00:10:37.903386 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.159827 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:37 old-k8s-version-894472 kubelet[1361]: E0816 00:10:37.904171 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.160016 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:51 old-k8s-version-894472 kubelet[1361]: E0816 00:10:51.902209 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.162349 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:53 old-k8s-version-894472 kubelet[1361]: E0816 00:10:53.458905 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
W0816 00:12:53.162553 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:03 old-k8s-version-894472 kubelet[1361]: E0816 00:11:03.902219 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.162745 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:03 old-k8s-version-894472 kubelet[1361]: E0816 00:11:03.905343 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.162934 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:16 old-k8s-version-894472 kubelet[1361]: E0816 00:11:16.908006 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.163136 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:17 old-k8s-version-894472 kubelet[1361]: E0816 00:11:17.912657 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.163324 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:31 old-k8s-version-894472 kubelet[1361]: E0816 00:11:31.902307 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.163546 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:32 old-k8s-version-894472 kubelet[1361]: E0816 00:11:32.902285 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.163735 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:43 old-k8s-version-894472 kubelet[1361]: E0816 00:11:43.902913 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.163937 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:47 old-k8s-version-894472 kubelet[1361]: E0816 00:11:47.902276 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.164127 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:56 old-k8s-version-894472 kubelet[1361]: E0816 00:11:56.907938 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.164331 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:01 old-k8s-version-894472 kubelet[1361]: E0816 00:12:01.910769 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.164519 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:07 old-k8s-version-894472 kubelet[1361]: E0816 00:12:07.902409 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.164729 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:12 old-k8s-version-894472 kubelet[1361]: E0816 00:12:12.925048 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.164919 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:18 old-k8s-version-894472 kubelet[1361]: E0816 00:12:18.903513 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.165121 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:23 old-k8s-version-894472 kubelet[1361]: E0816 00:12:23.902176 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.165309 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:33 old-k8s-version-894472 kubelet[1361]: E0816 00:12:33.902261 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.165511 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:34 old-k8s-version-894472 kubelet[1361]: E0816 00:12:34.910967 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.165721 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.902827 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.165909 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.903329 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
I0816 00:12:53.165921 2407112 logs.go:123] Gathering logs for coredns [f583e4715841] ...
I0816 00:12:53.165936 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 f583e4715841"
I0816 00:12:53.191198 2407112 logs.go:123] Gathering logs for container status ...
I0816 00:12:53.191227 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I0816 00:12:53.281304 2407112 logs.go:123] Gathering logs for coredns [0646646e2348] ...
I0816 00:12:53.281343 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 0646646e2348"
I0816 00:12:53.302996 2407112 logs.go:123] Gathering logs for kube-scheduler [003fa784026a] ...
I0816 00:12:53.303072 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 003fa784026a"
I0816 00:12:53.328690 2407112 logs.go:123] Gathering logs for kube-proxy [c5eeddd51e95] ...
I0816 00:12:53.328718 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 c5eeddd51e95"
I0816 00:12:53.351111 2407112 logs.go:123] Gathering logs for storage-provisioner [a6efcfb5cb17] ...
I0816 00:12:53.351142 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 a6efcfb5cb17"
I0816 00:12:53.372302 2407112 logs.go:123] Gathering logs for storage-provisioner [de096650c620] ...
I0816 00:12:53.372331 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 de096650c620"
I0816 00:12:53.392435 2407112 logs.go:123] Gathering logs for kube-scheduler [67be7ec054c6] ...
I0816 00:12:53.392464 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 67be7ec054c6"
I0816 00:12:53.414931 2407112 out.go:358] Setting ErrFile to fd 2...
I0816 00:12:53.414956 2407112 out.go:392] TERM=,COLORTERM=, which probably does not support color
W0816 00:12:53.415035 2407112 out.go:270] X Problems detected in kubelet:
W0816 00:12:53.415081 2407112 out.go:270] Aug 16 00:12:23 old-k8s-version-894472 kubelet[1361]: E0816 00:12:23.902176 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.415095 2407112 out.go:270] Aug 16 00:12:33 old-k8s-version-894472 kubelet[1361]: E0816 00:12:33.902261 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.415117 2407112 out.go:270] Aug 16 00:12:34 old-k8s-version-894472 kubelet[1361]: E0816 00:12:34.910967 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.415131 2407112 out.go:270] Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.902827 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:12:53.415139 2407112 out.go:270] Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.903329 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
I0816 00:12:53.415178 2407112 out.go:358] Setting ErrFile to fd 2...
I0816 00:12:53.415187 2407112 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0816 00:12:54.754245 2418220 pod_ready.go:103] pod "coredns-6f6b679f8f-s552c" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:57.253340 2418220 pod_ready.go:103] pod "coredns-6f6b679f8f-s552c" in "kube-system" namespace has status "Ready":"False"
I0816 00:12:59.253395 2418220 pod_ready.go:103] pod "coredns-6f6b679f8f-s552c" in "kube-system" namespace has status "Ready":"False"
I0816 00:13:01.254735 2418220 pod_ready.go:103] pod "coredns-6f6b679f8f-s552c" in "kube-system" namespace has status "Ready":"False"
I0816 00:13:03.417016 2407112 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0816 00:13:03.428859 2407112 api_server.go:72] duration metric: took 5m57.651838639s to wait for apiserver process to appear ...
I0816 00:13:03.428887 2407112 api_server.go:88] waiting for apiserver healthz status ...
I0816 00:13:03.428962 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-apiserver --format={{.ID}}
I0816 00:13:03.446552 2407112 logs.go:276] 2 containers: [682baec10b08 3d14903eaff5]
I0816 00:13:03.446627 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_etcd --format={{.ID}}
I0816 00:13:03.463688 2407112 logs.go:276] 2 containers: [5aacba0afc73 15f34ed96b2b]
I0816 00:13:03.463770 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_coredns --format={{.ID}}
I0816 00:13:03.485222 2407112 logs.go:276] 2 containers: [0646646e2348 f583e4715841]
I0816 00:13:03.485299 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-scheduler --format={{.ID}}
I0816 00:13:03.503320 2407112 logs.go:276] 2 containers: [67be7ec054c6 003fa784026a]
I0816 00:13:03.503399 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-proxy --format={{.ID}}
I0816 00:13:03.522597 2407112 logs.go:276] 2 containers: [c5eeddd51e95 1e79f4e5d490]
I0816 00:13:03.522681 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-controller-manager --format={{.ID}}
I0816 00:13:03.542588 2407112 logs.go:276] 2 containers: [cc3ceefdfcf9 821653363c67]
I0816 00:13:03.542672 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kindnet --format={{.ID}}
I0816 00:13:03.566735 2407112 logs.go:276] 0 containers: []
W0816 00:13:03.566759 2407112 logs.go:278] No container was found matching "kindnet"
I0816 00:13:03.566817 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kubernetes-dashboard --format={{.ID}}
I0816 00:13:03.585575 2407112 logs.go:276] 1 containers: [3ef1e388df06]
I0816 00:13:03.585709 2407112 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_storage-provisioner --format={{.ID}}
I0816 00:13:03.604527 2407112 logs.go:276] 2 containers: [a6efcfb5cb17 de096650c620]
I0816 00:13:03.604570 2407112 logs.go:123] Gathering logs for container status ...
I0816 00:13:03.604588 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I0816 00:13:03.669008 2407112 logs.go:123] Gathering logs for kube-apiserver [682baec10b08] ...
I0816 00:13:03.669037 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 682baec10b08"
I0816 00:13:03.712566 2407112 logs.go:123] Gathering logs for etcd [5aacba0afc73] ...
I0816 00:13:03.712600 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 5aacba0afc73"
I0816 00:13:03.737934 2407112 logs.go:123] Gathering logs for etcd [15f34ed96b2b] ...
I0816 00:13:03.738002 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 15f34ed96b2b"
I0816 00:13:03.771409 2407112 logs.go:123] Gathering logs for coredns [f583e4715841] ...
I0816 00:13:03.771482 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 f583e4715841"
I0816 00:13:03.797096 2407112 logs.go:123] Gathering logs for kube-scheduler [67be7ec054c6] ...
I0816 00:13:03.797128 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 67be7ec054c6"
I0816 00:13:03.826332 2407112 logs.go:123] Gathering logs for kube-controller-manager [cc3ceefdfcf9] ...
I0816 00:13:03.826367 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 cc3ceefdfcf9"
I0816 00:13:03.865698 2407112 logs.go:123] Gathering logs for Docker ...
I0816 00:13:03.865729 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u docker -u cri-docker -n 400"
I0816 00:13:03.892350 2407112 logs.go:123] Gathering logs for kubelet ...
I0816 00:13:03.892378 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
W0816 00:13:03.954017 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.595590 1361 reflector.go:138] object-"kube-system"/"kube-proxy-token-7vfmt": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "kube-proxy-token-7vfmt" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
W0816 00:13:03.954368 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.596830 1361 reflector.go:138] object-"kube-system"/"coredns": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "coredns" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
W0816 00:13:03.954651 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.597148 1361 reflector.go:138] object-"kube-system"/"coredns-token-xzs4d": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "coredns-token-xzs4d" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
W0816 00:13:03.954880 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.597346 1361 reflector.go:138] object-"kube-system"/"metrics-server-token-545hd": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "metrics-server-token-545hd" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
W0816 00:13:03.955093 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.597537 1361 reflector.go:138] object-"kube-system"/"kube-proxy": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "kube-proxy" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
W0816 00:13:03.955328 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.597780 1361 reflector.go:138] object-"kube-system"/"storage-provisioner-token-7rcl5": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "storage-provisioner-token-7rcl5" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "kube-system": no relationship found between node 'old-k8s-version-894472' and this object
W0816 00:13:03.955543 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:22 old-k8s-version-894472 kubelet[1361]: E0816 00:07:22.597976 1361 reflector.go:138] object-"default"/"default-token-zv2bb": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "default-token-zv2bb" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "default": no relationship found between node 'old-k8s-version-894472' and this object
W0816 00:13:03.966447 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:25 old-k8s-version-894472 kubelet[1361]: E0816 00:07:25.865188 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
W0816 00:13:03.967038 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:25 old-k8s-version-894472 kubelet[1361]: E0816 00:07:25.961596 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.967719 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:27 old-k8s-version-894472 kubelet[1361]: E0816 00:07:27.069241 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.970902 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:40 old-k8s-version-894472 kubelet[1361]: E0816 00:07:40.936997 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
W0816 00:13:03.971269 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:41 old-k8s-version-894472 kubelet[1361]: E0816 00:07:41.174078 1361 reflector.go:138] object-"kubernetes-dashboard"/"kubernetes-dashboard-token-2w5nt": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets "kubernetes-dashboard-token-2w5nt" is forbidden: User "system:node:old-k8s-version-894472" cannot list resource "secrets" in API group "" in the namespace "kubernetes-dashboard": no relationship found between node 'old-k8s-version-894472' and this object
W0816 00:13:03.975634 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:48 old-k8s-version-894472 kubelet[1361]: E0816 00:07:48.480988 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
W0816 00:13:03.976189 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:49 old-k8s-version-894472 kubelet[1361]: E0816 00:07:49.497915 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.976380 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:52 old-k8s-version-894472 kubelet[1361]: E0816 00:07:52.902604 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.976839 2407112 logs.go:138] Found kubelet problem: Aug 16 00:07:56 old-k8s-version-894472 kubelet[1361]: E0816 00:07:56.570944 1361 pod_workers.go:191] Error syncing pod 3c69c2c8-274b-42e9-83f4-e56b1a377a84 ("storage-provisioner_kube-system(3c69c2c8-274b-42e9-83f4-e56b1a377a84)"), skipping: failed to "StartContainer" for "storage-provisioner" with CrashLoopBackOff: "back-off 10s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(3c69c2c8-274b-42e9-83f4-e56b1a377a84)"
W0816 00:13:03.979485 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:04 old-k8s-version-894472 kubelet[1361]: E0816 00:08:04.540574 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
W0816 00:13:03.981633 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:04 old-k8s-version-894472 kubelet[1361]: E0816 00:08:04.937109 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
W0816 00:13:03.981960 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:18 old-k8s-version-894472 kubelet[1361]: E0816 00:08:18.906978 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.982163 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:18 old-k8s-version-894472 kubelet[1361]: E0816 00:08:18.907490 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.982356 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:33 old-k8s-version-894472 kubelet[1361]: E0816 00:08:33.905951 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.984654 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:34 old-k8s-version-894472 kubelet[1361]: E0816 00:08:34.587830 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
W0816 00:13:03.984859 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:48 old-k8s-version-894472 kubelet[1361]: E0816 00:08:48.913192 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.986993 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:48 old-k8s-version-894472 kubelet[1361]: E0816 00:08:48.930752 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
W0816 00:13:03.987196 2407112 logs.go:138] Found kubelet problem: Aug 16 00:08:59 old-k8s-version-894472 kubelet[1361]: E0816 00:08:59.902173 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.987384 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:01 old-k8s-version-894472 kubelet[1361]: E0816 00:09:01.906082 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.987586 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:10 old-k8s-version-894472 kubelet[1361]: E0816 00:09:10.903130 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.987776 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:13 old-k8s-version-894472 kubelet[1361]: E0816 00:09:13.910937 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.990067 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:22 old-k8s-version-894472 kubelet[1361]: E0816 00:09:22.525085 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
W0816 00:13:03.990260 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:28 old-k8s-version-894472 kubelet[1361]: E0816 00:09:28.902818 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.990464 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:36 old-k8s-version-894472 kubelet[1361]: E0816 00:09:36.902337 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.990653 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:42 old-k8s-version-894472 kubelet[1361]: E0816 00:09:42.908410 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.990862 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:50 old-k8s-version-894472 kubelet[1361]: E0816 00:09:50.917752 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.991053 2407112 logs.go:138] Found kubelet problem: Aug 16 00:09:53 old-k8s-version-894472 kubelet[1361]: E0816 00:09:53.902385 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.991255 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:02 old-k8s-version-894472 kubelet[1361]: E0816 00:10:02.902250 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.991445 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:07 old-k8s-version-894472 kubelet[1361]: E0816 00:10:07.902036 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.991646 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:13 old-k8s-version-894472 kubelet[1361]: E0816 00:10:13.908849 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.993804 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:22 old-k8s-version-894472 kubelet[1361]: E0816 00:10:22.924390 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
W0816 00:13:03.994007 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:25 old-k8s-version-894472 kubelet[1361]: E0816 00:10:25.902239 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.994210 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:37 old-k8s-version-894472 kubelet[1361]: E0816 00:10:37.903386 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.994398 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:37 old-k8s-version-894472 kubelet[1361]: E0816 00:10:37.904171 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.994591 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:51 old-k8s-version-894472 kubelet[1361]: E0816 00:10:51.902209 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.996873 2407112 logs.go:138] Found kubelet problem: Aug 16 00:10:53 old-k8s-version-894472 kubelet[1361]: E0816 00:10:53.458905 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
W0816 00:13:03.997075 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:03 old-k8s-version-894472 kubelet[1361]: E0816 00:11:03.902219 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.997263 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:03 old-k8s-version-894472 kubelet[1361]: E0816 00:11:03.905343 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.997451 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:16 old-k8s-version-894472 kubelet[1361]: E0816 00:11:16.908006 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.997659 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:17 old-k8s-version-894472 kubelet[1361]: E0816 00:11:17.912657 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.997848 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:31 old-k8s-version-894472 kubelet[1361]: E0816 00:11:31.902307 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.998049 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:32 old-k8s-version-894472 kubelet[1361]: E0816 00:11:32.902285 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.998237 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:43 old-k8s-version-894472 kubelet[1361]: E0816 00:11:43.902913 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.998439 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:47 old-k8s-version-894472 kubelet[1361]: E0816 00:11:47.902276 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.998629 2407112 logs.go:138] Found kubelet problem: Aug 16 00:11:56 old-k8s-version-894472 kubelet[1361]: E0816 00:11:56.907938 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.998831 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:01 old-k8s-version-894472 kubelet[1361]: E0816 00:12:01.910769 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.999022 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:07 old-k8s-version-894472 kubelet[1361]: E0816 00:12:07.902409 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.999225 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:12 old-k8s-version-894472 kubelet[1361]: E0816 00:12:12.925048 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.999414 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:18 old-k8s-version-894472 kubelet[1361]: E0816 00:12:18.903513 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.999616 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:23 old-k8s-version-894472 kubelet[1361]: E0816 00:12:23.902176 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:03.999804 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:33 old-k8s-version-894472 kubelet[1361]: E0816 00:12:33.902261 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:04.000006 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:34 old-k8s-version-894472 kubelet[1361]: E0816 00:12:34.910967 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:04.000207 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.902827 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:04.000397 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.903329 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:04.000599 2407112 logs.go:138] Found kubelet problem: Aug 16 00:12:58 old-k8s-version-894472 kubelet[1361]: E0816 00:12:58.902545 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:04.000789 2407112 logs.go:138] Found kubelet problem: Aug 16 00:13:02 old-k8s-version-894472 kubelet[1361]: E0816 00:13:02.903209 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
I0816 00:13:04.000801 2407112 logs.go:123] Gathering logs for dmesg ...
I0816 00:13:04.000816 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I0816 00:13:04.020061 2407112 logs.go:123] Gathering logs for kubernetes-dashboard [3ef1e388df06] ...
I0816 00:13:04.020091 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 3ef1e388df06"
I0816 00:13:04.044867 2407112 logs.go:123] Gathering logs for storage-provisioner [a6efcfb5cb17] ...
I0816 00:13:04.044946 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 a6efcfb5cb17"
I0816 00:13:04.076966 2407112 logs.go:123] Gathering logs for describe nodes ...
I0816 00:13:04.077044 2407112 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.20.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
I0816 00:13:04.233528 2407112 logs.go:123] Gathering logs for coredns [0646646e2348] ...
I0816 00:13:04.233554 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 0646646e2348"
I0816 00:13:04.259485 2407112 logs.go:123] Gathering logs for kube-proxy [1e79f4e5d490] ...
I0816 00:13:04.259515 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 1e79f4e5d490"
I0816 00:13:04.286001 2407112 logs.go:123] Gathering logs for kube-apiserver [3d14903eaff5] ...
I0816 00:13:04.286028 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 3d14903eaff5"
I0816 00:13:04.367586 2407112 logs.go:123] Gathering logs for kube-scheduler [003fa784026a] ...
I0816 00:13:04.367621 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 003fa784026a"
I0816 00:13:04.398199 2407112 logs.go:123] Gathering logs for kube-proxy [c5eeddd51e95] ...
I0816 00:13:04.398227 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 c5eeddd51e95"
I0816 00:13:04.420433 2407112 logs.go:123] Gathering logs for kube-controller-manager [821653363c67] ...
I0816 00:13:04.420463 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 821653363c67"
I0816 00:13:04.480676 2407112 logs.go:123] Gathering logs for storage-provisioner [de096650c620] ...
I0816 00:13:04.480722 2407112 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 de096650c620"
I0816 00:13:04.501321 2407112 out.go:358] Setting ErrFile to fd 2...
I0816 00:13:04.501350 2407112 out.go:392] TERM=,COLORTERM=, which probably does not support color
W0816 00:13:04.501418 2407112 out.go:270] X Problems detected in kubelet:
W0816 00:13:04.501441 2407112 out.go:270] Aug 16 00:12:34 old-k8s-version-894472 kubelet[1361]: E0816 00:12:34.910967 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:04.501458 2407112 out.go:270] Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.902827 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:04.501468 2407112 out.go:270] Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.903329 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W0816 00:13:04.501473 2407112 out.go:270] Aug 16 00:12:58 old-k8s-version-894472 kubelet[1361]: E0816 00:12:58.902545 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
W0816 00:13:04.501479 2407112 out.go:270] Aug 16 00:13:02 old-k8s-version-894472 kubelet[1361]: E0816 00:13:02.903209 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
I0816 00:13:04.501496 2407112 out.go:358] Setting ErrFile to fd 2...
I0816 00:13:04.501501 2407112 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0816 00:13:03.753991 2418220 pod_ready.go:103] pod "coredns-6f6b679f8f-s552c" in "kube-system" namespace has status "Ready":"False"
I0816 00:13:06.254027 2418220 pod_ready.go:103] pod "coredns-6f6b679f8f-s552c" in "kube-system" namespace has status "Ready":"False"
I0816 00:13:08.254181 2418220 pod_ready.go:103] pod "coredns-6f6b679f8f-s552c" in "kube-system" namespace has status "Ready":"False"
I0816 00:13:09.253067 2418220 pod_ready.go:93] pod "coredns-6f6b679f8f-s552c" in "kube-system" namespace has status "Ready":"True"
I0816 00:13:09.253094 2418220 pod_ready.go:82] duration metric: took 30.505514285s for pod "coredns-6f6b679f8f-s552c" in "kube-system" namespace to be "Ready" ...
I0816 00:13:09.253108 2418220 pod_ready.go:79] waiting up to 6m0s for pod "etcd-embed-certs-951478" in "kube-system" namespace to be "Ready" ...
I0816 00:13:09.258028 2418220 pod_ready.go:93] pod "etcd-embed-certs-951478" in "kube-system" namespace has status "Ready":"True"
I0816 00:13:09.258053 2418220 pod_ready.go:82] duration metric: took 4.911629ms for pod "etcd-embed-certs-951478" in "kube-system" namespace to be "Ready" ...
I0816 00:13:09.258064 2418220 pod_ready.go:79] waiting up to 6m0s for pod "kube-apiserver-embed-certs-951478" in "kube-system" namespace to be "Ready" ...
I0816 00:13:09.263121 2418220 pod_ready.go:93] pod "kube-apiserver-embed-certs-951478" in "kube-system" namespace has status "Ready":"True"
I0816 00:13:09.263149 2418220 pod_ready.go:82] duration metric: took 5.075211ms for pod "kube-apiserver-embed-certs-951478" in "kube-system" namespace to be "Ready" ...
I0816 00:13:09.263160 2418220 pod_ready.go:79] waiting up to 6m0s for pod "kube-controller-manager-embed-certs-951478" in "kube-system" namespace to be "Ready" ...
I0816 00:13:09.268055 2418220 pod_ready.go:93] pod "kube-controller-manager-embed-certs-951478" in "kube-system" namespace has status "Ready":"True"
I0816 00:13:09.268076 2418220 pod_ready.go:82] duration metric: took 4.908872ms for pod "kube-controller-manager-embed-certs-951478" in "kube-system" namespace to be "Ready" ...
I0816 00:13:09.268087 2418220 pod_ready.go:79] waiting up to 6m0s for pod "kube-proxy-7wwq6" in "kube-system" namespace to be "Ready" ...
I0816 00:13:09.273803 2418220 pod_ready.go:93] pod "kube-proxy-7wwq6" in "kube-system" namespace has status "Ready":"True"
I0816 00:13:09.273831 2418220 pod_ready.go:82] duration metric: took 5.7373ms for pod "kube-proxy-7wwq6" in "kube-system" namespace to be "Ready" ...
I0816 00:13:09.273843 2418220 pod_ready.go:79] waiting up to 6m0s for pod "kube-scheduler-embed-certs-951478" in "kube-system" namespace to be "Ready" ...
I0816 00:13:09.651423 2418220 pod_ready.go:93] pod "kube-scheduler-embed-certs-951478" in "kube-system" namespace has status "Ready":"True"
I0816 00:13:09.651449 2418220 pod_ready.go:82] duration metric: took 377.579145ms for pod "kube-scheduler-embed-certs-951478" in "kube-system" namespace to be "Ready" ...
I0816 00:13:09.651461 2418220 pod_ready.go:39] duration metric: took 41.931674932s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0816 00:13:09.651480 2418220 api_server.go:52] waiting for apiserver process to appear ...
I0816 00:13:09.651558 2418220 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0816 00:13:09.663678 2418220 api_server.go:72] duration metric: took 43.258913829s to wait for apiserver process to appear ...
I0816 00:13:09.663702 2418220 api_server.go:88] waiting for apiserver healthz status ...
I0816 00:13:09.663726 2418220 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I0816 00:13:09.672527 2418220 api_server.go:279] https://192.168.76.2:8443/healthz returned 200:
ok
I0816 00:13:09.673658 2418220 api_server.go:141] control plane version: v1.31.0
I0816 00:13:09.673719 2418220 api_server.go:131] duration metric: took 10.010265ms to wait for apiserver health ...
I0816 00:13:09.673743 2418220 system_pods.go:43] waiting for kube-system pods to appear ...
I0816 00:13:09.854087 2418220 system_pods.go:59] 7 kube-system pods found
I0816 00:13:09.854122 2418220 system_pods.go:61] "coredns-6f6b679f8f-s552c" [32775d66-0ddd-4cc8-bf05-f0cb75e9a8ac] Running
I0816 00:13:09.854129 2418220 system_pods.go:61] "etcd-embed-certs-951478" [3f6bf602-b1c9-40ad-a396-83b58b0a2343] Running
I0816 00:13:09.854133 2418220 system_pods.go:61] "kube-apiserver-embed-certs-951478" [ecb86650-3921-442b-af91-729bad2fcf40] Running
I0816 00:13:09.854138 2418220 system_pods.go:61] "kube-controller-manager-embed-certs-951478" [53d4d44c-f816-46f6-8cbb-1444e8e7f574] Running
I0816 00:13:09.854142 2418220 system_pods.go:61] "kube-proxy-7wwq6" [bf244e41-6f4a-453f-a049-3978b84f7a6e] Running
I0816 00:13:09.854146 2418220 system_pods.go:61] "kube-scheduler-embed-certs-951478" [fe82d9af-0754-4162-b906-b9b03fc21a2b] Running
I0816 00:13:09.854152 2418220 system_pods.go:61] "storage-provisioner" [0514920d-c907-40ed-b2d7-a27c32f0a5dd] Running
I0816 00:13:09.854158 2418220 system_pods.go:74] duration metric: took 180.396959ms to wait for pod list to return data ...
I0816 00:13:09.854167 2418220 default_sa.go:34] waiting for default service account to be created ...
I0816 00:13:10.051804 2418220 default_sa.go:45] found service account: "default"
I0816 00:13:10.051834 2418220 default_sa.go:55] duration metric: took 197.657145ms for default service account to be created ...
I0816 00:13:10.051845 2418220 system_pods.go:116] waiting for k8s-apps to be running ...
I0816 00:13:10.254618 2418220 system_pods.go:86] 7 kube-system pods found
I0816 00:13:10.254678 2418220 system_pods.go:89] "coredns-6f6b679f8f-s552c" [32775d66-0ddd-4cc8-bf05-f0cb75e9a8ac] Running
I0816 00:13:10.254718 2418220 system_pods.go:89] "etcd-embed-certs-951478" [3f6bf602-b1c9-40ad-a396-83b58b0a2343] Running
I0816 00:13:10.254737 2418220 system_pods.go:89] "kube-apiserver-embed-certs-951478" [ecb86650-3921-442b-af91-729bad2fcf40] Running
I0816 00:13:10.254752 2418220 system_pods.go:89] "kube-controller-manager-embed-certs-951478" [53d4d44c-f816-46f6-8cbb-1444e8e7f574] Running
I0816 00:13:10.254763 2418220 system_pods.go:89] "kube-proxy-7wwq6" [bf244e41-6f4a-453f-a049-3978b84f7a6e] Running
I0816 00:13:10.254772 2418220 system_pods.go:89] "kube-scheduler-embed-certs-951478" [fe82d9af-0754-4162-b906-b9b03fc21a2b] Running
I0816 00:13:10.254777 2418220 system_pods.go:89] "storage-provisioner" [0514920d-c907-40ed-b2d7-a27c32f0a5dd] Running
I0816 00:13:10.254802 2418220 system_pods.go:126] duration metric: took 202.934402ms to wait for k8s-apps to be running ...
I0816 00:13:10.254821 2418220 system_svc.go:44] waiting for kubelet service to be running ....
I0816 00:13:10.254897 2418220 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0816 00:13:10.271059 2418220 system_svc.go:56] duration metric: took 16.228932ms WaitForService to wait for kubelet
I0816 00:13:10.271088 2418220 kubeadm.go:582] duration metric: took 43.866329484s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0816 00:13:10.271138 2418220 node_conditions.go:102] verifying NodePressure condition ...
I0816 00:13:10.451670 2418220 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
I0816 00:13:10.451702 2418220 node_conditions.go:123] node cpu capacity is 2
I0816 00:13:10.451714 2418220 node_conditions.go:105] duration metric: took 180.570519ms to run NodePressure ...
I0816 00:13:10.451745 2418220 start.go:241] waiting for startup goroutines ...
I0816 00:13:10.451763 2418220 start.go:246] waiting for cluster config update ...
I0816 00:13:10.451774 2418220 start.go:255] writing updated cluster config ...
I0816 00:13:10.452065 2418220 ssh_runner.go:195] Run: rm -f paused
I0816 00:13:10.508487 2418220 start.go:600] kubectl: 1.31.0, cluster: 1.31.0 (minor skew: 0)
I0816 00:13:10.511115 2418220 out.go:177] * Done! kubectl is now configured to use "embed-certs-951478" cluster and "default" namespace by default
I0816 00:13:14.507900 2407112 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
I0816 00:13:14.517839 2407112 api_server.go:279] https://192.168.85.2:8443/healthz returned 200:
ok
I0816 00:13:14.520292 2407112 out.go:201]
W0816 00:13:14.522384 2407112 out.go:270] X Exiting due to K8S_UNHEALTHY_CONTROL_PLANE: wait 6m0s for node: wait for healthy API server: controlPlane never updated to v1.20.0
W0816 00:13:14.522420 2407112 out.go:270] * Suggestion: Control Plane could not update, try minikube delete --all --purge
W0816 00:13:14.522438 2407112 out.go:270] * Related issue: https://github.com/kubernetes/minikube/issues/11417
W0816 00:13:14.522444 2407112 out.go:270] *
W0816 00:13:14.523417 2407112 out.go:293] ╭─────────────────────────────────────────────────────────────────────────────────────────────╮
│ │
│ * If the above advice does not help, please let us know: │
│ https://github.com/kubernetes/minikube/issues/new/choose │
│ │
│ * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue. │
│ │
╰─────────────────────────────────────────────────────────────────────────────────────────────╯
I0816 00:13:14.525402 2407112 out.go:201]
==> Docker <==
Aug 16 00:08:04 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:08:04.241144445Z" level=warning msg="reference for unknown type: application/vnd.docker.distribution.manifest.v1+prettyjws" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4"
Aug 16 00:08:04 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:08:04.537789089Z" level=warning msg="Error persisting manifest" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" error="error committing manifest to content store: commit failed: unexpected commit digest sha256:eaee4c452b076cdb05b391ed7e75e1ad0aca136665875ab5d7e2f3d9f4675769, expected sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb: failed precondition" remote="registry.k8s.io/echoserver:1.4"
Aug 16 00:08:04 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:08:04.537903195Z" level=warning msg="[DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4"
Aug 16 00:08:04 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:08:04.537937237Z" level=info msg="Attempting next endpoint for pull after error: [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
Aug 16 00:08:04 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:08:04.919362673Z" level=warning msg="Error getting v2 registry: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
Aug 16 00:08:04 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:08:04.919682493Z" level=info msg="Attempting next endpoint for pull after error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
Aug 16 00:08:04 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:08:04.927444655Z" level=error msg="Handler for POST /v1.40/images/create returned error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
Aug 16 00:08:34 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:08:34.286531977Z" level=warning msg="reference for unknown type: application/vnd.docker.distribution.manifest.v1+prettyjws" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4"
Aug 16 00:08:34 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:08:34.584806623Z" level=warning msg="Error persisting manifest" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" error="error committing manifest to content store: commit failed: unexpected commit digest sha256:eaee4c452b076cdb05b391ed7e75e1ad0aca136665875ab5d7e2f3d9f4675769, expected sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb: failed precondition" remote="registry.k8s.io/echoserver:1.4"
Aug 16 00:08:34 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:08:34.584908578Z" level=warning msg="[DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4"
Aug 16 00:08:34 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:08:34.584940224Z" level=info msg="Attempting next endpoint for pull after error: [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
Aug 16 00:08:48 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:08:48.927115931Z" level=warning msg="Error getting v2 registry: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
Aug 16 00:08:48 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:08:48.927565438Z" level=info msg="Attempting next endpoint for pull after error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
Aug 16 00:08:48 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:08:48.930109788Z" level=error msg="Handler for POST /v1.40/images/create returned error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
Aug 16 00:09:22 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:09:22.242821029Z" level=warning msg="reference for unknown type: application/vnd.docker.distribution.manifest.v1+prettyjws" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4"
Aug 16 00:09:22 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:09:22.522302090Z" level=warning msg="Error persisting manifest" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" error="error committing manifest to content store: commit failed: unexpected commit digest sha256:eaee4c452b076cdb05b391ed7e75e1ad0aca136665875ab5d7e2f3d9f4675769, expected sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb: failed precondition" remote="registry.k8s.io/echoserver:1.4"
Aug 16 00:09:22 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:09:22.522416410Z" level=warning msg="[DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4"
Aug 16 00:09:22 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:09:22.522448885Z" level=info msg="Attempting next endpoint for pull after error: [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
Aug 16 00:10:22 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:10:22.919793702Z" level=warning msg="Error getting v2 registry: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
Aug 16 00:10:22 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:10:22.919838025Z" level=info msg="Attempting next endpoint for pull after error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
Aug 16 00:10:22 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:10:22.923647026Z" level=error msg="Handler for POST /v1.40/images/create returned error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
Aug 16 00:10:53 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:10:53.154537815Z" level=warning msg="reference for unknown type: application/vnd.docker.distribution.manifest.v1+prettyjws" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4"
Aug 16 00:10:53 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:10:53.456017089Z" level=warning msg="Error persisting manifest" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" error="error committing manifest to content store: commit failed: unexpected commit digest sha256:eaee4c452b076cdb05b391ed7e75e1ad0aca136665875ab5d7e2f3d9f4675769, expected sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb: failed precondition" remote="registry.k8s.io/echoserver:1.4"
Aug 16 00:10:53 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:10:53.456113037Z" level=warning msg="[DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4"
Aug 16 00:10:53 old-k8s-version-894472 dockerd[1074]: time="2024-08-16T00:10:53.456152683Z" level=info msg="Attempting next endpoint for pull after error: [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD
a6efcfb5cb17e ba04bb24b9575 5 minutes ago Running storage-provisioner 2 b2d7da83399c4 storage-provisioner
3ef1e388df06a kubernetesui/dashboard@sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93 5 minutes ago Running kubernetes-dashboard 0 d92212b423133 kubernetes-dashboard-cd95d586-k7zq2
0646646e23483 db91994f4ee8f 5 minutes ago Running coredns 1 cd221ea4db9ee coredns-74ff55c5b-jrtq9
de096650c6200 ba04bb24b9575 5 minutes ago Exited storage-provisioner 1 b2d7da83399c4 storage-provisioner
0a4983d325c06 1611cd07b61d5 5 minutes ago Running busybox 1 af72f54bb71ea busybox
c5eeddd51e956 25a5233254979 5 minutes ago Running kube-proxy 1 03bef86fbdbfd kube-proxy-4n8ls
682baec10b080 2c08bbbc02d3a 6 minutes ago Running kube-apiserver 1 f451c1ef1172b kube-apiserver-old-k8s-version-894472
5aacba0afc730 05b738aa1bc63 6 minutes ago Running etcd 1 1220260c41c8e etcd-old-k8s-version-894472
cc3ceefdfcf91 1df8a2b116bd1 6 minutes ago Running kube-controller-manager 1 49abbaab55b55 kube-controller-manager-old-k8s-version-894472
67be7ec054c6a e7605f88f17d6 6 minutes ago Running kube-scheduler 1 adb7fa2a5b6ad kube-scheduler-old-k8s-version-894472
77aa51e8bfe29 gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e 6 minutes ago Exited busybox 0 07ecfca802b01 busybox
f583e47158417 db91994f4ee8f 8 minutes ago Exited coredns 0 c1b510b003f6e coredns-74ff55c5b-jrtq9
1e79f4e5d490f 25a5233254979 8 minutes ago Exited kube-proxy 0 66481e83ee2c5 kube-proxy-4n8ls
003fa784026ad e7605f88f17d6 8 minutes ago Exited kube-scheduler 0 49887af6e9c3f kube-scheduler-old-k8s-version-894472
821653363c672 1df8a2b116bd1 8 minutes ago Exited kube-controller-manager 0 c1c603fc6e77b kube-controller-manager-old-k8s-version-894472
3d14903eaff5d 2c08bbbc02d3a 8 minutes ago Exited kube-apiserver 0 857b0611a6255 kube-apiserver-old-k8s-version-894472
15f34ed96b2b9 05b738aa1bc63 8 minutes ago Exited etcd 0 50f3363af98c7 etcd-old-k8s-version-894472
==> coredns [0646646e2348] <==
.:53
[INFO] plugin/reload: Running configuration MD5 = 093a0bf1423dd8c4eee62372bb216168
CoreDNS-1.7.0
linux/arm64, go1.14.4, f59c03d
[INFO] 127.0.0.1:35652 - 19807 "HINFO IN 5672222126629147568.7702291352317514369. udp 57 false 512" NXDOMAIN qr,rd,ra 57 0.049405724s
==> coredns [f583e4715841] <==
.:53
[INFO] plugin/reload: Running configuration MD5 = db32ca3650231d74073ff4cf814959a7
CoreDNS-1.7.0
linux/arm64, go1.14.4, f59c03d
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] Reloading
[INFO] plugin/health: Going into lameduck mode for 5s
[INFO] plugin/reload: Running configuration MD5 = 093a0bf1423dd8c4eee62372bb216168
[INFO] Reloading complete
[INFO] SIGTERM: Shutting down servers then terminating
[INFO] plugin/health: Going into lameduck mode for 5s
I0816 00:05:39.089365 1 trace.go:116] Trace[2019727887]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125 (started: 2024-08-16 00:05:09.088876528 +0000 UTC m=+0.041772691) (total time: 30.000384344s):
Trace[2019727887]: [30.000384344s] [30.000384344s] END
E0816 00:05:39.089390 1 reflector.go:178] pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125: Failed to list *v1.Service: Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
I0816 00:05:39.089584 1 trace.go:116] Trace[939984059]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125 (started: 2024-08-16 00:05:09.089389082 +0000 UTC m=+0.042285245) (total time: 30.000182043s):
Trace[939984059]: [30.000182043s] [30.000182043s] END
E0816 00:05:39.089590 1 reflector.go:178] pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125: Failed to list *v1.Endpoints: Get "https://10.96.0.1:443/api/v1/endpoints?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
I0816 00:05:39.090074 1 trace.go:116] Trace[911902081]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125 (started: 2024-08-16 00:05:09.088493473 +0000 UTC m=+0.041389636) (total time: 30.001562843s):
Trace[911902081]: [30.001562843s] [30.001562843s] END
E0816 00:05:39.090082 1 reflector.go:178] pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125: Failed to list *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
E0816 00:06:45.046755 1 reflector.go:382] pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125: Failed to watch *v1.Service: Get "https://10.96.0.1:443/api/v1/services?allowWatchBookmarks=true&resourceVersion=587&timeout=6m5s&timeoutSeconds=365&watch=true": dial tcp 10.96.0.1:443: connect: connection refused
E0816 00:06:45.046804 1 reflector.go:382] pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125: Failed to watch *v1.Endpoints: Get "https://10.96.0.1:443/api/v1/endpoints?allowWatchBookmarks=true&resourceVersion=589&timeout=8m23s&timeoutSeconds=503&watch=true": dial tcp 10.96.0.1:443: connect: connection refused
E0816 00:06:45.046846 1 reflector.go:382] pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125: Failed to watch *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?allowWatchBookmarks=true&resourceVersion=200&timeout=6m27s&timeoutSeconds=387&watch=true": dial tcp 10.96.0.1:443: connect: connection refused
==> describe nodes <==
Name: old-k8s-version-894472
Roles: control-plane,master
Labels: beta.kubernetes.io/arch=arm64
beta.kubernetes.io/os=linux
kubernetes.io/arch=arm64
kubernetes.io/hostname=old-k8s-version-894472
kubernetes.io/os=linux
minikube.k8s.io/commit=fe9c1d9e27059a205b0df8e5e482803b65ef8774
minikube.k8s.io/name=old-k8s-version-894472
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2024_08_16T00_04_51_0700
minikube.k8s.io/version=v1.33.1
node-role.kubernetes.io/control-plane=
node-role.kubernetes.io/master=
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: /var/run/dockershim.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Fri, 16 Aug 2024 00:04:48 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: old-k8s-version-894472
AcquireTime: <unset>
RenewTime: Fri, 16 Aug 2024 00:13:15 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Fri, 16 Aug 2024 00:13:15 +0000 Fri, 16 Aug 2024 00:04:39 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Fri, 16 Aug 2024 00:13:15 +0000 Fri, 16 Aug 2024 00:04:39 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Fri, 16 Aug 2024 00:13:15 +0000 Fri, 16 Aug 2024 00:04:39 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Fri, 16 Aug 2024 00:13:15 +0000 Fri, 16 Aug 2024 00:05:05 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.85.2
Hostname: old-k8s-version-894472
Capacity:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022364Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022364Ki
pods: 110
System Info:
Machine ID: 712e30e2bb5b4aa5b88ceeda6cdeea71
System UUID: 3de89e43-0f81-4af5-9a27-89ca1d31f15a
Boot ID: cc0e1141-aa97-44ec-a7be-f3cd9b66c5f7
Kernel Version: 5.15.0-1067-aws
OS Image: Ubuntu 22.04.4 LTS
Operating System: linux
Architecture: arm64
Container Runtime Version: docker://27.1.2
Kubelet Version: v1.20.0
Kube-Proxy Version: v1.20.0
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (11 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits AGE
--------- ---- ------------ ---------- --------------- ------------- ---
default busybox 0 (0%) 0 (0%) 0 (0%) 0 (0%) 6m41s
kube-system coredns-74ff55c5b-jrtq9 100m (5%) 0 (0%) 70Mi (0%) 170Mi (2%) 8m9s
kube-system etcd-old-k8s-version-894472 100m (5%) 0 (0%) 100Mi (1%) 0 (0%) 8m20s
kube-system kube-apiserver-old-k8s-version-894472 250m (12%) 0 (0%) 0 (0%) 0 (0%) 8m20s
kube-system kube-controller-manager-old-k8s-version-894472 200m (10%) 0 (0%) 0 (0%) 0 (0%) 8m20s
kube-system kube-proxy-4n8ls 0 (0%) 0 (0%) 0 (0%) 0 (0%) 8m9s
kube-system kube-scheduler-old-k8s-version-894472 100m (5%) 0 (0%) 0 (0%) 0 (0%) 8m20s
kube-system metrics-server-9975d5f86-tt4kd 100m (5%) 0 (0%) 200Mi (2%) 0 (0%) 6m31s
kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 8m6s
kubernetes-dashboard dashboard-metrics-scraper-8d5bb5db8-vst6j 0 (0%) 0 (0%) 0 (0%) 0 (0%) 5m34s
kubernetes-dashboard kubernetes-dashboard-cd95d586-k7zq2 0 (0%) 0 (0%) 0 (0%) 0 (0%) 5m34s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 850m (42%) 0 (0%)
memory 370Mi (4%) 170Mi (2%)
ephemeral-storage 100Mi (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
hugepages-32Mi 0 (0%) 0 (0%)
hugepages-64Ki 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 8m37s kubelet Starting kubelet.
Normal NodeHasSufficientMemory 8m37s (x5 over 8m37s) kubelet Node old-k8s-version-894472 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 8m37s (x5 over 8m37s) kubelet Node old-k8s-version-894472 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 8m37s (x5 over 8m37s) kubelet Node old-k8s-version-894472 status is now: NodeHasSufficientPID
Normal NodeAllocatableEnforced 8m37s kubelet Updated Node Allocatable limit across pods
Normal Starting 8m21s kubelet Starting kubelet.
Normal NodeHasSufficientMemory 8m21s kubelet Node old-k8s-version-894472 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 8m21s kubelet Node old-k8s-version-894472 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 8m21s kubelet Node old-k8s-version-894472 status is now: NodeHasSufficientPID
Normal NodeAllocatableEnforced 8m20s kubelet Updated Node Allocatable limit across pods
Normal NodeReady 8m10s kubelet Node old-k8s-version-894472 status is now: NodeReady
Normal Starting 8m6s kube-proxy Starting kube-proxy.
Normal Starting 6m7s kubelet Starting kubelet.
Normal NodeAllocatableEnforced 6m7s kubelet Updated Node Allocatable limit across pods
Normal NodeHasSufficientMemory 6m6s (x8 over 6m7s) kubelet Node old-k8s-version-894472 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 6m6s (x8 over 6m7s) kubelet Node old-k8s-version-894472 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 6m6s (x7 over 6m7s) kubelet Node old-k8s-version-894472 status is now: NodeHasSufficientPID
Normal Starting 5m50s kube-proxy Starting kube-proxy.
==> dmesg <==
==> etcd [15f34ed96b2b] <==
raft2024/08/16 00:04:40 INFO: 9f0758e1c58a86ed became leader at term 2
raft2024/08/16 00:04:40 INFO: raft.node: 9f0758e1c58a86ed elected leader 9f0758e1c58a86ed at term 2
2024-08-16 00:04:40.078308 I | etcdserver: setting up the initial cluster version to 3.4
2024-08-16 00:04:40.078459 I | embed: ready to serve client requests
2024-08-16 00:04:40.079878 I | embed: serving client requests on 127.0.0.1:2379
2024-08-16 00:04:40.080242 I | embed: ready to serve client requests
2024-08-16 00:04:40.081492 I | embed: serving client requests on 192.168.85.2:2379
2024-08-16 00:04:40.125684 I | etcdserver: published {Name:old-k8s-version-894472 ClientURLs:[https://192.168.85.2:2379]} to cluster 68eaea490fab4e05
2024-08-16 00:04:40.127555 N | etcdserver/membership: set the initial cluster version to 3.4
2024-08-16 00:04:40.128682 I | etcdserver/api: enabled capabilities for version 3.4
2024-08-16 00:04:56.044472 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-08-16 00:04:58.785442 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-08-16 00:05:08.805723 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-08-16 00:05:18.785694 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-08-16 00:05:28.785392 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-08-16 00:05:38.787903 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-08-16 00:05:48.790025 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-08-16 00:05:58.787081 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-08-16 00:06:08.785406 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-08-16 00:06:18.785465 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-08-16 00:06:28.785683 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-08-16 00:06:38.785382 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-08-16 00:06:45.212146 N | pkg/osutil: received terminated signal, shutting down...
WARNING: 2024/08/16 00:06:45 grpc: addrConn.createTransport failed to connect to {127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
2024-08-16 00:06:45.296668 I | etcdserver: skipped leadership transfer for single voting member cluster
==> etcd [5aacba0afc73] <==
2024-08-16 00:09:07.590100 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-08-16 00:09:17.589934 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-08-16 00:09:27.590545 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-08-16 00:09:37.590003 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-08-16 00:09:47.590047 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-08-16 00:09:57.590034 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-08-16 00:10:07.590097 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-08-16 00:10:17.590073 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-08-16 00:10:27.590074 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-08-16 00:10:37.589986 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-08-16 00:10:47.589998 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-08-16 00:10:57.589976 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-08-16 00:11:07.589990 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-08-16 00:11:17.589985 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-08-16 00:11:27.589987 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-08-16 00:11:37.589994 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-08-16 00:11:47.590123 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-08-16 00:11:57.590066 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-08-16 00:12:07.590168 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-08-16 00:12:17.589938 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-08-16 00:12:27.590057 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-08-16 00:12:37.590003 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-08-16 00:12:47.590130 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-08-16 00:12:57.590096 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-08-16 00:13:07.591443 I | etcdserver/api/etcdhttp: /health OK (status code 200)
==> kernel <==
00:13:15 up 8:55, 0 users, load average: 1.54, 2.78, 3.78
Linux old-k8s-version-894472 5.15.0-1067-aws #73~20.04.1-Ubuntu SMP Wed Jul 24 17:31:05 UTC 2024 aarch64 aarch64 aarch64 GNU/Linux
PRETTY_NAME="Ubuntu 22.04.4 LTS"
==> kube-apiserver [3d14903eaff5] <==
W0816 00:06:45.312772 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
W0816 00:06:45.312816 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
W0816 00:06:45.312858 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
W0816 00:06:45.312903 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
W0816 00:06:45.312945 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
W0816 00:06:45.312987 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
W0816 00:06:45.313026 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
W0816 00:06:45.313065 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
W0816 00:06:45.313107 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
I0816 00:06:45.314191 1 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick
I0816 00:06:45.314319 1 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick
I0816 00:06:45.314528 1 clientconn.go:897] blockingPicker: the picked transport is not ready, loop back to repick
W0816 00:06:45.314649 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
W0816 00:06:45.314853 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
W0816 00:06:45.314970 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
W0816 00:06:45.315019 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
W0816 00:06:45.316503 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
W0816 00:06:45.316563 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
W0816 00:06:45.316608 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
W0816 00:06:45.316665 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
W0816 00:06:45.316724 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
W0816 00:06:45.316765 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
W0816 00:06:45.316801 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
W0816 00:06:45.316837 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
W0816 00:06:45.316871 1 clientconn.go:1223] grpc: addrConn.createTransport failed to connect to {https://127.0.0.1:2379 <nil> 0 <nil>}. Err :connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
==> kube-apiserver [682baec10b08] <==
I0816 00:09:50.663702 1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379 <nil> 0 <nil>}] <nil> <nil>}
I0816 00:09:50.663711 1 clientconn.go:948] ClientConn switching balancer to "pick_first"
W0816 00:10:25.858778 1 handler_proxy.go:102] no RequestInfo found in the context
E0816 00:10:25.858912 1 controller.go:116] loading OpenAPI spec for "v1beta1.metrics.k8s.io" failed with: failed to retrieve openAPI spec, http error: ResponseCode: 503, Body: service unavailable
, Header: map[Content-Type:[text/plain; charset=utf-8] X-Content-Type-Options:[nosniff]]
I0816 00:10:25.858932 1 controller.go:129] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
I0816 00:10:27.023120 1 client.go:360] parsed scheme: "passthrough"
I0816 00:10:27.023167 1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379 <nil> 0 <nil>}] <nil> <nil>}
I0816 00:10:27.023176 1 clientconn.go:948] ClientConn switching balancer to "pick_first"
I0816 00:11:05.478410 1 client.go:360] parsed scheme: "passthrough"
I0816 00:11:05.478456 1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379 <nil> 0 <nil>}] <nil> <nil>}
I0816 00:11:05.478465 1 clientconn.go:948] ClientConn switching balancer to "pick_first"
I0816 00:11:41.279258 1 client.go:360] parsed scheme: "passthrough"
I0816 00:11:41.279307 1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379 <nil> 0 <nil>}] <nil> <nil>}
I0816 00:11:41.279316 1 clientconn.go:948] ClientConn switching balancer to "pick_first"
W0816 00:12:23.659838 1 handler_proxy.go:102] no RequestInfo found in the context
E0816 00:12:23.660033 1 controller.go:116] loading OpenAPI spec for "v1beta1.metrics.k8s.io" failed with: failed to retrieve openAPI spec, http error: ResponseCode: 503, Body: service unavailable
, Header: map[Content-Type:[text/plain; charset=utf-8] X-Content-Type-Options:[nosniff]]
I0816 00:12:23.660069 1 controller.go:129] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
I0816 00:12:23.910084 1 client.go:360] parsed scheme: "passthrough"
I0816 00:12:23.910266 1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379 <nil> 0 <nil>}] <nil> <nil>}
I0816 00:12:23.910285 1 clientconn.go:948] ClientConn switching balancer to "pick_first"
I0816 00:12:53.997069 1 client.go:360] parsed scheme: "passthrough"
I0816 00:12:53.997112 1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379 <nil> 0 <nil>}] <nil> <nil>}
I0816 00:12:53.997121 1 clientconn.go:948] ClientConn switching balancer to "pick_first"
==> kube-controller-manager [821653363c67] <==
I0816 00:05:06.714136 1 node_lifecycle_controller.go:1429] Initializing eviction metric for zone:
W0816 00:05:06.714197 1 node_lifecycle_controller.go:1044] Missing timestamp for Node old-k8s-version-894472. Assuming now as a timestamp.
I0816 00:05:06.714232 1 node_lifecycle_controller.go:1245] Controller detected that zone is now in state Normal.
I0816 00:05:06.714447 1 shared_informer.go:247] Caches are synced for resource quota
I0816 00:05:06.714559 1 event.go:291] "Event occurred" object="old-k8s-version-894472" kind="Node" apiVersion="v1" type="Normal" reason="RegisteredNode" message="Node old-k8s-version-894472 event: Registered Node old-k8s-version-894472 in Controller"
I0816 00:05:06.735914 1 shared_informer.go:247] Caches are synced for resource quota
I0816 00:05:06.811375 1 shared_informer.go:247] Caches are synced for persistent volume
I0816 00:05:06.865254 1 event.go:291] "Event occurred" object="kube-system/coredns" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set coredns-74ff55c5b to 2"
I0816 00:05:06.899350 1 event.go:291] "Event occurred" object="kube-system/coredns-74ff55c5b" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-74ff55c5b-9l4p8"
I0816 00:05:06.899571 1 event.go:291] "Event occurred" object="kube-system/kube-proxy" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-4n8ls"
I0816 00:05:06.964894 1 shared_informer.go:240] Waiting for caches to sync for garbage collector
I0816 00:05:07.037464 1 event.go:291] "Event occurred" object="kube-system/coredns-74ff55c5b" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-74ff55c5b-jrtq9"
E0816 00:05:07.049580 1 clusterroleaggregation_controller.go:181] edit failed with : Operation cannot be fulfilled on clusterroles.rbac.authorization.k8s.io "edit": the object has been modified; please apply your changes to the latest version and try again
E0816 00:05:07.125178 1 daemon_controller.go:320] kube-system/kube-proxy failed with : error storing status for daemon set &v1.DaemonSet{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"kube-proxy", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"4b07e50b-070c-49d6-b592-1506db189549", ResourceVersion:"278", Generation:1, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:63859363491, loc:(*time.Location)(0x632eb80)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"k8s-app":"kube-proxy"}, Annotations:map[string]string{"deprecated.daemonset.template.generation":"1"}, OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:"kubeadm", Operation:"Update", APIVersion:"apps/v1", Time:(*v1.Time)(0x400069d320), FieldsType:"FieldsV1", FieldsV1:(*v1.FieldsV1)(0x400069d340)}}}, Spec:v1.DaemonSetSpec{Selector:(*v1.
LabelSelector)(0x400069d360), Template:v1.PodTemplateSpec{ObjectMeta:v1.ObjectMeta{Name:"", GenerateName:"", Namespace:"", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"k8s-app":"kube-proxy"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, Spec:v1.PodSpec{Volumes:[]v1.Volume{v1.Volume{Name:"kube-proxy", VolumeSource:v1.VolumeSource{HostPath:(*v1.HostPathVolumeSource)(nil), EmptyDir:(*v1.EmptyDirVolumeSource)(nil), GCEPersistentDisk:(*v1.GCEPersistentDiskVolumeSource)(nil), AWSElasticBlockStore:(*v1.AWSElasticBlockStoreVolumeSource)(nil), GitRepo:(*v1.GitRepoVolumeSource)(nil), Secret:(*v1.SecretVolumeSource)(nil), NFS:(*v1.NFSVolumeSource)(nil), ISCSI:(*v1.ISCSIVolumeSource)(nil), Glusterfs:(*v1.Gl
usterfsVolumeSource)(nil), PersistentVolumeClaim:(*v1.PersistentVolumeClaimVolumeSource)(nil), RBD:(*v1.RBDVolumeSource)(nil), FlexVolume:(*v1.FlexVolumeSource)(nil), Cinder:(*v1.CinderVolumeSource)(nil), CephFS:(*v1.CephFSVolumeSource)(nil), Flocker:(*v1.FlockerVolumeSource)(nil), DownwardAPI:(*v1.DownwardAPIVolumeSource)(nil), FC:(*v1.FCVolumeSource)(nil), AzureFile:(*v1.AzureFileVolumeSource)(nil), ConfigMap:(*v1.ConfigMapVolumeSource)(0x4000df78c0), VsphereVolume:(*v1.VsphereVirtualDiskVolumeSource)(nil), Quobyte:(*v1.QuobyteVolumeSource)(nil), AzureDisk:(*v1.AzureDiskVolumeSource)(nil), PhotonPersistentDisk:(*v1.PhotonPersistentDiskVolumeSource)(nil), Projected:(*v1.ProjectedVolumeSource)(nil), PortworxVolume:(*v1.PortworxVolumeSource)(nil), ScaleIO:(*v1.ScaleIOVolumeSource)(nil), StorageOS:(*v1.StorageOSVolumeSource)(nil), CSI:(*v1.CSIVolumeSource)(nil), Ephemeral:(*v1.EphemeralVolumeSource)(nil)}}, v1.Volume{Name:"xtables-lock", VolumeSource:v1.VolumeSource{HostPath:(*v1.HostPathVolumeSource)(0x400069d
380), EmptyDir:(*v1.EmptyDirVolumeSource)(nil), GCEPersistentDisk:(*v1.GCEPersistentDiskVolumeSource)(nil), AWSElasticBlockStore:(*v1.AWSElasticBlockStoreVolumeSource)(nil), GitRepo:(*v1.GitRepoVolumeSource)(nil), Secret:(*v1.SecretVolumeSource)(nil), NFS:(*v1.NFSVolumeSource)(nil), ISCSI:(*v1.ISCSIVolumeSource)(nil), Glusterfs:(*v1.GlusterfsVolumeSource)(nil), PersistentVolumeClaim:(*v1.PersistentVolumeClaimVolumeSource)(nil), RBD:(*v1.RBDVolumeSource)(nil), FlexVolume:(*v1.FlexVolumeSource)(nil), Cinder:(*v1.CinderVolumeSource)(nil), CephFS:(*v1.CephFSVolumeSource)(nil), Flocker:(*v1.FlockerVolumeSource)(nil), DownwardAPI:(*v1.DownwardAPIVolumeSource)(nil), FC:(*v1.FCVolumeSource)(nil), AzureFile:(*v1.AzureFileVolumeSource)(nil), ConfigMap:(*v1.ConfigMapVolumeSource)(nil), VsphereVolume:(*v1.VsphereVirtualDiskVolumeSource)(nil), Quobyte:(*v1.QuobyteVolumeSource)(nil), AzureDisk:(*v1.AzureDiskVolumeSource)(nil), PhotonPersistentDisk:(*v1.PhotonPersistentDiskVolumeSource)(nil), Projected:(*v1.ProjectedVolumeS
ource)(nil), PortworxVolume:(*v1.PortworxVolumeSource)(nil), ScaleIO:(*v1.ScaleIOVolumeSource)(nil), StorageOS:(*v1.StorageOSVolumeSource)(nil), CSI:(*v1.CSIVolumeSource)(nil), Ephemeral:(*v1.EphemeralVolumeSource)(nil)}}, v1.Volume{Name:"lib-modules", VolumeSource:v1.VolumeSource{HostPath:(*v1.HostPathVolumeSource)(0x400069d3a0), EmptyDir:(*v1.EmptyDirVolumeSource)(nil), GCEPersistentDisk:(*v1.GCEPersistentDiskVolumeSource)(nil), AWSElasticBlockStore:(*v1.AWSElasticBlockStoreVolumeSource)(nil), GitRepo:(*v1.GitRepoVolumeSource)(nil), Secret:(*v1.SecretVolumeSource)(nil), NFS:(*v1.NFSVolumeSource)(nil), ISCSI:(*v1.ISCSIVolumeSource)(nil), Glusterfs:(*v1.GlusterfsVolumeSource)(nil), PersistentVolumeClaim:(*v1.PersistentVolumeClaimVolumeSource)(nil), RBD:(*v1.RBDVolumeSource)(nil), FlexVolume:(*v1.FlexVolumeSource)(nil), Cinder:(*v1.CinderVolumeSource)(nil), CephFS:(*v1.CephFSVolumeSource)(nil), Flocker:(*v1.FlockerVolumeSource)(nil), DownwardAPI:(*v1.DownwardAPIVolumeSource)(nil), FC:(*v1.FCVolumeSource)(nil),
AzureFile:(*v1.AzureFileVolumeSource)(nil), ConfigMap:(*v1.ConfigMapVolumeSource)(nil), VsphereVolume:(*v1.VsphereVirtualDiskVolumeSource)(nil), Quobyte:(*v1.QuobyteVolumeSource)(nil), AzureDisk:(*v1.AzureDiskVolumeSource)(nil), PhotonPersistentDisk:(*v1.PhotonPersistentDiskVolumeSource)(nil), Projected:(*v1.ProjectedVolumeSource)(nil), PortworxVolume:(*v1.PortworxVolumeSource)(nil), ScaleIO:(*v1.ScaleIOVolumeSource)(nil), StorageOS:(*v1.StorageOSVolumeSource)(nil), CSI:(*v1.CSIVolumeSource)(nil), Ephemeral:(*v1.EphemeralVolumeSource)(nil)}}}, InitContainers:[]v1.Container(nil), Containers:[]v1.Container{v1.Container{Name:"kube-proxy", Image:"k8s.gcr.io/kube-proxy:v1.20.0", Command:[]string{"/usr/local/bin/kube-proxy", "--config=/var/lib/kube-proxy/config.conf", "--hostname-override=$(NODE_NAME)"}, Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar{v1.EnvVar{Name:"NODE_NAME", Value:"", ValueFrom:(*v1.EnvVarSource)(0x400069d3e0)}}, Resources:v1.R
esourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"kube-proxy", ReadOnly:false, MountPath:"/var/lib/kube-proxy", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}, v1.VolumeMount{Name:"xtables-lock", ReadOnly:false, MountPath:"/run/xtables.lock", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}, v1.VolumeMount{Name:"lib-modules", ReadOnly:true, MountPath:"/lib/modules", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), StartupProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(0x4000e85920), Stdin:false, StdinOnce:false, TTY:false}}, EphemeralContainers:[]v1.EphemeralContainer(nil), RestartPo
licy:"Always", TerminationGracePeriodSeconds:(*int64)(0x400064e518), ActiveDeadlineSeconds:(*int64)(nil), DNSPolicy:"ClusterFirst", NodeSelector:map[string]string{"kubernetes.io/os":"linux"}, ServiceAccountName:"kube-proxy", DeprecatedServiceAccount:"kube-proxy", AutomountServiceAccountToken:(*bool)(nil), NodeName:"", HostNetwork:true, HostPID:false, HostIPC:false, ShareProcessNamespace:(*bool)(nil), SecurityContext:(*v1.PodSecurityContext)(0x40005a9b20), ImagePullSecrets:[]v1.LocalObjectReference(nil), Hostname:"", Subdomain:"", Affinity:(*v1.Affinity)(nil), SchedulerName:"default-scheduler", Tolerations:[]v1.Toleration{v1.Toleration{Key:"CriticalAddonsOnly", Operator:"Exists", Value:"", Effect:"", TolerationSeconds:(*int64)(nil)}, v1.Toleration{Key:"", Operator:"Exists", Value:"", Effect:"", TolerationSeconds:(*int64)(nil)}}, HostAliases:[]v1.HostAlias(nil), PriorityClassName:"system-node-critical", Priority:(*int32)(nil), DNSConfig:(*v1.PodDNSConfig)(nil), ReadinessGates:[]v1.PodReadinessGate(nil), Runtime
ClassName:(*string)(nil), EnableServiceLinks:(*bool)(nil), PreemptionPolicy:(*v1.PreemptionPolicy)(nil), Overhead:v1.ResourceList(nil), TopologySpreadConstraints:[]v1.TopologySpreadConstraint(nil), SetHostnameAsFQDN:(*bool)(nil)}}, UpdateStrategy:v1.DaemonSetUpdateStrategy{Type:"RollingUpdate", RollingUpdate:(*v1.RollingUpdateDaemonSet)(0x40002869a0)}, MinReadySeconds:0, RevisionHistoryLimit:(*int32)(0x400064e5b8)}, Status:v1.DaemonSetStatus{CurrentNumberScheduled:0, NumberMisscheduled:0, DesiredNumberScheduled:0, NumberReady:0, ObservedGeneration:0, UpdatedNumberScheduled:0, NumberAvailable:0, NumberUnavailable:0, CollisionCount:(*int32)(nil), Conditions:[]v1.DaemonSetCondition(nil)}}: Operation cannot be fulfilled on daemonsets.apps "kube-proxy": the object has been modified; please apply your changes to the latest version and try again
I0816 00:05:07.165414 1 shared_informer.go:247] Caches are synced for garbage collector
I0816 00:05:07.211431 1 shared_informer.go:247] Caches are synced for garbage collector
I0816 00:05:07.211456 1 garbagecollector.go:151] Garbage collector: all resource monitors have synced. Proceeding to collect garbage
I0816 00:05:10.030382 1 event.go:291] "Event occurred" object="kube-system/coredns" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set coredns-74ff55c5b to 1"
I0816 00:05:10.047326 1 event.go:291] "Event occurred" object="kube-system/coredns-74ff55c5b" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: coredns-74ff55c5b-9l4p8"
I0816 00:06:43.633203 1 event.go:291] "Event occurred" object="kube-system/metrics-server" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set metrics-server-9975d5f86 to 1"
E0816 00:06:43.828786 1 clusterroleaggregation_controller.go:181] edit failed with : Operation cannot be fulfilled on clusterroles.rbac.authorization.k8s.io "edit": the object has been modified; please apply your changes to the latest version and try again
I0816 00:06:44.690678 1 event.go:291] "Event occurred" object="kube-system/metrics-server-9975d5f86" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: metrics-server-9975d5f86-tt4kd"
W0816 00:06:45.344029 1 endpointslice_controller.go:284] Error syncing endpoint slices for service "kube-system/metrics-server", retrying. Error: failed to update metrics-server-zb6js EndpointSlice for Service kube-system/metrics-server: Put "https://192.168.85.2:8443/apis/discovery.k8s.io/v1beta1/namespaces/kube-system/endpointslices/metrics-server-zb6js": dial tcp 192.168.85.2:8443: connect: connection refused
I0816 00:06:45.344279 1 event.go:291] "Event occurred" object="kube-system/metrics-server" kind="Service" apiVersion="v1" type="Warning" reason="FailedToUpdateEndpointSlices" message="Error updating Endpoint Slices for Service kube-system/metrics-server: failed to update metrics-server-zb6js EndpointSlice for Service kube-system/metrics-server: Put \"https://192.168.85.2:8443/apis/discovery.k8s.io/v1beta1/namespaces/kube-system/endpointslices/metrics-server-zb6js\": dial tcp 192.168.85.2:8443: connect: connection refused"
E0816 00:06:45.344435 1 event.go:273] Unable to write event: '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"metrics-server.17ec0cc6cc23c38d", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Service", Namespace:"kube-system", Name:"metrics-server", UID:"da33dae1-dc07-40c5-ab20-ef3b4549504c", APIVersion:"v1", ResourceVersion:"587", FieldPath:""}, Reason:"FailedToUpdateEndpointSlices", Message:"Error updating Endpoint Slices for Service kube-system/metrics-server: failed to update metrics-server-zb6js EndpointSlice f
or Service kube-system/metrics-server: Put \"https://192.168.85.2:8443/apis/discovery.k8s.io/v1beta1/namespaces/kube-system/endpointslices/metrics-server-zb6js\": dial tcp 192.168.85.2:8443: connect: connection refused", Source:v1.EventSource{Component:"endpoint-slice-controller", Host:""}, FirstTimestamp:v1.Time{Time:time.Time{wall:0xc1a7c3e55481318d, ext:125858843581, loc:(*time.Location)(0x632eb80)}}, LastTimestamp:v1.Time{Time:time.Time{wall:0xc1a7c3e55481318d, ext:125858843581, loc:(*time.Location)(0x632eb80)}}, Count:1, Type:"Warning", EventTime:v1.MicroTime{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'Post "https://192.168.85.2:8443/api/v1/namespaces/kube-system/events": dial tcp 192.168.85.2:8443: connect: connection refused'(may retry after sleeping)
==> kube-controller-manager [cc3ceefdfcf9] <==
W0816 00:08:46.783852 1 garbagecollector.go:703] failed to discover some groups: map[metrics.k8s.io/v1beta1:the server is currently unable to handle the request]
E0816 00:09:12.838085 1 resource_quota_controller.go:409] unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: the server is currently unable to handle the request
I0816 00:09:18.434378 1 request.go:655] Throttling request took 1.048567822s, request: GET:https://192.168.85.2:8443/apis/scheduling.k8s.io/v1?timeout=32s
W0816 00:09:19.286278 1 garbagecollector.go:703] failed to discover some groups: map[metrics.k8s.io/v1beta1:the server is currently unable to handle the request]
E0816 00:09:43.339796 1 resource_quota_controller.go:409] unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: the server is currently unable to handle the request
I0816 00:09:50.936802 1 request.go:655] Throttling request took 1.048688525s, request: GET:https://192.168.85.2:8443/apis/authorization.k8s.io/v1?timeout=32s
W0816 00:09:51.788096 1 garbagecollector.go:703] failed to discover some groups: map[metrics.k8s.io/v1beta1:the server is currently unable to handle the request]
E0816 00:10:13.841394 1 resource_quota_controller.go:409] unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: the server is currently unable to handle the request
I0816 00:10:23.438529 1 request.go:655] Throttling request took 1.048338912s, request: GET:https://192.168.85.2:8443/apis/extensions/v1beta1?timeout=32s
W0816 00:10:24.290032 1 garbagecollector.go:703] failed to discover some groups: map[metrics.k8s.io/v1beta1:the server is currently unable to handle the request]
E0816 00:10:44.343184 1 resource_quota_controller.go:409] unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: the server is currently unable to handle the request
I0816 00:10:55.940699 1 request.go:655] Throttling request took 1.044173579s, request: GET:https://192.168.85.2:8443/apis/extensions/v1beta1?timeout=32s
W0816 00:10:56.792111 1 garbagecollector.go:703] failed to discover some groups: map[metrics.k8s.io/v1beta1:the server is currently unable to handle the request]
E0816 00:11:14.848028 1 resource_quota_controller.go:409] unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: the server is currently unable to handle the request
I0816 00:11:28.442555 1 request.go:655] Throttling request took 1.04839984s, request: GET:https://192.168.85.2:8443/apis/extensions/v1beta1?timeout=32s
W0816 00:11:29.293830 1 garbagecollector.go:703] failed to discover some groups: map[metrics.k8s.io/v1beta1:the server is currently unable to handle the request]
E0816 00:11:45.350891 1 resource_quota_controller.go:409] unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: the server is currently unable to handle the request
I0816 00:12:00.947881 1 request.go:655] Throttling request took 1.047745865s, request: GET:https://192.168.85.2:8443/apis/extensions/v1beta1?timeout=32s
W0816 00:12:01.799745 1 garbagecollector.go:703] failed to discover some groups: map[metrics.k8s.io/v1beta1:the server is currently unable to handle the request]
E0816 00:12:15.852703 1 resource_quota_controller.go:409] unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: the server is currently unable to handle the request
I0816 00:12:33.450326 1 request.go:655] Throttling request took 1.048475585s, request: GET:https://192.168.85.2:8443/apis/scheduling.k8s.io/v1beta1?timeout=32s
W0816 00:12:34.305364 1 garbagecollector.go:703] failed to discover some groups: map[metrics.k8s.io/v1beta1:the server is currently unable to handle the request]
E0816 00:12:46.354509 1 resource_quota_controller.go:409] unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: the server is currently unable to handle the request
I0816 00:13:05.955724 1 request.go:655] Throttling request took 1.048076971s, request: GET:https://192.168.85.2:8443/apis/extensions/v1beta1?timeout=32s
W0816 00:13:06.807773 1 garbagecollector.go:703] failed to discover some groups: map[metrics.k8s.io/v1beta1:the server is currently unable to handle the request]
==> kube-proxy [1e79f4e5d490] <==
I0816 00:05:09.412461 1 node.go:172] Successfully retrieved node IP: 192.168.85.2
I0816 00:05:09.412555 1 server_others.go:142] kube-proxy node IP is an IPv4 address (192.168.85.2), assume IPv4 operation
W0816 00:05:09.511630 1 server_others.go:578] Unknown proxy mode "", assuming iptables proxy
I0816 00:05:09.511769 1 server_others.go:185] Using iptables Proxier.
I0816 00:05:09.512000 1 server.go:650] Version: v1.20.0
I0816 00:05:09.512955 1 config.go:315] Starting service config controller
I0816 00:05:09.512977 1 shared_informer.go:240] Waiting for caches to sync for service config
I0816 00:05:09.512999 1 config.go:224] Starting endpoint slice config controller
I0816 00:05:09.513003 1 shared_informer.go:240] Waiting for caches to sync for endpoint slice config
I0816 00:05:09.613104 1 shared_informer.go:247] Caches are synced for endpoint slice config
I0816 00:05:09.613183 1 shared_informer.go:247] Caches are synced for service config
==> kube-proxy [c5eeddd51e95] <==
I0816 00:07:25.738903 1 node.go:172] Successfully retrieved node IP: 192.168.85.2
I0816 00:07:25.738984 1 server_others.go:142] kube-proxy node IP is an IPv4 address (192.168.85.2), assume IPv4 operation
W0816 00:07:25.784795 1 server_others.go:578] Unknown proxy mode "", assuming iptables proxy
I0816 00:07:25.784895 1 server_others.go:185] Using iptables Proxier.
I0816 00:07:25.790418 1 server.go:650] Version: v1.20.0
I0816 00:07:25.791025 1 config.go:315] Starting service config controller
I0816 00:07:25.791041 1 shared_informer.go:240] Waiting for caches to sync for service config
I0816 00:07:25.791750 1 config.go:224] Starting endpoint slice config controller
I0816 00:07:25.791758 1 shared_informer.go:240] Waiting for caches to sync for endpoint slice config
I0816 00:07:25.891413 1 shared_informer.go:247] Caches are synced for service config
I0816 00:07:25.892527 1 shared_informer.go:247] Caches are synced for endpoint slice config
==> kube-scheduler [003fa784026a] <==
I0816 00:04:42.631485 1 serving.go:331] Generated self-signed cert in-memory
W0816 00:04:48.098756 1 requestheader_controller.go:193] Unable to get configmap/extension-apiserver-authentication in kube-system. Usually fixed by 'kubectl create rolebinding -n kube-system ROLEBINDING_NAME --role=extension-apiserver-authentication-reader --serviceaccount=YOUR_NS:YOUR_SA'
W0816 00:04:48.098813 1 authentication.go:332] Error looking up in-cluster authentication configuration: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot get resource "configmaps" in API group "" in the namespace "kube-system"
W0816 00:04:48.098828 1 authentication.go:333] Continuing without authentication configuration. This may treat all requests as anonymous.
W0816 00:04:48.098838 1 authentication.go:334] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false
I0816 00:04:48.159120 1 configmap_cafile_content.go:202] Starting client-ca::kube-system::extension-apiserver-authentication::client-ca-file
I0816 00:04:48.159141 1 shared_informer.go:240] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
I0816 00:04:48.166967 1 secure_serving.go:197] Serving securely on 127.0.0.1:10259
I0816 00:04:48.167164 1 tlsconfig.go:240] Starting DynamicServingCertificateController
E0816 00:04:48.190660 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E0816 00:04:48.192378 1 reflector.go:138] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E0816 00:04:48.192552 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E0816 00:04:48.192637 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1beta1.PodDisruptionBudget: failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E0816 00:04:48.192710 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E0816 00:04:48.192790 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E0816 00:04:48.192860 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E0816 00:04:48.192928 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E0816 00:04:48.192995 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E0816 00:04:48.193060 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E0816 00:04:48.193117 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E0816 00:04:48.193239 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E0816 00:04:49.061534 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E0816 00:04:49.186075 1 reflector.go:138] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
I0816 00:04:51.159237 1 shared_informer.go:247] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kube-scheduler [67be7ec054c6] <==
I0816 00:07:15.155786 1 serving.go:331] Generated self-signed cert in-memory
W0816 00:07:22.673847 1 requestheader_controller.go:193] Unable to get configmap/extension-apiserver-authentication in kube-system. Usually fixed by 'kubectl create rolebinding -n kube-system ROLEBINDING_NAME --role=extension-apiserver-authentication-reader --serviceaccount=YOUR_NS:YOUR_SA'
W0816 00:07:22.673880 1 authentication.go:332] Error looking up in-cluster authentication configuration: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot get resource "configmaps" in API group "" in the namespace "kube-system"
W0816 00:07:22.673910 1 authentication.go:333] Continuing without authentication configuration. This may treat all requests as anonymous.
W0816 00:07:22.673917 1 authentication.go:334] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false
I0816 00:07:22.802787 1 secure_serving.go:197] Serving securely on 127.0.0.1:10259
I0816 00:07:22.811089 1 configmap_cafile_content.go:202] Starting client-ca::kube-system::extension-apiserver-authentication::client-ca-file
I0816 00:07:22.811176 1 shared_informer.go:240] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
I0816 00:07:22.811232 1 tlsconfig.go:240] Starting DynamicServingCertificateController
I0816 00:07:22.916826 1 shared_informer.go:247] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Aug 16 00:10:53 old-k8s-version-894472 kubelet[1361]: E0816 00:10:53.458707 1361 remote_image.go:113] PullImage "registry.k8s.io/echoserver:1.4" from image service failed: rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/
Aug 16 00:10:53 old-k8s-version-894472 kubelet[1361]: E0816 00:10:53.458746 1361 kuberuntime_image.go:51] Pull image "registry.k8s.io/echoserver:1.4" failed: rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/
Aug 16 00:10:53 old-k8s-version-894472 kubelet[1361]: E0816 00:10:53.458868 1361 kuberuntime_manager.go:829] container &Container{Name:dashboard-metrics-scraper,Image:registry.k8s.io/echoserver:1.4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:,HostPort:0,ContainerPort:8000,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:tmp-volume,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kubernetes-dashboard-token-2w5nt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:&Probe{Handler:Handler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/,Port:{0 8000 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,},InitialDelaySeconds:30,TimeoutSeconds:30,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,},ReadinessProbe:nil,Lifecycle:nil,Terminatio
nMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*1001,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:*2001,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,} start failed in pod dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df): ErrImagePull: rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/
Aug 16 00:10:53 old-k8s-version-894472 kubelet[1361]: E0816 00:10:53.458905 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ErrImagePull: "rpc error: code = Unknown desc = [DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
Aug 16 00:11:03 old-k8s-version-894472 kubelet[1361]: E0816 00:11:03.902219 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
Aug 16 00:11:03 old-k8s-version-894472 kubelet[1361]: E0816 00:11:03.905343 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
Aug 16 00:11:16 old-k8s-version-894472 kubelet[1361]: E0816 00:11:16.908006 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
Aug 16 00:11:17 old-k8s-version-894472 kubelet[1361]: E0816 00:11:17.912657 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
Aug 16 00:11:31 old-k8s-version-894472 kubelet[1361]: E0816 00:11:31.902307 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
Aug 16 00:11:32 old-k8s-version-894472 kubelet[1361]: E0816 00:11:32.902285 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
Aug 16 00:11:43 old-k8s-version-894472 kubelet[1361]: E0816 00:11:43.902913 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
Aug 16 00:11:47 old-k8s-version-894472 kubelet[1361]: E0816 00:11:47.902276 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
Aug 16 00:11:56 old-k8s-version-894472 kubelet[1361]: E0816 00:11:56.907938 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
Aug 16 00:12:01 old-k8s-version-894472 kubelet[1361]: E0816 00:12:01.910769 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
Aug 16 00:12:07 old-k8s-version-894472 kubelet[1361]: E0816 00:12:07.902409 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
Aug 16 00:12:12 old-k8s-version-894472 kubelet[1361]: E0816 00:12:12.925048 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
Aug 16 00:12:18 old-k8s-version-894472 kubelet[1361]: E0816 00:12:18.903513 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
Aug 16 00:12:23 old-k8s-version-894472 kubelet[1361]: E0816 00:12:23.902176 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
Aug 16 00:12:33 old-k8s-version-894472 kubelet[1361]: E0816 00:12:33.902261 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
Aug 16 00:12:34 old-k8s-version-894472 kubelet[1361]: E0816 00:12:34.910967 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.902827 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
Aug 16 00:12:47 old-k8s-version-894472 kubelet[1361]: E0816 00:12:47.903329 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
Aug 16 00:12:58 old-k8s-version-894472 kubelet[1361]: E0816 00:12:58.902545 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
Aug 16 00:13:02 old-k8s-version-894472 kubelet[1361]: E0816 00:13:02.903209 1361 pod_workers.go:191] Error syncing pod 621b026b-c90c-44c4-9d8a-bf51b4f090c9 ("metrics-server-9975d5f86-tt4kd_kube-system(621b026b-c90c-44c4-9d8a-bf51b4f090c9)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
Aug 16 00:13:13 old-k8s-version-894472 kubelet[1361]: E0816 00:13:13.908004 1361 pod_workers.go:191] Error syncing pod fab377e3-d182-4f20-a509-6c40680d54df ("dashboard-metrics-scraper-8d5bb5db8-vst6j_kubernetes-dashboard(fab377e3-d182-4f20-a509-6c40680d54df)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with ImagePullBackOff: "Back-off pulling image \"registry.k8s.io/echoserver:1.4\""
==> kubernetes-dashboard [3ef1e388df06] <==
2024/08/16 00:07:48 Starting overwatch
2024/08/16 00:07:48 Using namespace: kubernetes-dashboard
2024/08/16 00:07:48 Using in-cluster config to connect to apiserver
2024/08/16 00:07:48 Using secret token for csrf signing
2024/08/16 00:07:48 Initializing csrf token from kubernetes-dashboard-csrf secret
2024/08/16 00:07:48 Empty token. Generating and storing in a secret kubernetes-dashboard-csrf
2024/08/16 00:07:48 Successful initial request to the apiserver, version: v1.20.0
2024/08/16 00:07:48 Generating JWE encryption key
2024/08/16 00:07:48 New synchronizer has been registered: kubernetes-dashboard-key-holder-kubernetes-dashboard. Starting
2024/08/16 00:07:48 Starting secret synchronizer for kubernetes-dashboard-key-holder in namespace kubernetes-dashboard
2024/08/16 00:07:48 Initializing JWE encryption key from synchronized object
2024/08/16 00:07:48 Creating in-cluster Sidecar client
2024/08/16 00:07:48 Serving insecurely on HTTP port: 9090
2024/08/16 00:07:48 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
2024/08/16 00:08:18 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
2024/08/16 00:08:48 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
2024/08/16 00:09:18 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
2024/08/16 00:09:48 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
2024/08/16 00:10:18 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
2024/08/16 00:10:48 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
2024/08/16 00:11:18 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
2024/08/16 00:11:48 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
2024/08/16 00:12:18 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
2024/08/16 00:12:48 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
==> storage-provisioner [a6efcfb5cb17] <==
I0816 00:08:08.052876 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I0816 00:08:08.086690 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I0816 00:08:08.086919 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I0816 00:08:25.560208 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I0816 00:08:25.560444 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_old-k8s-version-894472_37e43bd1-48d4-41ee-b36e-da33dd79404d!
I0816 00:08:25.562003 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"104451a7-4333-4924-b806-36d8821f9dfd", APIVersion:"v1", ResourceVersion:"814", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' old-k8s-version-894472_37e43bd1-48d4-41ee-b36e-da33dd79404d became leader
I0816 00:08:25.661174 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_old-k8s-version-894472_37e43bd1-48d4-41ee-b36e-da33dd79404d!
==> storage-provisioner [de096650c620] <==
I0816 00:07:26.083288 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
F0816 00:07:56.086818 1 main.go:39] error getting server version: Get "https://10.96.0.1:443/version?timeout=32s": dial tcp 10.96.0.1:443: i/o timeout
-- /stdout --
helpers_test.go:254: (dbg) Run: out/minikube-linux-arm64 status --format={{.APIServer}} -p old-k8s-version-894472 -n old-k8s-version-894472
helpers_test.go:261: (dbg) Run: kubectl --context old-k8s-version-894472 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:272: non-running pods: metrics-server-9975d5f86-tt4kd dashboard-metrics-scraper-8d5bb5db8-vst6j
helpers_test.go:274: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/SecondStart]: describe non-running pods <======
helpers_test.go:277: (dbg) Run: kubectl --context old-k8s-version-894472 describe pod metrics-server-9975d5f86-tt4kd dashboard-metrics-scraper-8d5bb5db8-vst6j
helpers_test.go:277: (dbg) Non-zero exit: kubectl --context old-k8s-version-894472 describe pod metrics-server-9975d5f86-tt4kd dashboard-metrics-scraper-8d5bb5db8-vst6j: exit status 1 (91.545668ms)
** stderr **
Error from server (NotFound): pods "metrics-server-9975d5f86-tt4kd" not found
Error from server (NotFound): pods "dashboard-metrics-scraper-8d5bb5db8-vst6j" not found
** /stderr **
helpers_test.go:279: kubectl --context old-k8s-version-894472 describe pod metrics-server-9975d5f86-tt4kd dashboard-metrics-scraper-8d5bb5db8-vst6j: exit status 1
--- FAIL: TestStartStop/group/old-k8s-version/serial/SecondStart (380.63s)