=== RUN TestStartStop/group/old-k8s-version/serial/SecondStart
start_stop_delete_test.go:256: (dbg) Run: out/minikube-linux-amd64 start -p old-k8s-version-280963 --memory=2200 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.20.0
E1210 00:25:07.616719 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/kindnet-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:25:16.970227 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/auto-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:25:36.882648 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/addons-923727/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:25:46.903924 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/custom-flannel-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:25:46.910379 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/custom-flannel-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:25:46.921820 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/custom-flannel-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:25:46.943303 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/custom-flannel-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:25:46.984862 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/custom-flannel-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:25:47.066372 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/custom-flannel-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:25:47.227749 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/custom-flannel-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:25:47.549777 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/custom-flannel-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:25:48.191892 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/custom-flannel-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:25:48.578802 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/kindnet-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:25:49.473332 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/custom-flannel-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:25:52.035437 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/custom-flannel-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:25:54.255456 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/calico-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:25:54.261926 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/calico-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:25:54.273411 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/calico-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:25:54.294940 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/calico-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:25:54.336461 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/calico-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:25:54.417993 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/calico-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:25:54.579567 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/calico-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:25:54.900878 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/calico-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:25:55.542547 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/calico-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:25:56.824232 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/calico-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:25:57.157212 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/custom-flannel-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:25:57.932185 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/auto-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:25:59.385514 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/calico-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:26:04.507528 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/calico-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:26:07.399265 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/custom-flannel-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:26:14.749324 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/calico-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:26:27.880721 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/custom-flannel-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:26:35.230757 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/calico-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:26:53.193095 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/enable-default-cni-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:26:53.199591 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/enable-default-cni-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:26:53.211109 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/enable-default-cni-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:26:53.232601 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/enable-default-cni-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:26:53.274583 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/enable-default-cni-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:26:53.356180 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/enable-default-cni-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:26:53.517744 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/enable-default-cni-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:26:53.839914 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/enable-default-cni-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:26:54.482121 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/enable-default-cni-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:26:55.764431 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/enable-default-cni-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:26:58.326514 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/enable-default-cni-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:27:03.448684 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/enable-default-cni-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:27:08.843130 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/custom-flannel-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:27:10.500631 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/kindnet-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:27:13.153590 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/flannel-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:27:13.160057 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/flannel-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:27:13.171507 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/flannel-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:27:13.192949 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/flannel-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:27:13.234529 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/flannel-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:27:13.316095 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/flannel-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:27:13.477720 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/flannel-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:27:13.690610 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/enable-default-cni-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:27:13.800083 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/flannel-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:27:14.441748 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/flannel-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:27:15.723857 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/flannel-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:27:16.192808 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/calico-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:27:18.285966 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/flannel-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:27:19.854014 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/auto-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:27:23.407811 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/flannel-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:27:33.649727 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/flannel-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:27:33.812457 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/addons-923727/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:27:34.172160 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/enable-default-cni-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:27:42.891419 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/bridge-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:27:42.897892 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/bridge-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:27:42.909347 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/bridge-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:27:42.930778 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/bridge-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:27:42.972289 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/bridge-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:27:43.053957 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/bridge-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:27:43.215929 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/bridge-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:27:43.538153 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/bridge-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:27:44.180132 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/bridge-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:27:45.462534 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/bridge-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:27:48.024138 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/bridge-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:27:53.146364 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/bridge-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:27:54.132163 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/flannel-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:28:03.388152 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/bridge-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:28:13.584295 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/functional-618530/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:28:15.134464 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/enable-default-cni-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:28:23.869772 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/bridge-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:28:30.764697 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/custom-flannel-085288/client.crt: no such file or directory" logger="UnhandledError"
E1210 00:28:35.093739 533916 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/flannel-085288/client.crt: no such file or directory" logger="UnhandledError"
start_stop_delete_test.go:256: (dbg) Non-zero exit: out/minikube-linux-amd64 start -p old-k8s-version-280963 --memory=2200 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.20.0: exit status 102 (6m17.576229017s)
-- stdout --
* [old-k8s-version-280963] minikube v1.34.0 on Ubuntu 20.04 (kvm/amd64)
- MINIKUBE_LOCATION=20062
- MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
- KUBECONFIG=/home/jenkins/minikube-integration/20062-527107/kubeconfig
- MINIKUBE_HOME=/home/jenkins/minikube-integration/20062-527107/.minikube
- MINIKUBE_BIN=out/minikube-linux-amd64
- MINIKUBE_FORCE_SYSTEMD=
* Kubernetes 1.31.2 is now available. If you would like to upgrade, specify: --kubernetes-version=v1.31.2
* Using the docker driver based on existing profile
* Starting "old-k8s-version-280963" primary control-plane node in "old-k8s-version-280963" cluster
* Pulling base image v0.0.45-1730888964-19917 ...
* Restarting existing docker container for "old-k8s-version-280963" ...
* Preparing Kubernetes v1.20.0 on containerd 1.7.22 ...
* Verifying Kubernetes components...
- Using image fake.domain/registry.k8s.io/echoserver:1.4
- Using image docker.io/kubernetesui/dashboard:v2.7.0
- Using image gcr.io/k8s-minikube/storage-provisioner:v5
- Using image registry.k8s.io/echoserver:1.4
* Some dashboard features require the metrics-server addon. To enable all features please run:
minikube -p old-k8s-version-280963 addons enable metrics-server
* Enabled addons: metrics-server, default-storageclass, storage-provisioner, dashboard
-- /stdout --
** stderr **
I1210 00:25:04.955946 869958 out.go:345] Setting OutFile to fd 1 ...
I1210 00:25:04.956096 869958 out.go:392] TERM=,COLORTERM=, which probably does not support color
I1210 00:25:04.956109 869958 out.go:358] Setting ErrFile to fd 2...
I1210 00:25:04.956116 869958 out.go:392] TERM=,COLORTERM=, which probably does not support color
I1210 00:25:04.956525 869958 root.go:338] Updating PATH: /home/jenkins/minikube-integration/20062-527107/.minikube/bin
I1210 00:25:04.957354 869958 out.go:352] Setting JSON to false
I1210 00:25:04.959263 869958 start.go:129] hostinfo: {"hostname":"ubuntu-20-agent-6","uptime":11249,"bootTime":1733779056,"procs":636,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1071-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I1210 00:25:04.959396 869958 start.go:139] virtualization: kvm guest
I1210 00:25:04.961100 869958 out.go:177] * [old-k8s-version-280963] minikube v1.34.0 on Ubuntu 20.04 (kvm/amd64)
I1210 00:25:04.962822 869958 notify.go:220] Checking for updates...
I1210 00:25:04.962896 869958 out.go:177] - MINIKUBE_LOCATION=20062
I1210 00:25:04.964460 869958 out.go:177] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1210 00:25:04.965964 869958 out.go:177] - KUBECONFIG=/home/jenkins/minikube-integration/20062-527107/kubeconfig
I1210 00:25:04.967799 869958 out.go:177] - MINIKUBE_HOME=/home/jenkins/minikube-integration/20062-527107/.minikube
I1210 00:25:04.969313 869958 out.go:177] - MINIKUBE_BIN=out/minikube-linux-amd64
I1210 00:25:04.970642 869958 out.go:177] - MINIKUBE_FORCE_SYSTEMD=
I1210 00:25:04.972583 869958 config.go:182] Loaded profile config "old-k8s-version-280963": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.20.0
I1210 00:25:04.974529 869958 out.go:177] * Kubernetes 1.31.2 is now available. If you would like to upgrade, specify: --kubernetes-version=v1.31.2
I1210 00:25:04.975710 869958 driver.go:394] Setting default libvirt URI to qemu:///system
I1210 00:25:05.001125 869958 docker.go:123] docker version: linux-27.4.0:Docker Engine - Community
I1210 00:25:05.001300 869958 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1210 00:25:05.062760 869958 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:4 ContainersRunning:3 ContainersPaused:0 ContainersStopped:1 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:59 OomKillDisable:true NGoroutines:75 SystemTime:2024-12-10 00:25:05.050309938 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1071-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33647935488 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-6 Labels:[] ExperimentalBuild:false ServerVersion:27.4.0 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:88bf19b2105c8b17560993bee28a01ddc2f97182 Expected:88bf19b2105c8b17560993bee28a01ddc2f97182} RuncCommit:{ID:v1.2.2-0-g7cb3632 Expected:v1.2.2-0-g7cb3632} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:[WARNING: bridge-nf-call-iptables is disabled WARNING: bridge-nf-call-ip6tables is disabled] ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.19.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.31.0] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1210 00:25:05.062954 869958 docker.go:318] overlay module found
I1210 00:25:05.064816 869958 out.go:177] * Using the docker driver based on existing profile
I1210 00:25:05.066283 869958 start.go:297] selected driver: docker
I1210 00:25:05.066302 869958 start.go:901] validating driver "docker" against &{Name:old-k8s-version-280963 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1730888964-19917@sha256:629a5748e3ec15a091fef12257eb3754b8ffc0c974ebcbb016451c65d1829615 Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.20.0 ClusterName:old-k8s-version-280963 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.20.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1210 00:25:05.066393 869958 start.go:912] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1210 00:25:05.067287 869958 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1210 00:25:05.114952 869958 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:4 ContainersRunning:3 ContainersPaused:0 ContainersStopped:1 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:59 OomKillDisable:true NGoroutines:75 SystemTime:2024-12-10 00:25:05.105417382 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1071-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33647935488 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-6 Labels:[] ExperimentalBuild:false ServerVersion:27.4.0 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:88bf19b2105c8b17560993bee28a01ddc2f97182 Expected:88bf19b2105c8b17560993bee28a01ddc2f97182} RuncCommit:{ID:v1.2.2-0-g7cb3632 Expected:v1.2.2-0-g7cb3632} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:[WARNING: bridge-nf-call-iptables is disabled WARNING: bridge-nf-call-ip6tables is disabled] ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.19.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.31.0] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1210 00:25:05.115361 869958 start_flags.go:947] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1210 00:25:05.115394 869958 cni.go:84] Creating CNI manager for ""
I1210 00:25:05.115445 869958 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1210 00:25:05.115491 869958 start.go:340] cluster config:
{Name:old-k8s-version-280963 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1730888964-19917@sha256:629a5748e3ec15a091fef12257eb3754b8ffc0c974ebcbb016451c65d1829615 Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.20.0 ClusterName:old-k8s-version-280963 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.20.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1210 00:25:05.117575 869958 out.go:177] * Starting "old-k8s-version-280963" primary control-plane node in "old-k8s-version-280963" cluster
I1210 00:25:05.118962 869958 cache.go:121] Beginning downloading kic base image for docker with containerd
I1210 00:25:05.120396 869958 out.go:177] * Pulling base image v0.0.45-1730888964-19917 ...
I1210 00:25:05.121598 869958 preload.go:131] Checking if preload exists for k8s version v1.20.0 and runtime containerd
I1210 00:25:05.121642 869958 image.go:79] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1730888964-19917@sha256:629a5748e3ec15a091fef12257eb3754b8ffc0c974ebcbb016451c65d1829615 in local docker daemon
I1210 00:25:05.121659 869958 preload.go:146] Found local preload: /home/jenkins/minikube-integration/20062-527107/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.20.0-containerd-overlay2-amd64.tar.lz4
I1210 00:25:05.121677 869958 cache.go:56] Caching tarball of preloaded images
I1210 00:25:05.121801 869958 preload.go:172] Found /home/jenkins/minikube-integration/20062-527107/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.20.0-containerd-overlay2-amd64.tar.lz4 in cache, skipping download
I1210 00:25:05.121817 869958 cache.go:59] Finished verifying existence of preloaded tar for v1.20.0 on containerd
I1210 00:25:05.121960 869958 profile.go:143] Saving config to /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/old-k8s-version-280963/config.json ...
I1210 00:25:05.143674 869958 image.go:98] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1730888964-19917@sha256:629a5748e3ec15a091fef12257eb3754b8ffc0c974ebcbb016451c65d1829615 in local docker daemon, skipping pull
I1210 00:25:05.143699 869958 cache.go:144] gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1730888964-19917@sha256:629a5748e3ec15a091fef12257eb3754b8ffc0c974ebcbb016451c65d1829615 exists in daemon, skipping load
I1210 00:25:05.143717 869958 cache.go:194] Successfully downloaded all kic artifacts
I1210 00:25:05.143752 869958 start.go:360] acquireMachinesLock for old-k8s-version-280963: {Name:mk866f9896e80cc71597f575ad6ef1d7edb45190 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1210 00:25:05.143833 869958 start.go:364] duration metric: took 57.999µs to acquireMachinesLock for "old-k8s-version-280963"
I1210 00:25:05.143858 869958 start.go:96] Skipping create...Using existing machine configuration
I1210 00:25:05.143866 869958 fix.go:54] fixHost starting:
I1210 00:25:05.144079 869958 cli_runner.go:164] Run: docker container inspect old-k8s-version-280963 --format={{.State.Status}}
I1210 00:25:05.163680 869958 fix.go:112] recreateIfNeeded on old-k8s-version-280963: state=Stopped err=<nil>
W1210 00:25:05.163723 869958 fix.go:138] unexpected machine state, will restart: <nil>
I1210 00:25:05.165617 869958 out.go:177] * Restarting existing docker container for "old-k8s-version-280963" ...
I1210 00:25:05.167193 869958 cli_runner.go:164] Run: docker start old-k8s-version-280963
I1210 00:25:05.475855 869958 cli_runner.go:164] Run: docker container inspect old-k8s-version-280963 --format={{.State.Status}}
I1210 00:25:05.496427 869958 kic.go:430] container "old-k8s-version-280963" state is running.
I1210 00:25:05.497034 869958 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-280963
I1210 00:25:05.517785 869958 profile.go:143] Saving config to /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/old-k8s-version-280963/config.json ...
I1210 00:25:05.518140 869958 machine.go:93] provisionDockerMachine start ...
I1210 00:25:05.518222 869958 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-280963
I1210 00:25:05.538086 869958 main.go:141] libmachine: Using SSH client type: native
I1210 00:25:05.538385 869958 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x866ca0] 0x869980 <nil> [] 0s} 127.0.0.1 33625 <nil> <nil>}
I1210 00:25:05.538403 869958 main.go:141] libmachine: About to run SSH command:
hostname
I1210 00:25:05.539233 869958 main.go:141] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:52728->127.0.0.1:33625: read: connection reset by peer
I1210 00:25:08.674726 869958 main.go:141] libmachine: SSH cmd err, output: <nil>: old-k8s-version-280963
I1210 00:25:08.674761 869958 ubuntu.go:169] provisioning hostname "old-k8s-version-280963"
I1210 00:25:08.674875 869958 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-280963
I1210 00:25:08.695106 869958 main.go:141] libmachine: Using SSH client type: native
I1210 00:25:08.695369 869958 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x866ca0] 0x869980 <nil> [] 0s} 127.0.0.1 33625 <nil> <nil>}
I1210 00:25:08.695396 869958 main.go:141] libmachine: About to run SSH command:
sudo hostname old-k8s-version-280963 && echo "old-k8s-version-280963" | sudo tee /etc/hostname
I1210 00:25:08.839885 869958 main.go:141] libmachine: SSH cmd err, output: <nil>: old-k8s-version-280963
I1210 00:25:08.839987 869958 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-280963
I1210 00:25:08.858288 869958 main.go:141] libmachine: Using SSH client type: native
I1210 00:25:08.858477 869958 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x866ca0] 0x869980 <nil> [] 0s} 127.0.0.1 33625 <nil> <nil>}
I1210 00:25:08.858495 869958 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\sold-k8s-version-280963' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 old-k8s-version-280963/g' /etc/hosts;
else
echo '127.0.1.1 old-k8s-version-280963' | sudo tee -a /etc/hosts;
fi
fi
I1210 00:25:08.987602 869958 main.go:141] libmachine: SSH cmd err, output: <nil>:
I1210 00:25:08.987635 869958 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/20062-527107/.minikube CaCertPath:/home/jenkins/minikube-integration/20062-527107/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/20062-527107/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/20062-527107/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/20062-527107/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/20062-527107/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/20062-527107/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/20062-527107/.minikube}
I1210 00:25:08.987667 869958 ubuntu.go:177] setting up certificates
I1210 00:25:08.987680 869958 provision.go:84] configureAuth start
I1210 00:25:08.987751 869958 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-280963
I1210 00:25:09.005179 869958 provision.go:143] copyHostCerts
I1210 00:25:09.005259 869958 exec_runner.go:144] found /home/jenkins/minikube-integration/20062-527107/.minikube/ca.pem, removing ...
I1210 00:25:09.005283 869958 exec_runner.go:203] rm: /home/jenkins/minikube-integration/20062-527107/.minikube/ca.pem
I1210 00:25:09.005367 869958 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20062-527107/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/20062-527107/.minikube/ca.pem (1082 bytes)
I1210 00:25:09.005497 869958 exec_runner.go:144] found /home/jenkins/minikube-integration/20062-527107/.minikube/cert.pem, removing ...
I1210 00:25:09.005509 869958 exec_runner.go:203] rm: /home/jenkins/minikube-integration/20062-527107/.minikube/cert.pem
I1210 00:25:09.005547 869958 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20062-527107/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/20062-527107/.minikube/cert.pem (1123 bytes)
I1210 00:25:09.005620 869958 exec_runner.go:144] found /home/jenkins/minikube-integration/20062-527107/.minikube/key.pem, removing ...
I1210 00:25:09.005630 869958 exec_runner.go:203] rm: /home/jenkins/minikube-integration/20062-527107/.minikube/key.pem
I1210 00:25:09.005665 869958 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20062-527107/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/20062-527107/.minikube/key.pem (1679 bytes)
I1210 00:25:09.005733 869958 provision.go:117] generating server cert: /home/jenkins/minikube-integration/20062-527107/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/20062-527107/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/20062-527107/.minikube/certs/ca-key.pem org=jenkins.old-k8s-version-280963 san=[127.0.0.1 192.168.85.2 localhost minikube old-k8s-version-280963]
I1210 00:25:09.107023 869958 provision.go:177] copyRemoteCerts
I1210 00:25:09.107096 869958 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1210 00:25:09.107135 869958 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-280963
I1210 00:25:09.125290 869958 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33625 SSHKeyPath:/home/jenkins/minikube-integration/20062-527107/.minikube/machines/old-k8s-version-280963/id_rsa Username:docker}
I1210 00:25:09.220303 869958 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20062-527107/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1210 00:25:09.243742 869958 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20062-527107/.minikube/machines/server.pem --> /etc/docker/server.pem (1233 bytes)
I1210 00:25:09.269835 869958 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20062-527107/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1210 00:25:09.293843 869958 provision.go:87] duration metric: took 306.148134ms to configureAuth
I1210 00:25:09.293876 869958 ubuntu.go:193] setting minikube options for container-runtime
I1210 00:25:09.294074 869958 config.go:182] Loaded profile config "old-k8s-version-280963": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.20.0
I1210 00:25:09.294088 869958 machine.go:96] duration metric: took 3.775928341s to provisionDockerMachine
I1210 00:25:09.294099 869958 start.go:293] postStartSetup for "old-k8s-version-280963" (driver="docker")
I1210 00:25:09.294114 869958 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1210 00:25:09.294172 869958 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1210 00:25:09.294240 869958 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-280963
I1210 00:25:09.312574 869958 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33625 SSHKeyPath:/home/jenkins/minikube-integration/20062-527107/.minikube/machines/old-k8s-version-280963/id_rsa Username:docker}
I1210 00:25:09.408223 869958 ssh_runner.go:195] Run: cat /etc/os-release
I1210 00:25:09.411858 869958 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1210 00:25:09.411909 869958 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I1210 00:25:09.411918 869958 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I1210 00:25:09.411925 869958 info.go:137] Remote host: Ubuntu 22.04.5 LTS
I1210 00:25:09.411936 869958 filesync.go:126] Scanning /home/jenkins/minikube-integration/20062-527107/.minikube/addons for local assets ...
I1210 00:25:09.411989 869958 filesync.go:126] Scanning /home/jenkins/minikube-integration/20062-527107/.minikube/files for local assets ...
I1210 00:25:09.412101 869958 filesync.go:149] local asset: /home/jenkins/minikube-integration/20062-527107/.minikube/files/etc/ssl/certs/5339162.pem -> 5339162.pem in /etc/ssl/certs
I1210 00:25:09.412194 869958 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1210 00:25:09.421111 869958 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20062-527107/.minikube/files/etc/ssl/certs/5339162.pem --> /etc/ssl/certs/5339162.pem (1708 bytes)
I1210 00:25:09.444868 869958 start.go:296] duration metric: took 150.747825ms for postStartSetup
I1210 00:25:09.444962 869958 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1210 00:25:09.445017 869958 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-280963
I1210 00:25:09.462877 869958 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33625 SSHKeyPath:/home/jenkins/minikube-integration/20062-527107/.minikube/machines/old-k8s-version-280963/id_rsa Username:docker}
I1210 00:25:09.556135 869958 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1210 00:25:09.561322 869958 fix.go:56] duration metric: took 4.417449431s for fixHost
I1210 00:25:09.561358 869958 start.go:83] releasing machines lock for "old-k8s-version-280963", held for 4.417502543s
I1210 00:25:09.561430 869958 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-280963
I1210 00:25:09.579184 869958 ssh_runner.go:195] Run: cat /version.json
I1210 00:25:09.579242 869958 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-280963
I1210 00:25:09.579277 869958 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1210 00:25:09.579343 869958 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-280963
I1210 00:25:09.597163 869958 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33625 SSHKeyPath:/home/jenkins/minikube-integration/20062-527107/.minikube/machines/old-k8s-version-280963/id_rsa Username:docker}
I1210 00:25:09.597409 869958 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33625 SSHKeyPath:/home/jenkins/minikube-integration/20062-527107/.minikube/machines/old-k8s-version-280963/id_rsa Username:docker}
I1210 00:25:09.766625 869958 ssh_runner.go:195] Run: systemctl --version
I1210 00:25:09.771804 869958 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I1210 00:25:09.776413 869958 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I1210 00:25:09.794671 869958 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I1210 00:25:09.794741 869958 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1210 00:25:09.803442 869958 cni.go:259] no active bridge cni configs found in "/etc/cni/net.d" - nothing to disable
I1210 00:25:09.803474 869958 start.go:495] detecting cgroup driver to use...
I1210 00:25:09.803511 869958 detect.go:187] detected "cgroupfs" cgroup driver on host os
I1210 00:25:09.803564 869958 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1210 00:25:09.816520 869958 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1210 00:25:09.828022 869958 docker.go:217] disabling cri-docker service (if available) ...
I1210 00:25:09.828091 869958 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1210 00:25:09.841277 869958 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1210 00:25:09.853753 869958 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1210 00:25:09.936231 869958 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1210 00:25:10.007547 869958 docker.go:233] disabling docker service ...
I1210 00:25:10.007711 869958 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1210 00:25:10.020718 869958 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1210 00:25:10.031812 869958 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1210 00:25:10.107125 869958 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1210 00:25:10.185441 869958 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1210 00:25:10.197022 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1210 00:25:10.213625 869958 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.2"|' /etc/containerd/config.toml"
I1210 00:25:10.223636 869958 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1210 00:25:10.234165 869958 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I1210 00:25:10.234230 869958 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I1210 00:25:10.245193 869958 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1210 00:25:10.255178 869958 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1210 00:25:10.265685 869958 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1210 00:25:10.275924 869958 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1210 00:25:10.285519 869958 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1210 00:25:10.295444 869958 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1210 00:25:10.303778 869958 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1210 00:25:10.312599 869958 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1210 00:25:10.390119 869958 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1210 00:25:10.513095 869958 start.go:542] Will wait 60s for socket path /run/containerd/containerd.sock
I1210 00:25:10.513190 869958 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I1210 00:25:10.518071 869958 start.go:563] Will wait 60s for crictl version
I1210 00:25:10.518159 869958 ssh_runner.go:195] Run: which crictl
I1210 00:25:10.523347 869958 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I1210 00:25:10.570593 869958 start.go:579] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: 1.7.22
RuntimeApiVersion: v1
I1210 00:25:10.570671 869958 ssh_runner.go:195] Run: containerd --version
I1210 00:25:10.594116 869958 ssh_runner.go:195] Run: containerd --version
I1210 00:25:10.621002 869958 out.go:177] * Preparing Kubernetes v1.20.0 on containerd 1.7.22 ...
I1210 00:25:10.622294 869958 cli_runner.go:164] Run: docker network inspect old-k8s-version-280963 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1210 00:25:10.643550 869958 ssh_runner.go:195] Run: grep 192.168.85.1 host.minikube.internal$ /etc/hosts
I1210 00:25:10.648089 869958 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.85.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1210 00:25:10.662028 869958 kubeadm.go:883] updating cluster {Name:old-k8s-version-280963 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1730888964-19917@sha256:629a5748e3ec15a091fef12257eb3754b8ffc0c974ebcbb016451c65d1829615 Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.20.0 ClusterName:old-k8s-version-280963 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.20.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1210 00:25:10.662155 869958 preload.go:131] Checking if preload exists for k8s version v1.20.0 and runtime containerd
I1210 00:25:10.662216 869958 ssh_runner.go:195] Run: sudo crictl images --output json
I1210 00:25:10.697231 869958 containerd.go:623] couldn't find preloaded image for "registry.k8s.io/kube-apiserver:v1.20.0". assuming images are not preloaded.
I1210 00:25:10.697310 869958 ssh_runner.go:195] Run: which lz4
I1210 00:25:10.701083 869958 ssh_runner.go:195] Run: stat -c "%s %y" /preloaded.tar.lz4
I1210 00:25:10.704660 869958 ssh_runner.go:352] existence check for /preloaded.tar.lz4: stat -c "%s %y" /preloaded.tar.lz4: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/preloaded.tar.lz4': No such file or directory
I1210 00:25:10.704694 869958 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20062-527107/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.20.0-containerd-overlay2-amd64.tar.lz4 --> /preloaded.tar.lz4 (472503869 bytes)
I1210 00:25:11.742097 869958 containerd.go:563] duration metric: took 1.041055167s to copy over tarball
I1210 00:25:11.742178 869958 ssh_runner.go:195] Run: sudo tar --xattrs --xattrs-include security.capability -I lz4 -C /var -xf /preloaded.tar.lz4
I1210 00:25:14.476560 869958 ssh_runner.go:235] Completed: sudo tar --xattrs --xattrs-include security.capability -I lz4 -C /var -xf /preloaded.tar.lz4: (2.734334522s)
I1210 00:25:14.476608 869958 containerd.go:570] duration metric: took 2.734480705s to extract the tarball
I1210 00:25:14.476619 869958 ssh_runner.go:146] rm: /preloaded.tar.lz4
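The preload path above is: stat the target tarball on the node, and only when that fails, scp the ~472 MB preloaded-images tarball over and extract it with tar -I lz4 -C /var, preserving security xattrs so file capabilities survive extraction. A sketch of the check-then-extract step, shelling out with the same flags (paths illustrative):

    package main

    import (
        "fmt"
        "os"
        "os/exec"
    )

    // ensurePreload extracts tarball into dir, mirroring the stat / tar
    // sequence in the log. Illustrative sketch, not minikube's code.
    func ensurePreload(tarball, dir string) error {
        if _, err := os.Stat(tarball); err != nil {
            return fmt.Errorf("tarball must be copied first: %w", err)
        }
        // Same flags as the log: keep security xattrs, decompress with lz4.
        cmd := exec.Command("sudo", "tar", "--xattrs",
            "--xattrs-include", "security.capability",
            "-I", "lz4", "-C", dir, "-xf", tarball)
        cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
        return cmd.Run()
    }

    func main() {
        if err := ensurePreload("/preloaded.tar.lz4", "/var"); err != nil {
            fmt.Fprintln(os.Stderr, err)
        }
    }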
I1210 00:25:15.425434 869958 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1210 00:25:15.507986 869958 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1210 00:25:15.617869 869958 ssh_runner.go:195] Run: sudo crictl images --output json
I1210 00:25:15.654728 869958 containerd.go:623] couldn't find preloaded image for "registry.k8s.io/kube-apiserver:v1.20.0". assuming images are not preloaded.
I1210 00:25:15.654756 869958 cache_images.go:88] LoadCachedImages start: [registry.k8s.io/kube-apiserver:v1.20.0 registry.k8s.io/kube-controller-manager:v1.20.0 registry.k8s.io/kube-scheduler:v1.20.0 registry.k8s.io/kube-proxy:v1.20.0 registry.k8s.io/pause:3.2 registry.k8s.io/etcd:3.4.13-0 registry.k8s.io/coredns:1.7.0 gcr.io/k8s-minikube/storage-provisioner:v5]
I1210 00:25:15.654868 869958 image.go:135] retrieving image: gcr.io/k8s-minikube/storage-provisioner:v5
I1210 00:25:15.655160 869958 image.go:135] retrieving image: registry.k8s.io/pause:3.2
I1210 00:25:15.655188 869958 image.go:135] retrieving image: registry.k8s.io/kube-apiserver:v1.20.0
I1210 00:25:15.655349 869958 image.go:135] retrieving image: registry.k8s.io/etcd:3.4.13-0
I1210 00:25:15.655392 869958 image.go:135] retrieving image: registry.k8s.io/kube-controller-manager:v1.20.0
I1210 00:25:15.655495 869958 image.go:135] retrieving image: registry.k8s.io/coredns:1.7.0
I1210 00:25:15.655517 869958 image.go:135] retrieving image: registry.k8s.io/kube-scheduler:v1.20.0
I1210 00:25:15.655163 869958 image.go:135] retrieving image: registry.k8s.io/kube-proxy:v1.20.0
I1210 00:25:15.656477 869958 image.go:178] daemon lookup for gcr.io/k8s-minikube/storage-provisioner:v5: Error response from daemon: No such image: gcr.io/k8s-minikube/storage-provisioner:v5
I1210 00:25:15.656866 869958 image.go:178] daemon lookup for registry.k8s.io/kube-scheduler:v1.20.0: Error response from daemon: No such image: registry.k8s.io/kube-scheduler:v1.20.0
I1210 00:25:15.656876 869958 image.go:178] daemon lookup for registry.k8s.io/kube-controller-manager:v1.20.0: Error response from daemon: No such image: registry.k8s.io/kube-controller-manager:v1.20.0
I1210 00:25:15.656876 869958 image.go:178] daemon lookup for registry.k8s.io/coredns:1.7.0: Error response from daemon: No such image: registry.k8s.io/coredns:1.7.0
I1210 00:25:15.656936 869958 image.go:178] daemon lookup for registry.k8s.io/pause:3.2: Error response from daemon: No such image: registry.k8s.io/pause:3.2
I1210 00:25:15.656944 869958 image.go:178] daemon lookup for registry.k8s.io/kube-apiserver:v1.20.0: Error response from daemon: No such image: registry.k8s.io/kube-apiserver:v1.20.0
I1210 00:25:15.656961 869958 image.go:178] daemon lookup for registry.k8s.io/etcd:3.4.13-0: Error response from daemon: No such image: registry.k8s.io/etcd:3.4.13-0
I1210 00:25:15.656964 869958 image.go:178] daemon lookup for registry.k8s.io/kube-proxy:v1.20.0: Error response from daemon: No such image: registry.k8s.io/kube-proxy:v1.20.0
I1210 00:25:15.863560 869958 containerd.go:267] Checking existence of image with name "registry.k8s.io/pause:3.2" and sha "80d28bedfe5dec59da9ebf8e6260224ac9008ab5c11dbbe16ee3ba3e4439ac2c"
I1210 00:25:15.863625 869958 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/pause:3.2
I1210 00:25:15.868280 869958 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-controller-manager:v1.20.0" and sha "b9fa1895dcaa6d3dd241d6d9340e939ca30fc0946464ec9f205a8cbe738a8080"
I1210 00:25:15.868348 869958 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-controller-manager:v1.20.0
I1210 00:25:15.879936 869958 containerd.go:267] Checking existence of image with name "registry.k8s.io/coredns:1.7.0" and sha "bfe3a36ebd2528b454be6aebece806db5b40407b833e2af9617bf39afaff8c16"
I1210 00:25:15.880014 869958 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/coredns:1.7.0
I1210 00:25:15.885334 869958 containerd.go:267] Checking existence of image with name "registry.k8s.io/etcd:3.4.13-0" and sha "0369cf4303ffdb467dc219990960a9baa8512a54b0ad9283eaf55bd6c0adb934"
I1210 00:25:15.885408 869958 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/etcd:3.4.13-0
I1210 00:25:15.885881 869958 cache_images.go:116] "registry.k8s.io/pause:3.2" needs transfer: "registry.k8s.io/pause:3.2" does not exist at hash "80d28bedfe5dec59da9ebf8e6260224ac9008ab5c11dbbe16ee3ba3e4439ac2c" in container runtime
I1210 00:25:15.885927 869958 cri.go:218] Removing image: registry.k8s.io/pause:3.2
I1210 00:25:15.885968 869958 ssh_runner.go:195] Run: which crictl
I1210 00:25:15.889448 869958 cache_images.go:116] "registry.k8s.io/kube-controller-manager:v1.20.0" needs transfer: "registry.k8s.io/kube-controller-manager:v1.20.0" does not exist at hash "b9fa1895dcaa6d3dd241d6d9340e939ca30fc0946464ec9f205a8cbe738a8080" in container runtime
I1210 00:25:15.889500 869958 cri.go:218] Removing image: registry.k8s.io/kube-controller-manager:v1.20.0
I1210 00:25:15.889541 869958 ssh_runner.go:195] Run: which crictl
I1210 00:25:15.890130 869958 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-scheduler:v1.20.0" and sha "3138b6e3d471224fd516f758f3b53309219bcb6824e07686b3cd60d78012c899"
I1210 00:25:15.890182 869958 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-scheduler:v1.20.0
I1210 00:25:15.891810 869958 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-proxy:v1.20.0" and sha "10cc881966cfd9287656c2fce1f144625602653d1e8b011487a7a71feb100bdc"
I1210 00:25:15.891863 869958 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-proxy:v1.20.0
I1210 00:25:15.895618 869958 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-apiserver:v1.20.0" and sha "ca9843d3b545457f24b012d6d579ba85f132f2406aa171ad84d53caa55e5de99"
I1210 00:25:15.895675 869958 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-apiserver:v1.20.0
I1210 00:25:15.905650 869958 cache_images.go:116] "registry.k8s.io/coredns:1.7.0" needs transfer: "registry.k8s.io/coredns:1.7.0" does not exist at hash "bfe3a36ebd2528b454be6aebece806db5b40407b833e2af9617bf39afaff8c16" in container runtime
I1210 00:25:15.905707 869958 cri.go:218] Removing image: registry.k8s.io/coredns:1.7.0
I1210 00:25:15.905761 869958 ssh_runner.go:195] Run: which crictl
I1210 00:25:15.909440 869958 cache_images.go:116] "registry.k8s.io/etcd:3.4.13-0" needs transfer: "registry.k8s.io/etcd:3.4.13-0" does not exist at hash "0369cf4303ffdb467dc219990960a9baa8512a54b0ad9283eaf55bd6c0adb934" in container runtime
I1210 00:25:15.909496 869958 cri.go:218] Removing image: registry.k8s.io/etcd:3.4.13-0
I1210 00:25:15.909540 869958 ssh_runner.go:195] Run: which crictl
I1210 00:25:15.909580 869958 ssh_runner.go:195] Run: sudo /usr/bin/crictl rmi registry.k8s.io/pause:3.2
I1210 00:25:15.909647 869958 ssh_runner.go:195] Run: sudo /usr/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.20.0
I1210 00:25:15.915935 869958 cache_images.go:116] "registry.k8s.io/kube-scheduler:v1.20.0" needs transfer: "registry.k8s.io/kube-scheduler:v1.20.0" does not exist at hash "3138b6e3d471224fd516f758f3b53309219bcb6824e07686b3cd60d78012c899" in container runtime
I1210 00:25:15.916013 869958 cri.go:218] Removing image: registry.k8s.io/kube-scheduler:v1.20.0
I1210 00:25:15.916072 869958 ssh_runner.go:195] Run: which crictl
I1210 00:25:15.933561 869958 cache_images.go:116] "registry.k8s.io/kube-proxy:v1.20.0" needs transfer: "registry.k8s.io/kube-proxy:v1.20.0" does not exist at hash "10cc881966cfd9287656c2fce1f144625602653d1e8b011487a7a71feb100bdc" in container runtime
I1210 00:25:15.933604 869958 cache_images.go:116] "registry.k8s.io/kube-apiserver:v1.20.0" needs transfer: "registry.k8s.io/kube-apiserver:v1.20.0" does not exist at hash "ca9843d3b545457f24b012d6d579ba85f132f2406aa171ad84d53caa55e5de99" in container runtime
I1210 00:25:15.933625 869958 cri.go:218] Removing image: registry.k8s.io/kube-proxy:v1.20.0
I1210 00:25:15.933641 869958 cri.go:218] Removing image: registry.k8s.io/kube-apiserver:v1.20.0
I1210 00:25:15.933670 869958 ssh_runner.go:195] Run: which crictl
I1210 00:25:15.933683 869958 ssh_runner.go:195] Run: which crictl
I1210 00:25:15.933690 869958 ssh_runner.go:195] Run: sudo /usr/bin/crictl rmi registry.k8s.io/coredns:1.7.0
I1210 00:25:15.960949 869958 ssh_runner.go:195] Run: sudo /usr/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.20.0
I1210 00:25:15.966484 869958 ssh_runner.go:195] Run: sudo /usr/bin/crictl rmi registry.k8s.io/pause:3.2
I1210 00:25:15.966564 869958 ssh_runner.go:195] Run: sudo /usr/bin/crictl rmi registry.k8s.io/etcd:3.4.13-0
I1210 00:25:15.966583 869958 ssh_runner.go:195] Run: sudo /usr/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.20.0
I1210 00:25:15.966604 869958 ssh_runner.go:195] Run: sudo /usr/bin/crictl rmi registry.k8s.io/kube-proxy:v1.20.0
I1210 00:25:15.966657 869958 ssh_runner.go:195] Run: sudo /usr/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.20.0
I1210 00:25:16.035430 869958 ssh_runner.go:195] Run: sudo /usr/bin/crictl rmi registry.k8s.io/coredns:1.7.0
I1210 00:25:16.145718 869958 ssh_runner.go:195] Run: sudo /usr/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.20.0
I1210 00:25:16.155281 869958 ssh_runner.go:195] Run: sudo /usr/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.20.0
I1210 00:25:16.155426 869958 ssh_runner.go:195] Run: sudo /usr/bin/crictl rmi registry.k8s.io/kube-proxy:v1.20.0
I1210 00:25:16.155519 869958 ssh_runner.go:195] Run: sudo /usr/bin/crictl rmi registry.k8s.io/etcd:3.4.13-0
I1210 00:25:16.155607 869958 ssh_runner.go:195] Run: sudo /usr/bin/crictl rmi registry.k8s.io/pause:3.2
I1210 00:25:16.155687 869958 ssh_runner.go:195] Run: sudo /usr/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.20.0
I1210 00:25:16.243390 869958 ssh_runner.go:195] Run: sudo /usr/bin/crictl rmi registry.k8s.io/coredns:1.7.0
I1210 00:25:16.350165 869958 cache_images.go:289] Loading image from: /home/jenkins/minikube-integration/20062-527107/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.20.0
I1210 00:25:16.356937 869958 ssh_runner.go:195] Run: sudo /usr/bin/crictl rmi registry.k8s.io/kube-proxy:v1.20.0
I1210 00:25:16.357022 869958 ssh_runner.go:195] Run: sudo /usr/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.20.0
I1210 00:25:16.357072 869958 ssh_runner.go:195] Run: sudo /usr/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.20.0
I1210 00:25:16.357145 869958 ssh_runner.go:195] Run: sudo /usr/bin/crictl rmi registry.k8s.io/etcd:3.4.13-0
I1210 00:25:16.357157 869958 cache_images.go:289] Loading image from: /home/jenkins/minikube-integration/20062-527107/.minikube/cache/images/amd64/registry.k8s.io/pause_3.2
I1210 00:25:16.433936 869958 cache_images.go:289] Loading image from: /home/jenkins/minikube-integration/20062-527107/.minikube/cache/images/amd64/registry.k8s.io/coredns_1.7.0
I1210 00:25:16.528644 869958 cache_images.go:289] Loading image from: /home/jenkins/minikube-integration/20062-527107/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.4.13-0
I1210 00:25:16.528722 869958 cache_images.go:289] Loading image from: /home/jenkins/minikube-integration/20062-527107/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.20.0
I1210 00:25:16.528813 869958 cache_images.go:289] Loading image from: /home/jenkins/minikube-integration/20062-527107/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.20.0
I1210 00:25:16.528838 869958 cache_images.go:289] Loading image from: /home/jenkins/minikube-integration/20062-527107/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.20.0
I1210 00:25:16.933384 869958 containerd.go:267] Checking existence of image with name "gcr.io/k8s-minikube/storage-provisioner:v5" and sha "6e38f40d628db3002f5617342c8872c935de530d867d0f709a2fbda1a302a562"
I1210 00:25:16.933471 869958 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==gcr.io/k8s-minikube/storage-provisioner:v5
I1210 00:25:16.962460 869958 cache_images.go:116] "gcr.io/k8s-minikube/storage-provisioner:v5" needs transfer: "gcr.io/k8s-minikube/storage-provisioner:v5" does not exist at hash "6e38f40d628db3002f5617342c8872c935de530d867d0f709a2fbda1a302a562" in container runtime
I1210 00:25:16.962573 869958 cri.go:218] Removing image: gcr.io/k8s-minikube/storage-provisioner:v5
I1210 00:25:16.962649 869958 ssh_runner.go:195] Run: which crictl
I1210 00:25:16.966791 869958 ssh_runner.go:195] Run: sudo /usr/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1210 00:25:17.304535 869958 cache_images.go:289] Loading image from: /home/jenkins/minikube-integration/20062-527107/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5
I1210 00:25:17.304669 869958 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v5
I1210 00:25:17.308395 869958 ssh_runner.go:356] copy: skipping /var/lib/minikube/images/storage-provisioner_v5 (exists)
I1210 00:25:17.308421 869958 containerd.go:285] Loading image: /var/lib/minikube/images/storage-provisioner_v5
I1210 00:25:17.308491 869958 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/storage-provisioner_v5
I1210 00:25:18.362304 869958 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/storage-provisioner_v5: (1.053781543s)
I1210 00:25:18.362341 869958 cache_images.go:321] Transferred and loaded /home/jenkins/minikube-integration/20062-527107/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 from cache
I1210 00:25:18.362401 869958 cache_images.go:92] duration metric: took 2.707617724s to LoadCachedImages
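Each required image above goes through the same pipeline: look it up in the local Docker daemon, check the containerd image store (ctr -n=k8s.io images ls name==<ref>) against the expected sha, crictl rmi any mismatched copy, then ctr -n=k8s.io images import the cached tarball. A condensed sketch of that existence-check/import loop (helper name loadCached is hypothetical):

    package main

    import (
        "fmt"
        "os/exec"
        "strings"
    )

    // loadCached re-imports ref from tarball when the k8s.io namespace does
    // not already hold it, following the ctr/crictl sequence in the log.
    func loadCached(ref, tarball string) error {
        out, _ := exec.Command("sudo", "ctr", "-n=k8s.io",
            "images", "ls", "name=="+ref).CombinedOutput()
        if strings.Contains(string(out), ref) {
            return nil // already present under the expected name
        }
        // Remove any stale copy known to the CRI, ignoring "not found".
        _ = exec.Command("sudo", "crictl", "rmi", ref).Run()
        if err := exec.Command("sudo", "ctr", "-n=k8s.io",
            "images", "import", tarball).Run(); err != nil {
            return fmt.Errorf("import %s: %w", ref, err)
        }
        return nil
    }

    func main() {
        err := loadCached("gcr.io/k8s-minikube/storage-provisioner:v5",
            "/var/lib/minikube/images/storage-provisioner_v5")
        fmt.Println(err)
    }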
W1210 00:25:18.362485 869958 out.go:270] X Unable to load cached images: LoadCachedImages: stat /home/jenkins/minikube-integration/20062-527107/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.20.0: no such file or directory
X Unable to load cached images: LoadCachedImages: stat /home/jenkins/minikube-integration/20062-527107/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.20.0: no such file or directory
I1210 00:25:18.362507 869958 kubeadm.go:934] updating node { 192.168.85.2 8443 v1.20.0 containerd true true} ...
I1210 00:25:18.362645 869958 kubeadm.go:946] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.20.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --hostname-override=old-k8s-version-280963 --kubeconfig=/etc/kubernetes/kubelet.conf --network-plugin=cni --node-ip=192.168.85.2
[Install]
config:
{KubernetesVersion:v1.20.0 ClusterName:old-k8s-version-280963 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1210 00:25:18.362711 869958 ssh_runner.go:195] Run: sudo crictl info
I1210 00:25:18.398423 869958 cni.go:84] Creating CNI manager for ""
I1210 00:25:18.398446 869958 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1210 00:25:18.398457 869958 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I1210 00:25:18.398531 869958 kubeadm.go:189] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.85.2 APIServerPort:8443 KubernetesVersion:v1.20.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:old-k8s-version-280963 NodeName:old-k8s-version-280963 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.85.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.85.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:false}
I1210 00:25:18.398693 869958 kubeadm.go:195] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.85.2
  bindPort: 8443
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
nodeRegistration:
  criSocket: /run/containerd/containerd.sock
  name: "old-k8s-version-280963"
  kubeletExtraArgs:
    node-ip: 192.168.85.2
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
apiServer:
  certSANs: ["127.0.0.1", "localhost", "192.168.85.2"]
  extraArgs:
    enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
  extraArgs:
    allocate-node-cidrs: "true"
    leader-elect: "false"
scheduler:
  extraArgs:
    leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/minikube/etcd
    extraArgs:
      proxy-refresh-interval: "70000"
kubernetesVersion: v1.20.0
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  x509:
    clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
  maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
  tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
  tcpCloseWaitTimeout: 0s
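minikube renders this manifest from Go templates driven by the kubeadm options struct logged at kubeadm.go:189 above. A toy text/template rendering of just the InitConfiguration stanza, assuming a simplified parameter struct (field names are illustrative, not minikube's):

    package main

    import (
        "os"
        "text/template"
    )

    // A reduced stand-in for the kubeadm options logged above.
    type initCfg struct {
        AdvertiseAddress string
        APIServerPort    int
        CRISocket        string
        NodeName         string
    }

    const initTmpl = `apiVersion: kubeadm.k8s.io/v1beta2
    kind: InitConfiguration
    localAPIEndpoint:
      advertiseAddress: {{.AdvertiseAddress}}
      bindPort: {{.APIServerPort}}
    nodeRegistration:
      criSocket: {{.CRISocket}}
      name: "{{.NodeName}}"
      taints: []
    `

    func main() {
        t := template.Must(template.New("init").Parse(initTmpl))
        _ = t.Execute(os.Stdout, initCfg{
            AdvertiseAddress: "192.168.85.2",
            APIServerPort:    8443,
            CRISocket:        "/run/containerd/containerd.sock",
            NodeName:         "old-k8s-version-280963",
        })
    }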
I1210 00:25:18.398757 869958 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.20.0
I1210 00:25:18.408496 869958 binaries.go:44] Found k8s binaries, skipping transfer
I1210 00:25:18.408580 869958 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1210 00:25:18.418508 869958 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (442 bytes)
I1210 00:25:18.437815 869958 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1210 00:25:18.456538 869958 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2125 bytes)
I1210 00:25:18.474639 869958 ssh_runner.go:195] Run: grep 192.168.85.2 control-plane.minikube.internal$ /etc/hosts
I1210 00:25:18.478394 869958 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.85.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1210 00:25:18.490087 869958 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1210 00:25:18.577629 869958 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1210 00:25:18.591376 869958 certs.go:68] Setting up /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/old-k8s-version-280963 for IP: 192.168.85.2
I1210 00:25:18.591398 869958 certs.go:194] generating shared ca certs ...
I1210 00:25:18.591415 869958 certs.go:226] acquiring lock for ca certs: {Name:mk98ae8901439369b17532a89b5c8e73a55c28a4 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1210 00:25:18.591563 869958 certs.go:235] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/20062-527107/.minikube/ca.key
I1210 00:25:18.591600 869958 certs.go:235] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/20062-527107/.minikube/proxy-client-ca.key
I1210 00:25:18.591609 869958 certs.go:256] generating profile certs ...
I1210 00:25:18.591706 869958 certs.go:359] skipping valid signed profile cert regeneration for "minikube-user": /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/old-k8s-version-280963/client.key
I1210 00:25:18.591760 869958 certs.go:359] skipping valid signed profile cert regeneration for "minikube": /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/old-k8s-version-280963/apiserver.key.32b39cbb
I1210 00:25:18.591803 869958 certs.go:359] skipping valid signed profile cert regeneration for "aggregator": /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/old-k8s-version-280963/proxy-client.key
I1210 00:25:18.591901 869958 certs.go:484] found cert: /home/jenkins/minikube-integration/20062-527107/.minikube/certs/533916.pem (1338 bytes)
W1210 00:25:18.591928 869958 certs.go:480] ignoring /home/jenkins/minikube-integration/20062-527107/.minikube/certs/533916_empty.pem, impossibly tiny 0 bytes
I1210 00:25:18.591935 869958 certs.go:484] found cert: /home/jenkins/minikube-integration/20062-527107/.minikube/certs/ca-key.pem (1675 bytes)
I1210 00:25:18.591959 869958 certs.go:484] found cert: /home/jenkins/minikube-integration/20062-527107/.minikube/certs/ca.pem (1082 bytes)
I1210 00:25:18.591980 869958 certs.go:484] found cert: /home/jenkins/minikube-integration/20062-527107/.minikube/certs/cert.pem (1123 bytes)
I1210 00:25:18.592000 869958 certs.go:484] found cert: /home/jenkins/minikube-integration/20062-527107/.minikube/certs/key.pem (1679 bytes)
I1210 00:25:18.592038 869958 certs.go:484] found cert: /home/jenkins/minikube-integration/20062-527107/.minikube/files/etc/ssl/certs/5339162.pem (1708 bytes)
I1210 00:25:18.592732 869958 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20062-527107/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1210 00:25:18.619479 869958 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20062-527107/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1210 00:25:18.644820 869958 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20062-527107/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1210 00:25:18.674166 869958 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20062-527107/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I1210 00:25:18.705882 869958 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/old-k8s-version-280963/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1432 bytes)
I1210 00:25:18.743356 869958 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/old-k8s-version-280963/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I1210 00:25:18.767725 869958 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/old-k8s-version-280963/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1210 00:25:18.792265 869958 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/old-k8s-version-280963/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I1210 00:25:18.816809 869958 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20062-527107/.minikube/files/etc/ssl/certs/5339162.pem --> /usr/share/ca-certificates/5339162.pem (1708 bytes)
I1210 00:25:18.841526 869958 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20062-527107/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1210 00:25:18.865096 869958 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20062-527107/.minikube/certs/533916.pem --> /usr/share/ca-certificates/533916.pem (1338 bytes)
I1210 00:25:18.888554 869958 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1210 00:25:18.906216 869958 ssh_runner.go:195] Run: openssl version
I1210 00:25:18.911638 869958 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/533916.pem && ln -fs /usr/share/ca-certificates/533916.pem /etc/ssl/certs/533916.pem"
I1210 00:25:18.920873 869958 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/533916.pem
I1210 00:25:18.924625 869958 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Dec 9 23:51 /usr/share/ca-certificates/533916.pem
I1210 00:25:18.924680 869958 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/533916.pem
I1210 00:25:18.931840 869958 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/533916.pem /etc/ssl/certs/51391683.0"
I1210 00:25:18.941015 869958 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/5339162.pem && ln -fs /usr/share/ca-certificates/5339162.pem /etc/ssl/certs/5339162.pem"
I1210 00:25:18.950413 869958 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/5339162.pem
I1210 00:25:18.954364 869958 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Dec 9 23:51 /usr/share/ca-certificates/5339162.pem
I1210 00:25:18.954431 869958 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/5339162.pem
I1210 00:25:18.961069 869958 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/5339162.pem /etc/ssl/certs/3ec20f2e.0"
I1210 00:25:18.969677 869958 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1210 00:25:18.979122 869958 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1210 00:25:18.982999 869958 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Dec 9 23:44 /usr/share/ca-certificates/minikubeCA.pem
I1210 00:25:18.983088 869958 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1210 00:25:18.989542 869958 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
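Each CA bundle dropped into /usr/share/ca-certificates is activated by symlinking it into /etc/ssl/certs under its OpenSSL subject hash plus a ".0" suffix (b5213941.0 for minikubeCA.pem above); this is the c_rehash layout OpenSSL uses to locate trust anchors. A sketch of that hash-and-link step, shelling out to openssl exactly as the log does (a sketch, not minikube's certs.go):

    package main

    import (
        "fmt"
        "os"
        "os/exec"
        "path/filepath"
        "strings"
    )

    // trustCert links pemPath into certDir under its OpenSSL subject hash,
    // mirroring `openssl x509 -hash -noout` plus `ln -fs` from the log.
    func trustCert(pemPath, certDir string) error {
        out, err := exec.Command("openssl", "x509",
            "-hash", "-noout", "-in", pemPath).Output()
        if err != nil {
            return err
        }
        link := filepath.Join(certDir, strings.TrimSpace(string(out))+".0")
        _ = os.Remove(link) // ln -fs semantics: replace an existing link
        return os.Symlink(pemPath, link)
    }

    func main() {
        err := trustCert("/usr/share/ca-certificates/minikubeCA.pem", "/etc/ssl/certs")
        fmt.Println(err)
    }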
I1210 00:25:18.998349 869958 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1210 00:25:19.001915 869958 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-etcd-client.crt -checkend 86400
I1210 00:25:19.008722 869958 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-kubelet-client.crt -checkend 86400
I1210 00:25:19.015556 869958 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/server.crt -checkend 86400
I1210 00:25:19.022056 869958 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/healthcheck-client.crt -checkend 86400
I1210 00:25:19.029008 869958 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/peer.crt -checkend 86400
I1210 00:25:19.036092 869958 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/front-proxy-client.crt -checkend 86400
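The `openssl x509 -checkend 86400` runs above exit non-zero when a certificate expires within the next 24 hours, which is the signal to regenerate rather than reuse it. The same check can be done natively with crypto/x509; a minimal sketch:

    package main

    import (
        "crypto/x509"
        "encoding/pem"
        "fmt"
        "os"
        "time"
    )

    // expiresWithin reports whether the PEM cert at path expires inside d,
    // the Go equivalent of `openssl x509 -checkend <seconds>`.
    func expiresWithin(path string, d time.Duration) (bool, error) {
        data, err := os.ReadFile(path)
        if err != nil {
            return false, err
        }
        block, _ := pem.Decode(data)
        if block == nil {
            return false, fmt.Errorf("%s: no PEM block", path)
        }
        cert, err := x509.ParseCertificate(block.Bytes)
        if err != nil {
            return false, err
        }
        return time.Now().Add(d).After(cert.NotAfter), nil
    }

    func main() {
        soon, err := expiresWithin("/var/lib/minikube/certs/apiserver-kubelet-client.crt", 24*time.Hour)
        fmt.Println(soon, err)
    }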
I1210 00:25:19.042762 869958 kubeadm.go:392] StartCluster: {Name:old-k8s-version-280963 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1730888964-19917@sha256:629a5748e3ec15a091fef12257eb3754b8ffc0c974ebcbb016451c65d1829615 Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.20.0 ClusterName:old-k8s-version-280963 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.20.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1210 00:25:19.042956 869958 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I1210 00:25:19.043016 869958 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I1210 00:25:19.079621 869958 cri.go:89] found id: ""
I1210 00:25:19.079685 869958 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1210 00:25:19.088408 869958 kubeadm.go:408] found existing configuration files, will attempt cluster restart
I1210 00:25:19.088428 869958 kubeadm.go:593] restartPrimaryControlPlane start ...
I1210 00:25:19.088468 869958 ssh_runner.go:195] Run: sudo test -d /data/minikube
I1210 00:25:19.096662 869958 kubeadm.go:130] /data/minikube skipping compat symlinks: sudo test -d /data/minikube: Process exited with status 1
stdout:
stderr:
I1210 00:25:19.097725 869958 kubeconfig.go:47] verify endpoint returned: get endpoint: "old-k8s-version-280963" does not appear in /home/jenkins/minikube-integration/20062-527107/kubeconfig
I1210 00:25:19.098351 869958 kubeconfig.go:62] /home/jenkins/minikube-integration/20062-527107/kubeconfig needs updating (will repair): [kubeconfig missing "old-k8s-version-280963" cluster setting kubeconfig missing "old-k8s-version-280963" context setting]
I1210 00:25:19.099421 869958 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20062-527107/kubeconfig: {Name:mk47c0b52ce4821be2777fdd40884aa11f573a8b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1210 00:25:19.101544 869958 ssh_runner.go:195] Run: sudo diff -u /var/tmp/minikube/kubeadm.yaml /var/tmp/minikube/kubeadm.yaml.new
I1210 00:25:19.110651 869958 kubeadm.go:630] The running cluster does not require reconfiguration: 192.168.85.2
I1210 00:25:19.110692 869958 kubeadm.go:597] duration metric: took 22.258553ms to restartPrimaryControlPlane
I1210 00:25:19.110705 869958 kubeadm.go:394] duration metric: took 67.955274ms to StartCluster
I1210 00:25:19.110727 869958 settings.go:142] acquiring lock: {Name:mk0114e7c414efdfe48670d68c91542cc6018bea Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1210 00:25:19.110822 869958 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/20062-527107/kubeconfig
I1210 00:25:19.112623 869958 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20062-527107/kubeconfig: {Name:mk47c0b52ce4821be2777fdd40884aa11f573a8b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1210 00:25:19.112935 869958 start.go:235] Will wait 6m0s for node &{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.20.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1210 00:25:19.113076 869958 addons.go:507] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:true default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:true nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1210 00:25:19.113168 869958 config.go:182] Loaded profile config "old-k8s-version-280963": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.20.0
I1210 00:25:19.113195 869958 addons.go:69] Setting storage-provisioner=true in profile "old-k8s-version-280963"
I1210 00:25:19.113220 869958 addons.go:234] Setting addon storage-provisioner=true in "old-k8s-version-280963"
I1210 00:25:19.113224 869958 addons.go:69] Setting dashboard=true in profile "old-k8s-version-280963"
W1210 00:25:19.113233 869958 addons.go:243] addon storage-provisioner should already be in state true
I1210 00:25:19.113238 869958 addons.go:234] Setting addon dashboard=true in "old-k8s-version-280963"
I1210 00:25:19.113241 869958 addons.go:69] Setting default-storageclass=true in profile "old-k8s-version-280963"
I1210 00:25:19.113268 869958 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "old-k8s-version-280963"
I1210 00:25:19.113272 869958 host.go:66] Checking if "old-k8s-version-280963" exists ...
W1210 00:25:19.113246 869958 addons.go:243] addon dashboard should already be in state true
I1210 00:25:19.113340 869958 host.go:66] Checking if "old-k8s-version-280963" exists ...
I1210 00:25:19.113266 869958 addons.go:69] Setting metrics-server=true in profile "old-k8s-version-280963"
I1210 00:25:19.113437 869958 addons.go:234] Setting addon metrics-server=true in "old-k8s-version-280963"
W1210 00:25:19.113449 869958 addons.go:243] addon metrics-server should already be in state true
I1210 00:25:19.113483 869958 host.go:66] Checking if "old-k8s-version-280963" exists ...
I1210 00:25:19.113610 869958 cli_runner.go:164] Run: docker container inspect old-k8s-version-280963 --format={{.State.Status}}
I1210 00:25:19.113776 869958 cli_runner.go:164] Run: docker container inspect old-k8s-version-280963 --format={{.State.Status}}
I1210 00:25:19.113921 869958 cli_runner.go:164] Run: docker container inspect old-k8s-version-280963 --format={{.State.Status}}
I1210 00:25:19.114062 869958 cli_runner.go:164] Run: docker container inspect old-k8s-version-280963 --format={{.State.Status}}
I1210 00:25:19.117140 869958 out.go:177] * Verifying Kubernetes components...
I1210 00:25:19.118522 869958 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1210 00:25:19.140966 869958 out.go:177] - Using image fake.domain/registry.k8s.io/echoserver:1.4
I1210 00:25:19.142496 869958 addons.go:431] installing /etc/kubernetes/addons/metrics-apiservice.yaml
I1210 00:25:19.142523 869958 ssh_runner.go:362] scp metrics-server/metrics-apiservice.yaml --> /etc/kubernetes/addons/metrics-apiservice.yaml (424 bytes)
I1210 00:25:19.142586 869958 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-280963
I1210 00:25:19.143373 869958 addons.go:234] Setting addon default-storageclass=true in "old-k8s-version-280963"
W1210 00:25:19.143396 869958 addons.go:243] addon default-storageclass should already be in state true
I1210 00:25:19.143426 869958 host.go:66] Checking if "old-k8s-version-280963" exists ...
I1210 00:25:19.143988 869958 out.go:177] - Using image docker.io/kubernetesui/dashboard:v2.7.0
I1210 00:25:19.144201 869958 cli_runner.go:164] Run: docker container inspect old-k8s-version-280963 --format={{.State.Status}}
I1210 00:25:19.145095 869958 out.go:177] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1210 00:25:19.146708 869958 out.go:177] - Using image registry.k8s.io/echoserver:1.4
I1210 00:25:19.146831 869958 addons.go:431] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1210 00:25:19.146870 869958 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1210 00:25:19.146930 869958 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-280963
I1210 00:25:19.155269 869958 addons.go:431] installing /etc/kubernetes/addons/dashboard-ns.yaml
I1210 00:25:19.155300 869958 ssh_runner.go:362] scp dashboard/dashboard-ns.yaml --> /etc/kubernetes/addons/dashboard-ns.yaml (759 bytes)
I1210 00:25:19.155368 869958 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-280963
I1210 00:25:19.173431 869958 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33625 SSHKeyPath:/home/jenkins/minikube-integration/20062-527107/.minikube/machines/old-k8s-version-280963/id_rsa Username:docker}
I1210 00:25:19.175360 869958 addons.go:431] installing /etc/kubernetes/addons/storageclass.yaml
I1210 00:25:19.175386 869958 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1210 00:25:19.175466 869958 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-280963
I1210 00:25:19.176455 869958 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33625 SSHKeyPath:/home/jenkins/minikube-integration/20062-527107/.minikube/machines/old-k8s-version-280963/id_rsa Username:docker}
I1210 00:25:19.179586 869958 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33625 SSHKeyPath:/home/jenkins/minikube-integration/20062-527107/.minikube/machines/old-k8s-version-280963/id_rsa Username:docker}
I1210 00:25:19.200164 869958 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33625 SSHKeyPath:/home/jenkins/minikube-integration/20062-527107/.minikube/machines/old-k8s-version-280963/id_rsa Username:docker}
I1210 00:25:19.211097 869958 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1210 00:25:19.235931 869958 node_ready.go:35] waiting up to 6m0s for node "old-k8s-version-280963" to be "Ready" ...
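The "waiting up to 6m0s for node ... to be Ready" step polls the node object until its NodeReady condition turns True. A hedged client-go sketch of such a wait loop (clientset construction omitted; import paths follow standard client-go layout, and this is not minikube's actual node_ready.go):

    package nodewait

    import (
        "context"
        "time"

        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/util/wait"
        "k8s.io/client-go/kubernetes"
    )

    // waitNodeReady polls until the named node reports NodeReady=True,
    // giving up after timeout, much like the 6m wait in the log.
    func waitNodeReady(cs kubernetes.Interface, name string, timeout time.Duration) error {
        return wait.PollImmediate(2*time.Second, timeout, func() (bool, error) {
            node, err := cs.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})
            if err != nil {
                return false, nil // apiserver may still be coming up; keep polling
            }
            for _, c := range node.Status.Conditions {
                if c.Type == corev1.NodeReady {
                    return c.Status == corev1.ConditionTrue, nil
                }
            }
            return false, nil
        })
    }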
I1210 00:25:19.289224 869958 addons.go:431] installing /etc/kubernetes/addons/metrics-server-deployment.yaml
I1210 00:25:19.289253 869958 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-deployment.yaml (1825 bytes)
I1210 00:25:19.292756 869958 addons.go:431] installing /etc/kubernetes/addons/dashboard-clusterrole.yaml
I1210 00:25:19.292785 869958 ssh_runner.go:362] scp dashboard/dashboard-clusterrole.yaml --> /etc/kubernetes/addons/dashboard-clusterrole.yaml (1001 bytes)
I1210 00:25:19.293449 869958 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1210 00:25:19.308631 869958 addons.go:431] installing /etc/kubernetes/addons/metrics-server-rbac.yaml
I1210 00:25:19.308664 869958 ssh_runner.go:362] scp metrics-server/metrics-server-rbac.yaml --> /etc/kubernetes/addons/metrics-server-rbac.yaml (2175 bytes)
I1210 00:25:19.311612 869958 addons.go:431] installing /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml
I1210 00:25:19.311637 869958 ssh_runner.go:362] scp dashboard/dashboard-clusterrolebinding.yaml --> /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml (1018 bytes)
I1210 00:25:19.330888 869958 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1210 00:25:19.332985 869958 addons.go:431] installing /etc/kubernetes/addons/metrics-server-service.yaml
I1210 00:25:19.333012 869958 ssh_runner.go:362] scp metrics-server/metrics-server-service.yaml --> /etc/kubernetes/addons/metrics-server-service.yaml (446 bytes)
I1210 00:25:19.342418 869958 addons.go:431] installing /etc/kubernetes/addons/dashboard-configmap.yaml
I1210 00:25:19.342451 869958 ssh_runner.go:362] scp dashboard/dashboard-configmap.yaml --> /etc/kubernetes/addons/dashboard-configmap.yaml (837 bytes)
I1210 00:25:19.352183 869958 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
I1210 00:25:19.362013 869958 addons.go:431] installing /etc/kubernetes/addons/dashboard-dp.yaml
I1210 00:25:19.362048 869958 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/dashboard-dp.yaml (4201 bytes)
W1210 00:25:19.441057 869958 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:19.441098 869958 retry.go:31] will retry after 136.557929ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
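Every addon apply that fails with "connection refused" (the apiserver is not up yet) is retried with a randomized, growing backoff: 136ms, 256ms, 356ms, and so on, as logged by retry.go:31. A minimal sketch of a jittered-retry helper in that spirit (not minikube's pkg/util/retry):

    package main

    import (
        "errors"
        "fmt"
        "math/rand"
        "time"
    )

    // retryWithJitter reruns fn until it succeeds or attempts are exhausted,
    // sleeping a randomized, growing delay between tries, in the spirit of
    // the "will retry after ..." lines above.
    func retryWithJitter(attempts int, base time.Duration, fn func() error) error {
        var err error
        for i := 0; i < attempts; i++ {
            if err = fn(); err == nil {
                return nil
            }
            d := base*time.Duration(i+1) + time.Duration(rand.Int63n(int64(base)))
            fmt.Printf("will retry after %v: %v\n", d, err)
            time.Sleep(d)
        }
        return err
    }

    func main() {
        calls := 0
        _ = retryWithJitter(5, 100*time.Millisecond, func() error {
            calls++
            if calls < 3 {
                return errors.New("connection to the server localhost:8443 was refused")
            }
            return nil
        })
    }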
I1210 00:25:19.441340 869958 addons.go:431] installing /etc/kubernetes/addons/dashboard-role.yaml
I1210 00:25:19.441364 869958 ssh_runner.go:362] scp dashboard/dashboard-role.yaml --> /etc/kubernetes/addons/dashboard-role.yaml (1724 bytes)
I1210 00:25:19.461303 869958 addons.go:431] installing /etc/kubernetes/addons/dashboard-rolebinding.yaml
I1210 00:25:19.461332 869958 ssh_runner.go:362] scp dashboard/dashboard-rolebinding.yaml --> /etc/kubernetes/addons/dashboard-rolebinding.yaml (1046 bytes)
W1210 00:25:19.471682 869958 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:19.471724 869958 retry.go:31] will retry after 256.782764ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
W1210 00:25:19.533140 869958 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:19.533178 869958 retry.go:31] will retry after 356.790722ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:19.533418 869958 addons.go:431] installing /etc/kubernetes/addons/dashboard-sa.yaml
I1210 00:25:19.533443 869958 ssh_runner.go:362] scp dashboard/dashboard-sa.yaml --> /etc/kubernetes/addons/dashboard-sa.yaml (837 bytes)
I1210 00:25:19.551744 869958 addons.go:431] installing /etc/kubernetes/addons/dashboard-secret.yaml
I1210 00:25:19.551774 869958 ssh_runner.go:362] scp dashboard/dashboard-secret.yaml --> /etc/kubernetes/addons/dashboard-secret.yaml (1389 bytes)
I1210 00:25:19.572234 869958 addons.go:431] installing /etc/kubernetes/addons/dashboard-svc.yaml
I1210 00:25:19.572272 869958 ssh_runner.go:362] scp dashboard/dashboard-svc.yaml --> /etc/kubernetes/addons/dashboard-svc.yaml (1294 bytes)
I1210 00:25:19.578363 869958 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml
I1210 00:25:19.590417 869958 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
W1210 00:25:19.642318 869958 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:19.642355 869958 retry.go:31] will retry after 409.498692ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
W1210 00:25:19.658043 869958 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:19.658078 869958 retry.go:31] will retry after 336.192915ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:19.729280 869958 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml
W1210 00:25:19.790632 869958 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:19.790674 869958 retry.go:31] will retry after 271.507932ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:19.891006 869958 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
W1210 00:25:19.948782 869958 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:19.948833 869958 retry.go:31] will retry after 480.017449ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:19.995178 869958 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
I1210 00:25:20.052617 869958 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml
W1210 00:25:20.055595 869958 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:20.055634 869958 retry.go:31] will retry after 258.237786ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:20.062752 869958 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml
W1210 00:25:20.113816 869958 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:20.113867 869958 retry.go:31] will retry after 337.236207ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
W1210 00:25:20.134111 869958 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:20.134150 869958 retry.go:31] will retry after 452.648164ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:20.314818 869958 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
W1210 00:25:20.376402 869958 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:20.376441 869958 retry.go:31] will retry after 627.261557ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:20.429572 869958 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
I1210 00:25:20.452029 869958 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml
W1210 00:25:20.502395 869958 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:20.502427 869958 retry.go:31] will retry after 599.949333ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
W1210 00:25:20.534055 869958 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:20.534115 869958 retry.go:31] will retry after 863.044778ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:20.587212 869958 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml
W1210 00:25:20.649358 869958 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:20.649396 869958 retry.go:31] will retry after 867.15191ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:21.004468 869958 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
W1210 00:25:21.066297 869958 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:21.066332 869958 retry.go:31] will retry after 1.033510101s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:21.102521 869958 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
W1210 00:25:21.163842 869958 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:21.163885 869958 retry.go:31] will retry after 525.41308ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:21.237452 869958 node_ready.go:53] error getting node "old-k8s-version-280963": Get "https://192.168.85.2:8443/api/v1/nodes/old-k8s-version-280963": dial tcp 192.168.85.2:8443: connect: connection refused
I1210 00:25:21.397745 869958 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml
W1210 00:25:21.459896 869958 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:21.459946 869958 retry.go:31] will retry after 1.529190224s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:21.517148 869958 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml
W1210 00:25:21.578278 869958 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:21.578320 869958 retry.go:31] will retry after 1.470604524s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:21.690510 869958 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
W1210 00:25:21.750121 869958 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:21.750160 869958 retry.go:31] will retry after 1.298538372s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:22.100712 869958 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
W1210 00:25:22.164081 869958 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:22.164127 869958 retry.go:31] will retry after 1.216792297s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
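(The apply attempts above fail while the apiserver on localhost:8443 is still coming up, so minikube's retry helper — the retry.go:31 lines — re-runs each command after a growing delay. A minimal sketch of that pattern, assuming a hypothetical `retryWithBackoff` helper and simple doubling with jitter; the real helper differs in detail:)

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// retryWithBackoff re-runs fn until it succeeds or attempts run out,
// sleeping a growing, jittered delay between tries -- an illustrative
// stand-in for minikube's retry helper, not its actual code.
func retryWithBackoff(attempts int, base time.Duration, fn func() error) error {
	var err error
	delay := base
	for i := 0; i < attempts; i++ {
		if err = fn(); err == nil {
			return nil
		}
		jitter := time.Duration(rand.Int63n(int64(delay) / 2))
		fmt.Printf("will retry after %v: %v\n", delay+jitter, err)
		time.Sleep(delay + jitter)
		delay *= 2
	}
	return fmt.Errorf("all %d attempts failed: %w", attempts, err)
}

func main() {
	attempt := 0
	err := retryWithBackoff(5, 250*time.Millisecond, func() error {
		attempt++
		if attempt < 3 {
			return fmt.Errorf("connection refused") // apiserver not up yet
		}
		return nil
	})
	fmt.Println("result:", err)
}
```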
I1210 00:25:22.990077 869958 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml
I1210 00:25:23.048850 869958 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
I1210 00:25:23.048998 869958 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml
W1210 00:25:23.049271 869958 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:23.049304 869958 retry.go:31] will retry after 1.666515899s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
W1210 00:25:23.109611 869958 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:23.109655 869958 retry.go:31] will retry after 2.559329091s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
W1210 00:25:23.116198 869958 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:23.116245 869958 retry.go:31] will retry after 2.643813451s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:23.382152 869958 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
W1210 00:25:23.445733 869958 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:23.445767 869958 retry.go:31] will retry after 1.352363896s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:23.736454 869958 node_ready.go:53] error getting node "old-k8s-version-280963": Get "https://192.168.85.2:8443/api/v1/nodes/old-k8s-version-280963": dial tcp 192.168.85.2:8443: connect: connection refused
I1210 00:25:24.717038 869958 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml
I1210 00:25:24.798881 869958 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
W1210 00:25:24.945734 869958 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:24.945869 869958 retry.go:31] will retry after 2.598618248s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
W1210 00:25:25.053822 869958 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:25.053860 869958 retry.go:31] will retry after 1.646204051s: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
I1210 00:25:25.669421 869958 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
I1210 00:25:25.760567 869958 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml
I1210 00:25:26.700707 869958 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
I1210 00:25:27.545614 869958 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml
I1210 00:25:29.533710 869958 node_ready.go:49] node "old-k8s-version-280963" has status "Ready":"True"
I1210 00:25:29.533820 869958 node_ready.go:38] duration metric: took 10.297855721s for node "old-k8s-version-280963" to be "Ready" ...
I1210 00:25:29.533846  869958 pod_ready.go:36] extra waiting up to 6m0s for all system-critical pods, including those with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler], to be "Ready" ...
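(The pod_ready.go lines that follow poll each system-critical pod until its PodReady condition turns True. A rough client-go equivalent, as a sketch — clientset construction is omitted and the function name is illustrative, not minikube's actual implementation:)

```go
package readiness

import (
	"context"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitPodReady polls a pod until its Ready condition is True or the
// timeout expires -- a simplified take on what pod_ready.go is doing.
func waitPodReady(ctx context.Context, cs kubernetes.Interface, ns, name string, timeout time.Duration) error {
	return wait.PollImmediate(2*time.Second, timeout, func() (bool, error) {
		pod, err := cs.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, nil // keep polling through transient API errors
		}
		for _, c := range pod.Status.Conditions {
			if c.Type == corev1.PodReady {
				return c.Status == corev1.ConditionTrue, nil
			}
		}
		return false, nil
	})
}
```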
I1210 00:25:29.639286 869958 pod_ready.go:79] waiting up to 6m0s for pod "coredns-74ff55c5b-45ksb" in "kube-system" namespace to be "Ready" ...
I1210 00:25:29.743362 869958 pod_ready.go:93] pod "coredns-74ff55c5b-45ksb" in "kube-system" namespace has status "Ready":"True"
I1210 00:25:29.743463 869958 pod_ready.go:82] duration metric: took 104.062424ms for pod "coredns-74ff55c5b-45ksb" in "kube-system" namespace to be "Ready" ...
I1210 00:25:29.743491 869958 pod_ready.go:79] waiting up to 6m0s for pod "etcd-old-k8s-version-280963" in "kube-system" namespace to be "Ready" ...
I1210 00:25:31.049317 869958 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: (5.379819734s)
I1210 00:25:31.049428 869958 addons.go:475] Verifying addon metrics-server=true in "old-k8s-version-280963"
I1210 00:25:31.049432 869958 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: (5.288821092s)
I1210 00:25:31.634955 869958 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: (4.089287795s)
I1210 00:25:31.635039 869958 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.20.0/kubectl apply --force -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: (4.93414481s)
I1210 00:25:31.636699 869958 out.go:177] * Some dashboard features require the metrics-server addon. To enable all features please run:
minikube -p old-k8s-version-280963 addons enable metrics-server
I1210 00:25:31.638191 869958 out.go:177] * Enabled addons: metrics-server, default-storageclass, storage-provisioner, dashboard
I1210 00:25:31.640257 869958 addons.go:510] duration metric: took 12.527203015s for enable addons: enabled=[metrics-server default-storageclass storage-provisioner dashboard]
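(Each ssh_runner apply above is one kubectl invocation with the addon's manifests chained through repeated -f flags and KUBECONFIG pinned to the in-node path. Roughly, and assuming local execution rather than minikube's SSH runner:)

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	// Manifest paths as they appear in the log above.
	manifests := []string{
		"/etc/kubernetes/addons/metrics-apiservice.yaml",
		"/etc/kubernetes/addons/metrics-server-deployment.yaml",
		"/etc/kubernetes/addons/metrics-server-rbac.yaml",
		"/etc/kubernetes/addons/metrics-server-service.yaml",
	}
	args := []string{"apply", "--force"}
	for _, m := range manifests {
		args = append(args, "-f", m)
	}
	cmd := exec.Command("/var/lib/minikube/binaries/v1.20.0/kubectl", args...)
	// Point kubectl at the kubeconfig minikube writes inside the node.
	cmd.Env = append(os.Environ(), "KUBECONFIG=/var/lib/minikube/kubeconfig")
	out, err := cmd.CombinedOutput()
	fmt.Printf("%s err=%v\n", out, err)
}
```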
I1210 00:25:31.755358 869958 pod_ready.go:103] pod "etcd-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"False"
I1210 00:25:34.248918 869958 pod_ready.go:103] pod "etcd-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"False"
I1210 00:25:36.249572 869958 pod_ready.go:103] pod "etcd-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"False"
I1210 00:25:38.249821 869958 pod_ready.go:103] pod "etcd-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"False"
I1210 00:25:40.749001 869958 pod_ready.go:103] pod "etcd-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"False"
I1210 00:25:42.749856 869958 pod_ready.go:103] pod "etcd-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"False"
I1210 00:25:45.249201 869958 pod_ready.go:103] pod "etcd-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"False"
I1210 00:25:47.250189 869958 pod_ready.go:103] pod "etcd-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"False"
I1210 00:25:49.751627 869958 pod_ready.go:103] pod "etcd-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"False"
I1210 00:25:52.254716 869958 pod_ready.go:103] pod "etcd-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"False"
I1210 00:25:54.751535 869958 pod_ready.go:103] pod "etcd-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"False"
I1210 00:25:57.249845 869958 pod_ready.go:103] pod "etcd-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"False"
I1210 00:25:59.750032 869958 pod_ready.go:103] pod "etcd-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"False"
I1210 00:26:02.249952 869958 pod_ready.go:103] pod "etcd-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"False"
I1210 00:26:04.250715 869958 pod_ready.go:103] pod "etcd-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"False"
I1210 00:26:06.749892 869958 pod_ready.go:103] pod "etcd-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"False"
I1210 00:26:09.249209 869958 pod_ready.go:103] pod "etcd-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"False"
I1210 00:26:11.250233 869958 pod_ready.go:103] pod "etcd-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"False"
I1210 00:26:13.749362 869958 pod_ready.go:103] pod "etcd-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"False"
I1210 00:26:15.749804 869958 pod_ready.go:103] pod "etcd-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"False"
I1210 00:26:17.751647 869958 pod_ready.go:103] pod "etcd-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"False"
I1210 00:26:20.250023 869958 pod_ready.go:103] pod "etcd-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"False"
I1210 00:26:22.251107 869958 pod_ready.go:103] pod "etcd-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"False"
I1210 00:26:24.251687 869958 pod_ready.go:103] pod "etcd-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"False"
I1210 00:26:26.750510 869958 pod_ready.go:103] pod "etcd-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"False"
I1210 00:26:29.252385 869958 pod_ready.go:103] pod "etcd-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"False"
I1210 00:26:31.751344 869958 pod_ready.go:103] pod "etcd-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"False"
I1210 00:26:32.254107 869958 pod_ready.go:93] pod "etcd-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"True"
I1210 00:26:32.254152 869958 pod_ready.go:82] duration metric: took 1m2.510643569s for pod "etcd-old-k8s-version-280963" in "kube-system" namespace to be "Ready" ...
I1210 00:26:32.254172 869958 pod_ready.go:79] waiting up to 6m0s for pod "kube-apiserver-old-k8s-version-280963" in "kube-system" namespace to be "Ready" ...
I1210 00:26:32.260304 869958 pod_ready.go:93] pod "kube-apiserver-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"True"
I1210 00:26:32.260332 869958 pod_ready.go:82] duration metric: took 6.135724ms for pod "kube-apiserver-old-k8s-version-280963" in "kube-system" namespace to be "Ready" ...
I1210 00:26:32.260347 869958 pod_ready.go:79] waiting up to 6m0s for pod "kube-controller-manager-old-k8s-version-280963" in "kube-system" namespace to be "Ready" ...
I1210 00:26:34.268497 869958 pod_ready.go:103] pod "kube-controller-manager-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"False"
I1210 00:26:36.766637 869958 pod_ready.go:103] pod "kube-controller-manager-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"False"
I1210 00:26:39.266049 869958 pod_ready.go:103] pod "kube-controller-manager-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"False"
I1210 00:26:41.267319 869958 pod_ready.go:103] pod "kube-controller-manager-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"False"
I1210 00:26:43.765455 869958 pod_ready.go:103] pod "kube-controller-manager-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"False"
I1210 00:26:45.768506 869958 pod_ready.go:103] pod "kube-controller-manager-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"False"
I1210 00:26:48.267369 869958 pod_ready.go:103] pod "kube-controller-manager-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"False"
I1210 00:26:50.767018 869958 pod_ready.go:103] pod "kube-controller-manager-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"False"
I1210 00:26:53.267354 869958 pod_ready.go:93] pod "kube-controller-manager-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"True"
I1210 00:26:53.267387 869958 pod_ready.go:82] duration metric: took 21.007031158s for pod "kube-controller-manager-old-k8s-version-280963" in "kube-system" namespace to be "Ready" ...
I1210 00:26:53.267402 869958 pod_ready.go:79] waiting up to 6m0s for pod "kube-proxy-qb2z4" in "kube-system" namespace to be "Ready" ...
I1210 00:26:53.272279 869958 pod_ready.go:93] pod "kube-proxy-qb2z4" in "kube-system" namespace has status "Ready":"True"
I1210 00:26:53.272302 869958 pod_ready.go:82] duration metric: took 4.892582ms for pod "kube-proxy-qb2z4" in "kube-system" namespace to be "Ready" ...
I1210 00:26:53.272311 869958 pod_ready.go:79] waiting up to 6m0s for pod "kube-scheduler-old-k8s-version-280963" in "kube-system" namespace to be "Ready" ...
I1210 00:26:55.278374 869958 pod_ready.go:103] pod "kube-scheduler-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"False"
I1210 00:26:57.778574 869958 pod_ready.go:103] pod "kube-scheduler-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"False"
I1210 00:26:59.780845 869958 pod_ready.go:103] pod "kube-scheduler-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"False"
I1210 00:27:00.279229 869958 pod_ready.go:93] pod "kube-scheduler-old-k8s-version-280963" in "kube-system" namespace has status "Ready":"True"
I1210 00:27:00.279257 869958 pod_ready.go:82] duration metric: took 7.006938026s for pod "kube-scheduler-old-k8s-version-280963" in "kube-system" namespace to be "Ready" ...
I1210 00:27:00.279355 869958 pod_ready.go:79] waiting up to 6m0s for pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace to be "Ready" ...
I1210 00:27:02.285732 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:27:04.785454 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:27:06.785491 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:27:09.293784 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:27:11.784823 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:27:14.285252 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:27:16.285550 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:27:18.285842 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:27:20.286226 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:27:22.785073 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:27:24.785564 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:27:27.286243 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:27:29.803161 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:27:32.285546 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:27:34.286118 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:27:36.785634 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:27:38.785899 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:27:41.285224 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:27:43.285577 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:27:45.285991 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:27:47.784625 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:27:49.786301 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:27:52.285363 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:27:54.285460 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:27:56.786115 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:27:59.285254 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:28:01.285483 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:28:03.785176 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:28:05.785246 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:28:07.785452 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:28:10.284795 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:28:12.285232 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:28:14.785094 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:28:16.785660 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:28:19.287937 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:28:21.784924 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:28:23.785171 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:28:25.785921 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:28:28.285782 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:28:30.285823 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:28:32.785726 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:28:35.285944 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:28:37.785251 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:28:40.284596 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:28:42.285244 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:28:44.785547 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:28:47.284970 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:28:49.286322 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:28:51.786544 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:28:54.286136 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:28:56.287180 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:28:58.785990 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:29:00.786571 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:29:02.787217 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:29:05.284892 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:29:07.286113 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:29:09.287066 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:29:11.787108 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:29:14.286002 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:29:16.286339 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:29:18.785927 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:29:20.786144 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:29:23.285107 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:29:25.285691 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:29:27.786048 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:29:30.284394 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:29:32.285567 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:29:34.789978 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:29:37.285571 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:29:39.785467 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:29:41.786195 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:29:44.285629 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:29:46.286997 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:29:48.784966 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:29:50.785565 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:29:53.287484 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:29:55.785238 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:29:58.285947 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:00.784947 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:02.785627 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:05.285425 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:07.785233 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:10.285053 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:12.785346 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:15.284585 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:17.285141 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:19.785686 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:21.786049 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:24.285409 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:26.286022 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:28.785097 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:30.785142 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:33.285545 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:35.785510 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:38.284978 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:40.784638 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:42.784707 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:44.784825 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:47.285489 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:49.286104 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:51.785143 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:53.786017 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:56.285495 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:58.285964 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:31:00.285755 869958 pod_ready.go:82] duration metric: took 4m0.006380848s for pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace to be "Ready" ...
E1210 00:31:00.285781 869958 pod_ready.go:67] WaitExtra: waitPodCondition: context deadline exceeded
I1210 00:31:00.285790  869958 pod_ready.go:39] duration metric: took 5m30.751897187s for extra waiting for all system-critical pods and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
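(The 4m and 6m budgets above come from bounding each wait with a deadline; when it expires the wait returns context deadline exceeded, as in the WaitExtra line, and the run moves on to log collection. Schematically, with shortened durations for the example:)

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

func main() {
	// Bound the wait the way pod_ready.go bounds its budget
	// (100ms here stands in for the 4m/6m limits in the log).
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	ready := make(chan struct{}) // never closed: a pod that never turns Ready
	select {
	case <-ready:
		fmt.Println("pod Ready")
	case <-ctx.Done():
		// Matches the log's "context deadline exceeded" outcome.
		fmt.Println("WaitExtra:", ctx.Err(), errors.Is(ctx.Err(), context.DeadlineExceeded))
	}
}
```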
I1210 00:31:00.285822 869958 api_server.go:52] waiting for apiserver process to appear ...
I1210 00:31:00.285858 869958 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1210 00:31:00.285917 869958 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1210 00:31:00.324417 869958 cri.go:89] found id: "9be25993b65b8cdca34c64615c37d67ed96191f7e935d4aa5f3f20b8a71af72d"
I1210 00:31:00.324440 869958 cri.go:89] found id: ""
I1210 00:31:00.324448 869958 logs.go:282] 1 containers: [9be25993b65b8cdca34c64615c37d67ed96191f7e935d4aa5f3f20b8a71af72d]
I1210 00:31:00.324499 869958 ssh_runner.go:195] Run: which crictl
I1210 00:31:00.328595 869958 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1210 00:31:00.328691 869958 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1210 00:31:00.364828 869958 cri.go:89] found id: "de4e779f2f1e9dbb4a147473498f66677a264e01c0c74453fc0137f378cf8ae2"
I1210 00:31:00.364857 869958 cri.go:89] found id: ""
I1210 00:31:00.364868 869958 logs.go:282] 1 containers: [de4e779f2f1e9dbb4a147473498f66677a264e01c0c74453fc0137f378cf8ae2]
I1210 00:31:00.364938 869958 ssh_runner.go:195] Run: which crictl
I1210 00:31:00.368615 869958 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1210 00:31:00.368696 869958 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1210 00:31:00.403140 869958 cri.go:89] found id: "d00996a4380040decb2e6f3c9bcc65ff7f12c74a6f6817167177166144b883f0"
I1210 00:31:00.403164 869958 cri.go:89] found id: ""
I1210 00:31:00.403174 869958 logs.go:282] 1 containers: [d00996a4380040decb2e6f3c9bcc65ff7f12c74a6f6817167177166144b883f0]
I1210 00:31:00.403233 869958 ssh_runner.go:195] Run: which crictl
I1210 00:31:00.406693 869958 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1210 00:31:00.406754 869958 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1210 00:31:00.440261 869958 cri.go:89] found id: "e24a785fdbd96316943085ea3d97c2bbf5698967fcac01b25a6a185f04e80b07"
I1210 00:31:00.440286 869958 cri.go:89] found id: ""
I1210 00:31:00.440294 869958 logs.go:282] 1 containers: [e24a785fdbd96316943085ea3d97c2bbf5698967fcac01b25a6a185f04e80b07]
I1210 00:31:00.440356 869958 ssh_runner.go:195] Run: which crictl
I1210 00:31:00.443836 869958 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1210 00:31:00.443908 869958 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1210 00:31:00.478920 869958 cri.go:89] found id: "930a4290304a3700a3b34bb588be1a8cb0fc8fc88f3c3adb4bc89453498c1ba1"
I1210 00:31:00.478945 869958 cri.go:89] found id: ""
I1210 00:31:00.478955 869958 logs.go:282] 1 containers: [930a4290304a3700a3b34bb588be1a8cb0fc8fc88f3c3adb4bc89453498c1ba1]
I1210 00:31:00.479020 869958 ssh_runner.go:195] Run: which crictl
I1210 00:31:00.482648 869958 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1210 00:31:00.482713 869958 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1210 00:31:00.517931 869958 cri.go:89] found id: "7f21b5ae0b202880d6305e4c384b15e18f96a0e7d3fdf3efa45355a2af113e82"
I1210 00:31:00.517959 869958 cri.go:89] found id: ""
I1210 00:31:00.517969 869958 logs.go:282] 1 containers: [7f21b5ae0b202880d6305e4c384b15e18f96a0e7d3fdf3efa45355a2af113e82]
I1210 00:31:00.518027 869958 ssh_runner.go:195] Run: which crictl
I1210 00:31:00.522393 869958 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1210 00:31:00.522470 869958 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1210 00:31:00.558076 869958 cri.go:89] found id: "1436cfab0a61117409dfbc149a9ed46cffc35222a59e469b4357eb8eeb006a1a"
I1210 00:31:00.558099 869958 cri.go:89] found id: ""
I1210 00:31:00.558107 869958 logs.go:282] 1 containers: [1436cfab0a61117409dfbc149a9ed46cffc35222a59e469b4357eb8eeb006a1a]
I1210 00:31:00.558159 869958 ssh_runner.go:195] Run: which crictl
I1210 00:31:00.561741 869958 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1210 00:31:00.561812 869958 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1210 00:31:00.598626 869958 cri.go:89] found id: "b9cf656a0d778fa858636c57f7ed856932e9c797614d0b2a0bb2b7b183d0444e"
I1210 00:31:00.598664 869958 cri.go:89] found id: "5ef64915e71a0a82e907f051bd349d35990910b7707e2189239897f76b8fcf24"
I1210 00:31:00.598674 869958 cri.go:89] found id: ""
I1210 00:31:00.598682 869958 logs.go:282] 2 containers: [b9cf656a0d778fa858636c57f7ed856932e9c797614d0b2a0bb2b7b183d0444e 5ef64915e71a0a82e907f051bd349d35990910b7707e2189239897f76b8fcf24]
I1210 00:31:00.598746 869958 ssh_runner.go:195] Run: which crictl
I1210 00:31:00.602345 869958 ssh_runner.go:195] Run: which crictl
I1210 00:31:00.605648 869958 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kubernetes-dashboard Namespaces:[]}
I1210 00:31:00.605713 869958 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kubernetes-dashboard
I1210 00:31:00.638537 869958 cri.go:89] found id: "71efdd7b3ff7301fc234121aa9b522c561e17518296c9ac0f096808fd717194e"
I1210 00:31:00.638564 869958 cri.go:89] found id: ""
I1210 00:31:00.638574 869958 logs.go:282] 1 containers: [71efdd7b3ff7301fc234121aa9b522c561e17518296c9ac0f096808fd717194e]
I1210 00:31:00.638635 869958 ssh_runner.go:195] Run: which crictl
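The block above is minikube's container-discovery pass: for each control-plane component, cri.go asks crictl for every container ID (running or exited) whose name matches, and resolves the crictl binary with `which` before moving on to the next component. A minimal sketch of the same enumeration, assuming shell access to the node and the default containerd CRI socket; the component list mirrors the names queried in the log:

    # One crictl query per component, as in the cri.go lines above.
    # --quiet prints bare container IDs; -a includes exited containers.
    for name in kube-apiserver etcd coredns kube-scheduler kube-proxy \
                kube-controller-manager kindnet storage-provisioner kubernetes-dashboard; do
      ids=$(sudo crictl ps -a --quiet --name="$name")
      printf '%s: %s\n' "$name" "${ids:-<none>}"
    done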
I1210 00:31:00.642267 869958 logs.go:123] Gathering logs for kubelet ...
I1210 00:31:00.642297 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
W1210 00:31:00.684072 869958 logs.go:138] Found kubelet problem: Dec 10 00:25:36 old-k8s-version-280963 kubelet[1066]: E1210 00:25:36.013371 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = failed to pull and unpack image \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to resolve reference \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to do request: Head \"https://fake.domain/v2/registry.k8s.io/echoserver/manifests/1.4\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
W1210 00:31:00.684251 869958 logs.go:138] Found kubelet problem: Dec 10 00:25:36 old-k8s-version-280963 kubelet[1066]: E1210 00:25:36.276847 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.686239 869958 logs.go:138] Found kubelet problem: Dec 10 00:25:49 old-k8s-version-280963 kubelet[1066]: E1210 00:25:49.092116 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = failed to pull and unpack image \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to resolve reference \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to do request: Head \"https://fake.domain/v2/registry.k8s.io/echoserver/manifests/1.4\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
W1210 00:31:00.687741 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:00 old-k8s-version-280963 kubelet[1066]: E1210 00:26:00.341400 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 10s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.687978 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:01 old-k8s-version-280963 kubelet[1066]: E1210 00:26:01.348361 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 10s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.688111 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:02 old-k8s-version-280963 kubelet[1066]: E1210 00:26:02.063829 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.688445 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:02 old-k8s-version-280963 kubelet[1066]: E1210 00:26:02.351893 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 10s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.690436 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:14 old-k8s-version-280963 kubelet[1066]: E1210 00:26:14.082796 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = failed to pull and unpack image \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to resolve reference \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to do request: Head \"https://fake.domain/v2/registry.k8s.io/echoserver/manifests/1.4\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
W1210 00:31:00.691134 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:16 old-k8s-version-280963 kubelet[1066]: E1210 00:26:16.385007 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.691375 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:20 old-k8s-version-280963 kubelet[1066]: E1210 00:26:20.929425 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.691523 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:25 old-k8s-version-280963 kubelet[1066]: E1210 00:26:25.063820 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.691758 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:32 old-k8s-version-280963 kubelet[1066]: E1210 00:26:32.063614 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.691889 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:37 old-k8s-version-280963 kubelet[1066]: E1210 00:26:37.063859 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.692313 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:45 old-k8s-version-280963 kubelet[1066]: E1210 00:26:45.451805 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.692572 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:50 old-k8s-version-280963 kubelet[1066]: E1210 00:26:50.929571 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.692717 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:51 old-k8s-version-280963 kubelet[1066]: E1210 00:26:51.063691 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.692950 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:04 old-k8s-version-280963 kubelet[1066]: E1210 00:27:04.063486 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.694659 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:06 old-k8s-version-280963 kubelet[1066]: E1210 00:27:06.100485 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = failed to pull and unpack image \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to resolve reference \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to do request: Head \"https://fake.domain/v2/registry.k8s.io/echoserver/manifests/1.4\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
W1210 00:31:00.694960 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:16 old-k8s-version-280963 kubelet[1066]: E1210 00:27:16.063301 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.695110 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:18 old-k8s-version-280963 kubelet[1066]: E1210 00:27:18.063936 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.695245 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:29 old-k8s-version-280963 kubelet[1066]: E1210 00:27:29.063910 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.695668 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:30 old-k8s-version-280963 kubelet[1066]: E1210 00:27:30.551122 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 1m20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.695901 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:31 old-k8s-version-280963 kubelet[1066]: E1210 00:27:31.554624 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 1m20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.696137 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:42 old-k8s-version-280963 kubelet[1066]: E1210 00:27:42.063651 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 1m20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.696291 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:43 old-k8s-version-280963 kubelet[1066]: E1210 00:27:43.063770 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.696535 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:54 old-k8s-version-280963 kubelet[1066]: E1210 00:27:54.063558 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 1m20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.696667 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:55 old-k8s-version-280963 kubelet[1066]: E1210 00:27:55.063561 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.696899 869958 logs.go:138] Found kubelet problem: Dec 10 00:28:05 old-k8s-version-280963 kubelet[1066]: E1210 00:28:05.063379 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 1m20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.697036 869958 logs.go:138] Found kubelet problem: Dec 10 00:28:10 old-k8s-version-280963 kubelet[1066]: E1210 00:28:10.063837 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.697268 869958 logs.go:138] Found kubelet problem: Dec 10 00:28:18 old-k8s-version-280963 kubelet[1066]: E1210 00:28:18.063477 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 1m20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.697399 869958 logs.go:138] Found kubelet problem: Dec 10 00:28:23 old-k8s-version-280963 kubelet[1066]: E1210 00:28:23.063704 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.697631 869958 logs.go:138] Found kubelet problem: Dec 10 00:28:29 old-k8s-version-280963 kubelet[1066]: E1210 00:28:29.063218 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 1m20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.699384 869958 logs.go:138] Found kubelet problem: Dec 10 00:28:36 old-k8s-version-280963 kubelet[1066]: E1210 00:28:36.089234 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = failed to pull and unpack image \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to resolve reference \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to do request: Head \"https://fake.domain/v2/registry.k8s.io/echoserver/manifests/1.4\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
W1210 00:31:00.699619 869958 logs.go:138] Found kubelet problem: Dec 10 00:28:40 old-k8s-version-280963 kubelet[1066]: E1210 00:28:40.063266 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 1m20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.699750 869958 logs.go:138] Found kubelet problem: Dec 10 00:28:50 old-k8s-version-280963 kubelet[1066]: E1210 00:28:50.063870 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.700169 869958 logs.go:138] Found kubelet problem: Dec 10 00:28:55 old-k8s-version-280963 kubelet[1066]: E1210 00:28:55.726230 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.700403 869958 logs.go:138] Found kubelet problem: Dec 10 00:29:00 old-k8s-version-280963 kubelet[1066]: E1210 00:29:00.929346 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.700534 869958 logs.go:138] Found kubelet problem: Dec 10 00:29:01 old-k8s-version-280963 kubelet[1066]: E1210 00:29:01.063931 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.700665 869958 logs.go:138] Found kubelet problem: Dec 10 00:29:12 old-k8s-version-280963 kubelet[1066]: E1210 00:29:12.063883 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.700897 869958 logs.go:138] Found kubelet problem: Dec 10 00:29:13 old-k8s-version-280963 kubelet[1066]: E1210 00:29:13.063415 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.701157 869958 logs.go:138] Found kubelet problem: Dec 10 00:29:25 old-k8s-version-280963 kubelet[1066]: E1210 00:29:25.063471 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.701316 869958 logs.go:138] Found kubelet problem: Dec 10 00:29:25 old-k8s-version-280963 kubelet[1066]: E1210 00:29:25.063913 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.701550 869958 logs.go:138] Found kubelet problem: Dec 10 00:29:36 old-k8s-version-280963 kubelet[1066]: E1210 00:29:36.063693 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.701682 869958 logs.go:138] Found kubelet problem: Dec 10 00:29:36 old-k8s-version-280963 kubelet[1066]: E1210 00:29:36.063872 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.701914 869958 logs.go:138] Found kubelet problem: Dec 10 00:29:49 old-k8s-version-280963 kubelet[1066]: E1210 00:29:49.063306 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.702050 869958 logs.go:138] Found kubelet problem: Dec 10 00:29:50 old-k8s-version-280963 kubelet[1066]: E1210 00:29:50.063838 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.702287 869958 logs.go:138] Found kubelet problem: Dec 10 00:30:03 old-k8s-version-280963 kubelet[1066]: E1210 00:30:03.063224 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.702419 869958 logs.go:138] Found kubelet problem: Dec 10 00:30:05 old-k8s-version-280963 kubelet[1066]: E1210 00:30:05.063807 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.702550 869958 logs.go:138] Found kubelet problem: Dec 10 00:30:17 old-k8s-version-280963 kubelet[1066]: E1210 00:30:17.063774 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.702784 869958 logs.go:138] Found kubelet problem: Dec 10 00:30:18 old-k8s-version-280963 kubelet[1066]: E1210 00:30:18.063380 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.703080 869958 logs.go:138] Found kubelet problem: Dec 10 00:30:29 old-k8s-version-280963 kubelet[1066]: E1210 00:30:29.063392 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.703219 869958 logs.go:138] Found kubelet problem: Dec 10 00:30:32 old-k8s-version-280963 kubelet[1066]: E1210 00:30:32.063891 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.703456 869958 logs.go:138] Found kubelet problem: Dec 10 00:30:41 old-k8s-version-280963 kubelet[1066]: E1210 00:30:41.063206 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.703587 869958 logs.go:138] Found kubelet problem: Dec 10 00:30:44 old-k8s-version-280963 kubelet[1066]: E1210 00:30:44.064113 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.703818 869958 logs.go:138] Found kubelet problem: Dec 10 00:30:54 old-k8s-version-280963 kubelet[1066]: E1210 00:30:54.063639 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.703952 869958 logs.go:138] Found kubelet problem: Dec 10 00:30:56 old-k8s-version-280963 kubelet[1066]: E1210 00:30:56.063846 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
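Every "Found kubelet problem" above reduces to the same two failures, re-reported on each scan of the kubelet journal. metrics-server can never pull fake.domain/registry.k8s.io/echoserver:1.4 because fake.domain is deliberately unresolvable in this test, so each pull attempt ends in ErrImagePull and the kubelet reports ImagePullBackOff in between attempts. dashboard-metrics-scraper keeps crashing, and the kubelet's exponential restart back-off is visible as the delay doubles through the messages: 10s, 20s, 40s, 1m20s, 2m40s. One hedged way to confirm both states from outside the node, assuming kubectl is pointed at this profile's kubeconfig (the pod names are the ones in the log):

    # Image-pull failure detail for the metrics-server pod.
    kubectl -n kube-system describe pod metrics-server-9975d5f86-9wg6p
    # Waiting reason (e.g. CrashLoopBackOff) while the scraper is backing off.
    kubectl -n kubernetes-dashboard get pod dashboard-metrics-scraper-8d5bb5db8-h78fg \
      -o jsonpath='{.status.containerStatuses[0].state.waiting.reason}'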
I1210 00:31:00.703964 869958 logs.go:123] Gathering logs for kube-proxy [930a4290304a3700a3b34bb588be1a8cb0fc8fc88f3c3adb4bc89453498c1ba1] ...
I1210 00:31:00.703989 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 930a4290304a3700a3b34bb588be1a8cb0fc8fc88f3c3adb4bc89453498c1ba1"
I1210 00:31:00.739278 869958 logs.go:123] Gathering logs for kube-controller-manager [7f21b5ae0b202880d6305e4c384b15e18f96a0e7d3fdf3efa45355a2af113e82] ...
I1210 00:31:00.739323 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 7f21b5ae0b202880d6305e4c384b15e18f96a0e7d3fdf3efa45355a2af113e82"
I1210 00:31:00.809749 869958 logs.go:123] Gathering logs for storage-provisioner [5ef64915e71a0a82e907f051bd349d35990910b7707e2189239897f76b8fcf24] ...
I1210 00:31:00.809793 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 5ef64915e71a0a82e907f051bd349d35990910b7707e2189239897f76b8fcf24"
I1210 00:31:00.843898 869958 logs.go:123] Gathering logs for containerd ...
I1210 00:31:00.843932 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1210 00:31:00.905189 869958 logs.go:123] Gathering logs for kube-apiserver [9be25993b65b8cdca34c64615c37d67ed96191f7e935d4aa5f3f20b8a71af72d] ...
I1210 00:31:00.905248 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 9be25993b65b8cdca34c64615c37d67ed96191f7e935d4aa5f3f20b8a71af72d"
I1210 00:31:00.975171 869958 logs.go:123] Gathering logs for kube-scheduler [e24a785fdbd96316943085ea3d97c2bbf5698967fcac01b25a6a185f04e80b07] ...
I1210 00:31:00.975214 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 e24a785fdbd96316943085ea3d97c2bbf5698967fcac01b25a6a185f04e80b07"
I1210 00:31:01.018685 869958 logs.go:123] Gathering logs for kubernetes-dashboard [71efdd7b3ff7301fc234121aa9b522c561e17518296c9ac0f096808fd717194e] ...
I1210 00:31:01.018727 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 71efdd7b3ff7301fc234121aa9b522c561e17518296c9ac0f096808fd717194e"
I1210 00:31:01.055194 869958 logs.go:123] Gathering logs for dmesg ...
I1210 00:31:01.055228 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1210 00:31:01.082490 869958 logs.go:123] Gathering logs for describe nodes ...
I1210 00:31:01.082531 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.20.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
I1210 00:31:01.188477 869958 logs.go:123] Gathering logs for etcd [de4e779f2f1e9dbb4a147473498f66677a264e01c0c74453fc0137f378cf8ae2] ...
I1210 00:31:01.188515 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 de4e779f2f1e9dbb4a147473498f66677a264e01c0c74453fc0137f378cf8ae2"
I1210 00:31:01.231162 869958 logs.go:123] Gathering logs for kindnet [1436cfab0a61117409dfbc149a9ed46cffc35222a59e469b4357eb8eeb006a1a] ...
I1210 00:31:01.231200 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 1436cfab0a61117409dfbc149a9ed46cffc35222a59e469b4357eb8eeb006a1a"
I1210 00:31:01.270495 869958 logs.go:123] Gathering logs for storage-provisioner [b9cf656a0d778fa858636c57f7ed856932e9c797614d0b2a0bb2b7b183d0444e] ...
I1210 00:31:01.270532 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 b9cf656a0d778fa858636c57f7ed856932e9c797614d0b2a0bb2b7b183d0444e"
I1210 00:31:01.304676 869958 logs.go:123] Gathering logs for container status ...
I1210 00:31:01.304717 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1210 00:31:01.342082 869958 logs.go:123] Gathering logs for coredns [d00996a4380040decb2e6f3c9bcc65ff7f12c74a6f6817167177166144b883f0] ...
I1210 00:31:01.342114 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 d00996a4380040decb2e6f3c9bcc65ff7f12c74a6f6817167177166144b883f0"
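One step in the gather pass above is worth unpacking: the "container status" command wraps `which crictl || echo crictl` in backquotes, so when crictl is missing from PATH the bare name is substituted, the sudo invocation fails cleanly, and the `|| sudo docker ps -a` branch takes over on docker-based runtimes. The rest of the pass is uniform: journalctl for the kubelet and containerd units, dmesg for the kernel ring buffer, `kubectl describe nodes` against the node-local kubeconfig, and `crictl logs --tail 400 <id>` for each container discovered earlier. The fail-over idiom on its own, copyable as shown in the log:

    # Prefer crictl for container status; fall back to docker if the
    # crictl invocation cannot run or errors out.
    sudo `which crictl || echo crictl` ps -a || sudo docker ps -a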
I1210 00:31:01.377229 869958 out.go:358] Setting ErrFile to fd 2...
I1210 00:31:01.377257 869958 out.go:392] TERM=,COLORTERM=, which probably does not support color
W1210 00:31:01.377336 869958 out.go:270] X Problems detected in kubelet:
W1210 00:31:01.377354 869958 out.go:270] Dec 10 00:30:32 old-k8s-version-280963 kubelet[1066]: E1210 00:30:32.063891 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:01.377363 869958 out.go:270] Dec 10 00:30:41 old-k8s-version-280963 kubelet[1066]: E1210 00:30:41.063206 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:01.377375 869958 out.go:270] Dec 10 00:30:44 old-k8s-version-280963 kubelet[1066]: E1210 00:30:44.064113 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:01.377384 869958 out.go:270] Dec 10 00:30:54 old-k8s-version-280963 kubelet[1066]: E1210 00:30:54.063639 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:01.377397 869958 out.go:270] Dec 10 00:30:56 old-k8s-version-280963 kubelet[1066]: E1210 00:30:56.063846 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
I1210 00:31:01.377405 869958 out.go:358] Setting ErrFile to fd 2...
I1210 00:31:01.377416 869958 out.go:392] TERM=,COLORTERM=, which probably does not support color
I1210 00:31:11.378266 869958 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1210 00:31:11.390994 869958 api_server.go:72] duration metric: took 5m52.278015509s to wait for apiserver process to appear ...
I1210 00:31:11.391028 869958 api_server.go:88] waiting for apiserver healthz status ...
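At 00:31:11 the runner finds a kube-apiserver process with pgrep, 5m52s after this start began, and moves on to waiting for a healthy /healthz response; each poll iteration re-runs the container enumeration and log gather, which is why the lines below repeat the 00:31:00 pass almost verbatim. (The Setting ErrFile/TERM lines above bracket each summary: problems go to stderr, fd 2, with color disabled because TERM and COLORTERM are empty.) Assuming kubectl is aimed at this profile, the same health signal can be read with:

    # apiserver health as seen through the API; minikube's healthz wait
    # polls the endpoint directly instead of going through kubectl.
    kubectl get --raw /healthz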
I1210 00:31:11.391084 869958 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1210 00:31:11.391155 869958 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1210 00:31:11.425078 869958 cri.go:89] found id: "9be25993b65b8cdca34c64615c37d67ed96191f7e935d4aa5f3f20b8a71af72d"
I1210 00:31:11.425104 869958 cri.go:89] found id: ""
I1210 00:31:11.425113 869958 logs.go:282] 1 containers: [9be25993b65b8cdca34c64615c37d67ed96191f7e935d4aa5f3f20b8a71af72d]
I1210 00:31:11.425183 869958 ssh_runner.go:195] Run: which crictl
I1210 00:31:11.428759 869958 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1210 00:31:11.428836 869958 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1210 00:31:11.463276 869958 cri.go:89] found id: "de4e779f2f1e9dbb4a147473498f66677a264e01c0c74453fc0137f378cf8ae2"
I1210 00:31:11.463305 869958 cri.go:89] found id: ""
I1210 00:31:11.463313 869958 logs.go:282] 1 containers: [de4e779f2f1e9dbb4a147473498f66677a264e01c0c74453fc0137f378cf8ae2]
I1210 00:31:11.463360 869958 ssh_runner.go:195] Run: which crictl
I1210 00:31:11.467102 869958 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1210 00:31:11.467171 869958 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1210 00:31:11.503957 869958 cri.go:89] found id: "d00996a4380040decb2e6f3c9bcc65ff7f12c74a6f6817167177166144b883f0"
I1210 00:31:11.504006 869958 cri.go:89] found id: ""
I1210 00:31:11.504016 869958 logs.go:282] 1 containers: [d00996a4380040decb2e6f3c9bcc65ff7f12c74a6f6817167177166144b883f0]
I1210 00:31:11.504079 869958 ssh_runner.go:195] Run: which crictl
I1210 00:31:11.507966 869958 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1210 00:31:11.508041 869958 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1210 00:31:11.542392 869958 cri.go:89] found id: "e24a785fdbd96316943085ea3d97c2bbf5698967fcac01b25a6a185f04e80b07"
I1210 00:31:11.542415 869958 cri.go:89] found id: ""
I1210 00:31:11.542422 869958 logs.go:282] 1 containers: [e24a785fdbd96316943085ea3d97c2bbf5698967fcac01b25a6a185f04e80b07]
I1210 00:31:11.542484 869958 ssh_runner.go:195] Run: which crictl
I1210 00:31:11.546043 869958 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1210 00:31:11.546105 869958 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1210 00:31:11.583274 869958 cri.go:89] found id: "930a4290304a3700a3b34bb588be1a8cb0fc8fc88f3c3adb4bc89453498c1ba1"
I1210 00:31:11.583305 869958 cri.go:89] found id: ""
I1210 00:31:11.583316 869958 logs.go:282] 1 containers: [930a4290304a3700a3b34bb588be1a8cb0fc8fc88f3c3adb4bc89453498c1ba1]
I1210 00:31:11.583376 869958 ssh_runner.go:195] Run: which crictl
I1210 00:31:11.587533 869958 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1210 00:31:11.587622 869958 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1210 00:31:11.622287 869958 cri.go:89] found id: "7f21b5ae0b202880d6305e4c384b15e18f96a0e7d3fdf3efa45355a2af113e82"
I1210 00:31:11.622329 869958 cri.go:89] found id: ""
I1210 00:31:11.622338 869958 logs.go:282] 1 containers: [7f21b5ae0b202880d6305e4c384b15e18f96a0e7d3fdf3efa45355a2af113e82]
I1210 00:31:11.622399 869958 ssh_runner.go:195] Run: which crictl
I1210 00:31:11.626227 869958 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1210 00:31:11.626300 869958 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1210 00:31:11.661096 869958 cri.go:89] found id: "1436cfab0a61117409dfbc149a9ed46cffc35222a59e469b4357eb8eeb006a1a"
I1210 00:31:11.661119 869958 cri.go:89] found id: ""
I1210 00:31:11.661126 869958 logs.go:282] 1 containers: [1436cfab0a61117409dfbc149a9ed46cffc35222a59e469b4357eb8eeb006a1a]
I1210 00:31:11.661173 869958 ssh_runner.go:195] Run: which crictl
I1210 00:31:11.664907 869958 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kubernetes-dashboard Namespaces:[]}
I1210 00:31:11.664974 869958 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kubernetes-dashboard
I1210 00:31:11.701413 869958 cri.go:89] found id: "71efdd7b3ff7301fc234121aa9b522c561e17518296c9ac0f096808fd717194e"
I1210 00:31:11.701439 869958 cri.go:89] found id: ""
I1210 00:31:11.701448 869958 logs.go:282] 1 containers: [71efdd7b3ff7301fc234121aa9b522c561e17518296c9ac0f096808fd717194e]
I1210 00:31:11.701498 869958 ssh_runner.go:195] Run: which crictl
I1210 00:31:11.705199 869958 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1210 00:31:11.705268 869958 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1210 00:31:11.739637 869958 cri.go:89] found id: "b9cf656a0d778fa858636c57f7ed856932e9c797614d0b2a0bb2b7b183d0444e"
I1210 00:31:11.739669 869958 cri.go:89] found id: "5ef64915e71a0a82e907f051bd349d35990910b7707e2189239897f76b8fcf24"
I1210 00:31:11.739674 869958 cri.go:89] found id: ""
I1210 00:31:11.739682 869958 logs.go:282] 2 containers: [b9cf656a0d778fa858636c57f7ed856932e9c797614d0b2a0bb2b7b183d0444e 5ef64915e71a0a82e907f051bd349d35990910b7707e2189239897f76b8fcf24]
I1210 00:31:11.739748 869958 ssh_runner.go:195] Run: which crictl
I1210 00:31:11.743857 869958 ssh_runner.go:195] Run: which crictl
I1210 00:31:11.747864 869958 logs.go:123] Gathering logs for kube-scheduler [e24a785fdbd96316943085ea3d97c2bbf5698967fcac01b25a6a185f04e80b07] ...
I1210 00:31:11.747897 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 e24a785fdbd96316943085ea3d97c2bbf5698967fcac01b25a6a185f04e80b07"
I1210 00:31:11.787539 869958 logs.go:123] Gathering logs for kube-controller-manager [7f21b5ae0b202880d6305e4c384b15e18f96a0e7d3fdf3efa45355a2af113e82] ...
I1210 00:31:11.787577 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 7f21b5ae0b202880d6305e4c384b15e18f96a0e7d3fdf3efa45355a2af113e82"
I1210 00:31:11.854239 869958 logs.go:123] Gathering logs for kubernetes-dashboard [71efdd7b3ff7301fc234121aa9b522c561e17518296c9ac0f096808fd717194e] ...
I1210 00:31:11.854286 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 71efdd7b3ff7301fc234121aa9b522c561e17518296c9ac0f096808fd717194e"
I1210 00:31:11.890628 869958 logs.go:123] Gathering logs for storage-provisioner [5ef64915e71a0a82e907f051bd349d35990910b7707e2189239897f76b8fcf24] ...
I1210 00:31:11.890659 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 5ef64915e71a0a82e907f051bd349d35990910b7707e2189239897f76b8fcf24"
I1210 00:31:11.924933 869958 logs.go:123] Gathering logs for dmesg ...
I1210 00:31:11.924977 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1210 00:31:11.952597 869958 logs.go:123] Gathering logs for kube-apiserver [9be25993b65b8cdca34c64615c37d67ed96191f7e935d4aa5f3f20b8a71af72d] ...
I1210 00:31:11.952639 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 9be25993b65b8cdca34c64615c37d67ed96191f7e935d4aa5f3f20b8a71af72d"
I1210 00:31:12.008186 869958 logs.go:123] Gathering logs for etcd [de4e779f2f1e9dbb4a147473498f66677a264e01c0c74453fc0137f378cf8ae2] ...
I1210 00:31:12.008225 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 de4e779f2f1e9dbb4a147473498f66677a264e01c0c74453fc0137f378cf8ae2"
I1210 00:31:12.050981 869958 logs.go:123] Gathering logs for kindnet [1436cfab0a61117409dfbc149a9ed46cffc35222a59e469b4357eb8eeb006a1a] ...
I1210 00:31:12.051019 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 1436cfab0a61117409dfbc149a9ed46cffc35222a59e469b4357eb8eeb006a1a"
I1210 00:31:12.092306 869958 logs.go:123] Gathering logs for storage-provisioner [b9cf656a0d778fa858636c57f7ed856932e9c797614d0b2a0bb2b7b183d0444e] ...
I1210 00:31:12.092348 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 b9cf656a0d778fa858636c57f7ed856932e9c797614d0b2a0bb2b7b183d0444e"
I1210 00:31:12.126824 869958 logs.go:123] Gathering logs for kubelet ...
I1210 00:31:12.126877 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
W1210 00:31:12.167149 869958 logs.go:138] Found kubelet problem: Dec 10 00:25:36 old-k8s-version-280963 kubelet[1066]: E1210 00:25:36.013371 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = failed to pull and unpack image \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to resolve reference \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to do request: Head \"https://fake.domain/v2/registry.k8s.io/echoserver/manifests/1.4\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
W1210 00:31:12.167339 869958 logs.go:138] Found kubelet problem: Dec 10 00:25:36 old-k8s-version-280963 kubelet[1066]: E1210 00:25:36.276847 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.169400 869958 logs.go:138] Found kubelet problem: Dec 10 00:25:49 old-k8s-version-280963 kubelet[1066]: E1210 00:25:49.092116 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = failed to pull and unpack image \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to resolve reference \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to do request: Head \"https://fake.domain/v2/registry.k8s.io/echoserver/manifests/1.4\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
W1210 00:31:12.170983 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:00 old-k8s-version-280963 kubelet[1066]: E1210 00:26:00.341400 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 10s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.171225 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:01 old-k8s-version-280963 kubelet[1066]: E1210 00:26:01.348361 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 10s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.171364 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:02 old-k8s-version-280963 kubelet[1066]: E1210 00:26:02.063829 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.171704 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:02 old-k8s-version-280963 kubelet[1066]: E1210 00:26:02.351893 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 10s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.173755 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:14 old-k8s-version-280963 kubelet[1066]: E1210 00:26:14.082796 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = failed to pull and unpack image \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to resolve reference \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to do request: Head \"https://fake.domain/v2/registry.k8s.io/echoserver/manifests/1.4\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
W1210 00:31:12.174505 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:16 old-k8s-version-280963 kubelet[1066]: E1210 00:26:16.385007 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.174745 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:20 old-k8s-version-280963 kubelet[1066]: E1210 00:26:20.929425 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.174905 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:25 old-k8s-version-280963 kubelet[1066]: E1210 00:26:25.063820 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.175143 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:32 old-k8s-version-280963 kubelet[1066]: E1210 00:26:32.063614 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.175321 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:37 old-k8s-version-280963 kubelet[1066]: E1210 00:26:37.063859 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.175745 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:45 old-k8s-version-280963 kubelet[1066]: E1210 00:26:45.451805 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.175980 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:50 old-k8s-version-280963 kubelet[1066]: E1210 00:26:50.929571 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.176120 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:51 old-k8s-version-280963 kubelet[1066]: E1210 00:26:51.063691 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.176358 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:04 old-k8s-version-280963 kubelet[1066]: E1210 00:27:04.063486 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.178089 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:06 old-k8s-version-280963 kubelet[1066]: E1210 00:27:06.100485 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = failed to pull and unpack image \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to resolve reference \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to do request: Head \"https://fake.domain/v2/registry.k8s.io/echoserver/manifests/1.4\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
W1210 00:31:12.178356 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:16 old-k8s-version-280963 kubelet[1066]: E1210 00:27:16.063301 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.178495 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:18 old-k8s-version-280963 kubelet[1066]: E1210 00:27:18.063936 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.178628 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:29 old-k8s-version-280963 kubelet[1066]: E1210 00:27:29.063910 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.179087 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:30 old-k8s-version-280963 kubelet[1066]: E1210 00:27:30.551122 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 1m20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.179328 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:31 old-k8s-version-280963 kubelet[1066]: E1210 00:27:31.554624 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 1m20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.179563 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:42 old-k8s-version-280963 kubelet[1066]: E1210 00:27:42.063651 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 1m20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.179696 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:43 old-k8s-version-280963 kubelet[1066]: E1210 00:27:43.063770 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.179934 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:54 old-k8s-version-280963 kubelet[1066]: E1210 00:27:54.063558 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 1m20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.180068 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:55 old-k8s-version-280963 kubelet[1066]: E1210 00:27:55.063561 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.180308 869958 logs.go:138] Found kubelet problem: Dec 10 00:28:05 old-k8s-version-280963 kubelet[1066]: E1210 00:28:05.063379 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 1m20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.180463 869958 logs.go:138] Found kubelet problem: Dec 10 00:28:10 old-k8s-version-280963 kubelet[1066]: E1210 00:28:10.063837 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.180701 869958 logs.go:138] Found kubelet problem: Dec 10 00:28:18 old-k8s-version-280963 kubelet[1066]: E1210 00:28:18.063477 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 1m20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.180836 869958 logs.go:138] Found kubelet problem: Dec 10 00:28:23 old-k8s-version-280963 kubelet[1066]: E1210 00:28:23.063704 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.181073 869958 logs.go:138] Found kubelet problem: Dec 10 00:28:29 old-k8s-version-280963 kubelet[1066]: E1210 00:28:29.063218 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 1m20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.182823 869958 logs.go:138] Found kubelet problem: Dec 10 00:28:36 old-k8s-version-280963 kubelet[1066]: E1210 00:28:36.089234 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = failed to pull and unpack image \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to resolve reference \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to do request: Head \"https://fake.domain/v2/registry.k8s.io/echoserver/manifests/1.4\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
W1210 00:31:12.183092 869958 logs.go:138] Found kubelet problem: Dec 10 00:28:40 old-k8s-version-280963 kubelet[1066]: E1210 00:28:40.063266 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 1m20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.183227 869958 logs.go:138] Found kubelet problem: Dec 10 00:28:50 old-k8s-version-280963 kubelet[1066]: E1210 00:28:50.063870 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.183655 869958 logs.go:138] Found kubelet problem: Dec 10 00:28:55 old-k8s-version-280963 kubelet[1066]: E1210 00:28:55.726230 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.183890 869958 logs.go:138] Found kubelet problem: Dec 10 00:29:00 old-k8s-version-280963 kubelet[1066]: E1210 00:29:00.929346 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.184024 869958 logs.go:138] Found kubelet problem: Dec 10 00:29:01 old-k8s-version-280963 kubelet[1066]: E1210 00:29:01.063931 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.184157 869958 logs.go:138] Found kubelet problem: Dec 10 00:29:12 old-k8s-version-280963 kubelet[1066]: E1210 00:29:12.063883 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.184400 869958 logs.go:138] Found kubelet problem: Dec 10 00:29:13 old-k8s-version-280963 kubelet[1066]: E1210 00:29:13.063415 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.184636 869958 logs.go:138] Found kubelet problem: Dec 10 00:29:25 old-k8s-version-280963 kubelet[1066]: E1210 00:29:25.063471 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.184769 869958 logs.go:138] Found kubelet problem: Dec 10 00:29:25 old-k8s-version-280963 kubelet[1066]: E1210 00:29:25.063913 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.185004 869958 logs.go:138] Found kubelet problem: Dec 10 00:29:36 old-k8s-version-280963 kubelet[1066]: E1210 00:29:36.063693 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.185138 869958 logs.go:138] Found kubelet problem: Dec 10 00:29:36 old-k8s-version-280963 kubelet[1066]: E1210 00:29:36.063872 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.185382 869958 logs.go:138] Found kubelet problem: Dec 10 00:29:49 old-k8s-version-280963 kubelet[1066]: E1210 00:29:49.063306 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.185515 869958 logs.go:138] Found kubelet problem: Dec 10 00:29:50 old-k8s-version-280963 kubelet[1066]: E1210 00:29:50.063838 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.185750 869958 logs.go:138] Found kubelet problem: Dec 10 00:30:03 old-k8s-version-280963 kubelet[1066]: E1210 00:30:03.063224 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.185883 869958 logs.go:138] Found kubelet problem: Dec 10 00:30:05 old-k8s-version-280963 kubelet[1066]: E1210 00:30:05.063807 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.186018 869958 logs.go:138] Found kubelet problem: Dec 10 00:30:17 old-k8s-version-280963 kubelet[1066]: E1210 00:30:17.063774 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.186253 869958 logs.go:138] Found kubelet problem: Dec 10 00:30:18 old-k8s-version-280963 kubelet[1066]: E1210 00:30:18.063380 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.186506 869958 logs.go:138] Found kubelet problem: Dec 10 00:30:29 old-k8s-version-280963 kubelet[1066]: E1210 00:30:29.063392 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.186644 869958 logs.go:138] Found kubelet problem: Dec 10 00:30:32 old-k8s-version-280963 kubelet[1066]: E1210 00:30:32.063891 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.186900 869958 logs.go:138] Found kubelet problem: Dec 10 00:30:41 old-k8s-version-280963 kubelet[1066]: E1210 00:30:41.063206 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.187083 869958 logs.go:138] Found kubelet problem: Dec 10 00:30:44 old-k8s-version-280963 kubelet[1066]: E1210 00:30:44.064113 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.187488 869958 logs.go:138] Found kubelet problem: Dec 10 00:30:54 old-k8s-version-280963 kubelet[1066]: E1210 00:30:54.063639 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.187699 869958 logs.go:138] Found kubelet problem: Dec 10 00:30:56 old-k8s-version-280963 kubelet[1066]: E1210 00:30:56.063846 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.188009 869958 logs.go:138] Found kubelet problem: Dec 10 00:31:06 old-k8s-version-280963 kubelet[1066]: E1210 00:31:06.063272 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.188179 869958 logs.go:138] Found kubelet problem: Dec 10 00:31:09 old-k8s-version-280963 kubelet[1066]: E1210 00:31:09.063618 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
I1210 00:31:12.188199 869958 logs.go:123] Gathering logs for describe nodes ...
I1210 00:31:12.188219 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.20.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
I1210 00:31:12.291065 869958 logs.go:123] Gathering logs for kube-proxy [930a4290304a3700a3b34bb588be1a8cb0fc8fc88f3c3adb4bc89453498c1ba1] ...
I1210 00:31:12.291103 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 930a4290304a3700a3b34bb588be1a8cb0fc8fc88f3c3adb4bc89453498c1ba1"
I1210 00:31:12.325400 869958 logs.go:123] Gathering logs for containerd ...
I1210 00:31:12.325437 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1210 00:31:12.385096 869958 logs.go:123] Gathering logs for coredns [d00996a4380040decb2e6f3c9bcc65ff7f12c74a6f6817167177166144b883f0] ...
I1210 00:31:12.385143 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 d00996a4380040decb2e6f3c9bcc65ff7f12c74a6f6817167177166144b883f0"
I1210 00:31:12.421781 869958 logs.go:123] Gathering logs for container status ...
I1210 00:31:12.421815 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1210 00:31:12.458769 869958 out.go:358] Setting ErrFile to fd 2...
I1210 00:31:12.458797 869958 out.go:392] TERM=,COLORTERM=, which probably does not support color
W1210 00:31:12.458963 869958 out.go:270] X Problems detected in kubelet:
W1210 00:31:12.458980 869958 out.go:270] Dec 10 00:30:44 old-k8s-version-280963 kubelet[1066]: E1210 00:30:44.064113 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.458988 869958 out.go:270] Dec 10 00:30:54 old-k8s-version-280963 kubelet[1066]: E1210 00:30:54.063639 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.459000 869958 out.go:270] Dec 10 00:30:56 old-k8s-version-280963 kubelet[1066]: E1210 00:30:56.063846 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.459010 869958 out.go:270] Dec 10 00:31:06 old-k8s-version-280963 kubelet[1066]: E1210 00:31:06.063272 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.459023 869958 out.go:270] Dec 10 00:31:09 old-k8s-version-280963 kubelet[1066]: E1210 00:31:09.063618 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
I1210 00:31:12.459048 869958 out.go:358] Setting ErrFile to fd 2...
I1210 00:31:12.459062 869958 out.go:392] TERM=,COLORTERM=, which probably does not support color
I1210 00:31:22.460270 869958 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
I1210 00:31:22.467259 869958 api_server.go:279] https://192.168.85.2:8443/healthz returned 200:
ok
I1210 00:31:22.469499 869958 out.go:201]
W1210 00:31:22.470824 869958 out.go:270] X Exiting due to K8S_UNHEALTHY_CONTROL_PLANE: wait 6m0s for node: wait for healthy API server: controlPlane never updated to v1.20.0
W1210 00:31:22.470878 869958 out.go:270] * Suggestion: Control Plane could not update, try minikube delete --all --purge
W1210 00:31:22.470901 869958 out.go:270] * Related issue: https://github.com/kubernetes/minikube/issues/11417
W1210 00:31:22.470913 869958 out.go:270] *
W1210 00:31:22.472041 869958 out.go:293] ╭─────────────────────────────────────────────────────────────────────────────────────────────╮
│ │
│ * If the above advice does not help, please let us know: │
│ https://github.com/kubernetes/minikube/issues/new/choose │
│ │
│ * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue. │
│ │
╰─────────────────────────────────────────────────────────────────────────────────────────────╯
I1210 00:31:22.473975 869958 out.go:201]
** /stderr **
start_stop_delete_test.go:259: failed to start minikube post-stop. args "out/minikube-linux-amd64 start -p old-k8s-version-280963 --memory=2200 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.20.0": exit status 102
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:230: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/SecondStart]: docker inspect <======
helpers_test.go:231: (dbg) Run: docker inspect old-k8s-version-280963
helpers_test.go:235: (dbg) docker inspect old-k8s-version-280963:
-- stdout --
[
{
"Id": "8b9e5f9136a718f848f024d4c77415a39541d5503b6eff1df9f19c9a53ce350a",
"Created": "2024-12-10T00:22:28.923428486Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 870250,
"ExitCode": 0,
"Error": "",
"StartedAt": "2024-12-10T00:25:05.301512011Z",
"FinishedAt": "2024-12-10T00:25:04.3636608Z"
},
"Image": "sha256:1a0bf2062289d31d12b734a031220306d830691a529a6eae8b4c8f4049e20571",
"ResolvConfPath": "/var/lib/docker/containers/8b9e5f9136a718f848f024d4c77415a39541d5503b6eff1df9f19c9a53ce350a/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/8b9e5f9136a718f848f024d4c77415a39541d5503b6eff1df9f19c9a53ce350a/hostname",
"HostsPath": "/var/lib/docker/containers/8b9e5f9136a718f848f024d4c77415a39541d5503b6eff1df9f19c9a53ce350a/hosts",
"LogPath": "/var/lib/docker/containers/8b9e5f9136a718f848f024d4c77415a39541d5503b6eff1df9f19c9a53ce350a/8b9e5f9136a718f848f024d4c77415a39541d5503b6eff1df9f19c9a53ce350a-json.log",
"Name": "/old-k8s-version-280963",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"old-k8s-version-280963:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {
"max-size": "100m"
}
},
"NetworkMode": "old-k8s-version-280963",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 2306867200,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 4613734400,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"LowerDir": "/var/lib/docker/overlay2/0439f432a0d9ef4f820fd26f56c91315ad3e226b5d1f39453458892acb5b101d-init/diff:/var/lib/docker/overlay2/bae8d7d00d99e063ddf62cc977f255b7c2fa4bde63ebe9a612d21991917b231b/diff",
"MergedDir": "/var/lib/docker/overlay2/0439f432a0d9ef4f820fd26f56c91315ad3e226b5d1f39453458892acb5b101d/merged",
"UpperDir": "/var/lib/docker/overlay2/0439f432a0d9ef4f820fd26f56c91315ad3e226b5d1f39453458892acb5b101d/diff",
"WorkDir": "/var/lib/docker/overlay2/0439f432a0d9ef4f820fd26f56c91315ad3e226b5d1f39453458892acb5b101d/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "old-k8s-version-280963",
"Source": "/var/lib/docker/volumes/old-k8s-version-280963/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "old-k8s-version-280963",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1730888964-19917@sha256:629a5748e3ec15a091fef12257eb3754b8ffc0c974ebcbb016451c65d1829615",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "old-k8s-version-280963",
"name.minikube.sigs.k8s.io": "old-k8s-version-280963",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "b52493461069750ca2a8a82adb0dacddd1abb3d0678d043741666eb89f68be76",
"SandboxKey": "/var/run/docker/netns/b52493461069",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33625"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33626"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33629"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33627"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33628"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"old-k8s-version-280963": {
"IPAMConfig": {
"IPv4Address": "192.168.85.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "02:42:c0:a8:55:02",
"DriverOpts": null,
"NetworkID": "b7562d0c2da9c6ac6ff2d5d7a94c372cb75974da8a5912a88e6a51c8f16e809a",
"EndpointID": "882b0db8972014e7b722b37ee737c2eef86618053847fbc8cef86c41dd4e66ad",
"Gateway": "192.168.85.1",
"IPAddress": "192.168.85.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"old-k8s-version-280963",
"8b9e5f9136a7"
]
}
}
}
}
]
-- /stdout --
helpers_test.go:239: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p old-k8s-version-280963 -n old-k8s-version-280963
helpers_test.go:244: <<< TestStartStop/group/old-k8s-version/serial/SecondStart FAILED: start of post-mortem logs <<<
helpers_test.go:245: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/SecondStart]: minikube logs <======
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 -p old-k8s-version-280963 logs -n 25
helpers_test.go:247: (dbg) Done: out/minikube-linux-amd64 -p old-k8s-version-280963 logs -n 25: (1.215361107s)
helpers_test.go:252: TestStartStop/group/old-k8s-version/serial/SecondStart logs:
-- stdout --
==> Audit <==
|---------|--------------------------------------------------------|------------------------------|---------|---------|---------------------|---------------------|
| Command | Args | Profile | User | Version | Start Time | End Time |
|---------|--------------------------------------------------------|------------------------------|---------|---------|---------------------|---------------------|
| image | embed-certs-757313 image list | embed-certs-757313 | jenkins | v1.34.0 | 10 Dec 24 00:28 UTC | 10 Dec 24 00:28 UTC |
| | --format=json | | | | | |
| pause | -p embed-certs-757313 | embed-certs-757313 | jenkins | v1.34.0 | 10 Dec 24 00:28 UTC | 10 Dec 24 00:28 UTC |
| | --alsologtostderr -v=1 | | | | | |
| unpause | -p embed-certs-757313 | embed-certs-757313 | jenkins | v1.34.0 | 10 Dec 24 00:28 UTC | 10 Dec 24 00:28 UTC |
| | --alsologtostderr -v=1 | | | | | |
| delete | -p embed-certs-757313 | embed-certs-757313 | jenkins | v1.34.0 | 10 Dec 24 00:28 UTC | 10 Dec 24 00:28 UTC |
| delete | -p embed-certs-757313 | embed-certs-757313 | jenkins | v1.34.0 | 10 Dec 24 00:28 UTC | 10 Dec 24 00:28 UTC |
| start | -p newest-cni-451721 --memory=2200 --alsologtostderr | newest-cni-451721 | jenkins | v1.34.0 | 10 Dec 24 00:28 UTC | 10 Dec 24 00:29 UTC |
| | --wait=apiserver,system_pods,default_sa | | | | | |
| | --feature-gates ServerSideApply=true | | | | | |
| | --network-plugin=cni | | | | | |
| | --extra-config=kubeadm.pod-network-cidr=10.42.0.0/16 | | | | | |
| | --driver=docker --container-runtime=containerd | | | | | |
| | --kubernetes-version=v1.31.2 | | | | | |
| image | default-k8s-diff-port-337138 | default-k8s-diff-port-337138 | jenkins | v1.34.0 | 10 Dec 24 00:29 UTC | 10 Dec 24 00:29 UTC |
| | image list --format=json | | | | | |
| pause | -p | default-k8s-diff-port-337138 | jenkins | v1.34.0 | 10 Dec 24 00:29 UTC | 10 Dec 24 00:29 UTC |
| | default-k8s-diff-port-337138 | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| unpause | -p | default-k8s-diff-port-337138 | jenkins | v1.34.0 | 10 Dec 24 00:29 UTC | 10 Dec 24 00:29 UTC |
| | default-k8s-diff-port-337138 | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| delete | -p | default-k8s-diff-port-337138 | jenkins | v1.34.0 | 10 Dec 24 00:29 UTC | 10 Dec 24 00:29 UTC |
| | default-k8s-diff-port-337138 | | | | | |
| delete | -p | default-k8s-diff-port-337138 | jenkins | v1.34.0 | 10 Dec 24 00:29 UTC | 10 Dec 24 00:29 UTC |
| | default-k8s-diff-port-337138 | | | | | |
| image | no-preload-073501 image list | no-preload-073501 | jenkins | v1.34.0 | 10 Dec 24 00:29 UTC | 10 Dec 24 00:29 UTC |
| | --format=json | | | | | |
| pause | -p no-preload-073501 | no-preload-073501 | jenkins | v1.34.0 | 10 Dec 24 00:29 UTC | 10 Dec 24 00:29 UTC |
| | --alsologtostderr -v=1 | | | | | |
| unpause | -p no-preload-073501 | no-preload-073501 | jenkins | v1.34.0 | 10 Dec 24 00:29 UTC | 10 Dec 24 00:29 UTC |
| | --alsologtostderr -v=1 | | | | | |
| delete | -p no-preload-073501 | no-preload-073501 | jenkins | v1.34.0 | 10 Dec 24 00:29 UTC | 10 Dec 24 00:29 UTC |
| delete | -p no-preload-073501 | no-preload-073501 | jenkins | v1.34.0 | 10 Dec 24 00:29 UTC | 10 Dec 24 00:29 UTC |
| addons | enable metrics-server -p newest-cni-451721 | newest-cni-451721 | jenkins | v1.34.0 | 10 Dec 24 00:29 UTC | 10 Dec 24 00:29 UTC |
| | --images=MetricsServer=registry.k8s.io/echoserver:1.4 | | | | | |
| | --registries=MetricsServer=fake.domain | | | | | |
| stop | -p newest-cni-451721 | newest-cni-451721 | jenkins | v1.34.0 | 10 Dec 24 00:29 UTC | 10 Dec 24 00:29 UTC |
| | --alsologtostderr -v=3 | | | | | |
| addons | enable dashboard -p newest-cni-451721 | newest-cni-451721 | jenkins | v1.34.0 | 10 Dec 24 00:29 UTC | 10 Dec 24 00:29 UTC |
| | --images=MetricsScraper=registry.k8s.io/echoserver:1.4 | | | | | |
| start | -p newest-cni-451721 --memory=2200 --alsologtostderr | newest-cni-451721 | jenkins | v1.34.0 | 10 Dec 24 00:29 UTC | 10 Dec 24 00:29 UTC |
| | --wait=apiserver,system_pods,default_sa | | | | | |
| | --feature-gates ServerSideApply=true | | | | | |
| | --network-plugin=cni | | | | | |
| | --extra-config=kubeadm.pod-network-cidr=10.42.0.0/16 | | | | | |
| | --driver=docker --container-runtime=containerd | | | | | |
| | --kubernetes-version=v1.31.2 | | | | | |
| image | newest-cni-451721 image list | newest-cni-451721 | jenkins | v1.34.0 | 10 Dec 24 00:29 UTC | 10 Dec 24 00:29 UTC |
| | --format=json | | | | | |
| pause | -p newest-cni-451721 | newest-cni-451721 | jenkins | v1.34.0 | 10 Dec 24 00:29 UTC | 10 Dec 24 00:29 UTC |
| | --alsologtostderr -v=1 | | | | | |
| unpause | -p newest-cni-451721 | newest-cni-451721 | jenkins | v1.34.0 | 10 Dec 24 00:29 UTC | 10 Dec 24 00:29 UTC |
| | --alsologtostderr -v=1 | | | | | |
| delete | -p newest-cni-451721 | newest-cni-451721 | jenkins | v1.34.0 | 10 Dec 24 00:29 UTC | 10 Dec 24 00:29 UTC |
| delete | -p newest-cni-451721 | newest-cni-451721 | jenkins | v1.34.0 | 10 Dec 24 00:29 UTC | 10 Dec 24 00:29 UTC |
|---------|--------------------------------------------------------|------------------------------|---------|---------|---------------------|---------------------|
==> Last Start <==
Log file created at: 2024/12/10 00:29:28
Running on machine: ubuntu-20-agent-6
Binary: Built with gc go1.23.2 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1210 00:29:28.052619 887464 out.go:345] Setting OutFile to fd 1 ...
I1210 00:29:28.052886 887464 out.go:392] TERM=,COLORTERM=, which probably does not support color
I1210 00:29:28.052895 887464 out.go:358] Setting ErrFile to fd 2...
I1210 00:29:28.052898 887464 out.go:392] TERM=,COLORTERM=, which probably does not support color
I1210 00:29:28.053105 887464 root.go:338] Updating PATH: /home/jenkins/minikube-integration/20062-527107/.minikube/bin
I1210 00:29:28.053702 887464 out.go:352] Setting JSON to false
I1210 00:29:28.054886 887464 start.go:129] hostinfo: {"hostname":"ubuntu-20-agent-6","uptime":11512,"bootTime":1733779056,"procs":269,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1071-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I1210 00:29:28.054998 887464 start.go:139] virtualization: kvm guest
I1210 00:29:28.057226 887464 out.go:177] * [newest-cni-451721] minikube v1.34.0 on Ubuntu 20.04 (kvm/amd64)
I1210 00:29:28.058464 887464 out.go:177] - MINIKUBE_LOCATION=20062
I1210 00:29:28.058466 887464 notify.go:220] Checking for updates...
I1210 00:29:28.059653 887464 out.go:177] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1210 00:29:28.061038 887464 out.go:177] - KUBECONFIG=/home/jenkins/minikube-integration/20062-527107/kubeconfig
I1210 00:29:28.062198 887464 out.go:177] - MINIKUBE_HOME=/home/jenkins/minikube-integration/20062-527107/.minikube
I1210 00:29:28.063507 887464 out.go:177] - MINIKUBE_BIN=out/minikube-linux-amd64
I1210 00:29:28.064754 887464 out.go:177] - MINIKUBE_FORCE_SYSTEMD=
I1210 00:29:28.066267 887464 config.go:182] Loaded profile config "newest-cni-451721": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.31.2
I1210 00:29:28.066916 887464 driver.go:394] Setting default libvirt URI to qemu:///system
I1210 00:29:28.092208 887464 docker.go:123] docker version: linux-27.4.0:Docker Engine - Community
I1210 00:29:28.092359 887464 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1210 00:29:28.142299 887464 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:2 ContainersRunning:1 ContainersPaused:0 ContainersStopped:1 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:44 OomKillDisable:true NGoroutines:61 SystemTime:2024-12-10 00:29:28.132746908 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1071-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33647935488 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-6 Labels:[] ExperimentalBuild:false ServerVersion:27.4.0 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:88bf19b2105c8b17560993bee28a01ddc2f97182 Expected:88bf19b2105c8b17560993bee28a01ddc2f97182} RuncCommit:{ID:v1.2.2-0-g7cb3632 Expected:v1.2.2-0-g7cb3632} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:[WARNING: bridge-nf-call-iptables is disabled WARNING: bridge-nf-call-ip6tables is disabled] ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.19.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.31.0] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1210 00:29:28.142419 887464 docker.go:318] overlay module found
I1210 00:29:28.144233 887464 out.go:177] * Using the docker driver based on existing profile
I1210 00:29:28.145516 887464 start.go:297] selected driver: docker
I1210 00:29:28.145551 887464 start.go:901] validating driver "docker" against &{Name:newest-cni-451721 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1730888964-19917@sha256:629a5748e3ec15a091fef12257eb3754b8ffc0c974ebcbb016451c65d1829615 Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.2 ClusterName:newest-cni-451721 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates:ServerSideApply=true ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:kubeadm Key:pod-network-cidr Value:10.42.0.0/16}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.31.2 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:false default_sa:true extra:false kubelet:false node_ready:false system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1210 00:29:28.145689 887464 start.go:912] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1210 00:29:28.146625 887464 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1210 00:29:28.194993 887464 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:2 ContainersRunning:1 ContainersPaused:0 ContainersStopped:1 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:44 OomKillDisable:true NGoroutines:61 SystemTime:2024-12-10 00:29:28.186038818 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1071-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33647935488 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-6 Labels:[] ExperimentalBuild:false ServerVersion:27.4.0 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:88bf19b2105c8b17560993bee28a01ddc2f97182 Expected:88bf19b2105c8b17560993bee28a01ddc2f97182} RuncCommit:{ID:v1.2.2-0-g7cb3632 Expected:v1.2.2-0-g7cb3632} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:[WARNING: bridge-nf-call-iptables is disabled WARNING: bridge-nf-call-ip6tables is disabled] ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.19.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.31.0] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1210 00:29:28.195544 887464 start_flags.go:966] Waiting for components: map[apiserver:true apps_running:false default_sa:true extra:false kubelet:false node_ready:false system_pods:true]
I1210 00:29:28.195588 887464 cni.go:84] Creating CNI manager for ""
I1210 00:29:28.195648 887464 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1210 00:29:28.195721 887464 start.go:340] cluster config:
{Name:newest-cni-451721 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1730888964-19917@sha256:629a5748e3ec15a091fef12257eb3754b8ffc0c974ebcbb016451c65d1829615 Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.2 ClusterName:newest-cni-451721 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates:ServerSideApply=true ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:kubeadm Key:pod-network-cidr Value:10.42.0.0/16}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.31.2 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:false default_sa:true extra:false kubelet:false node_ready:false system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1210 00:29:28.197682 887464 out.go:177] * Starting "newest-cni-451721" primary control-plane node in "newest-cni-451721" cluster
I1210 00:29:28.198793 887464 cache.go:121] Beginning downloading kic base image for docker with containerd
I1210 00:29:28.200128 887464 out.go:177] * Pulling base image v0.0.45-1730888964-19917 ...
I1210 00:29:28.201266 887464 preload.go:131] Checking if preload exists for k8s version v1.31.2 and runtime containerd
I1210 00:29:28.201310 887464 preload.go:146] Found local preload: /home/jenkins/minikube-integration/20062-527107/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.2-containerd-overlay2-amd64.tar.lz4
I1210 00:29:28.201308 887464 image.go:79] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1730888964-19917@sha256:629a5748e3ec15a091fef12257eb3754b8ffc0c974ebcbb016451c65d1829615 in local docker daemon
I1210 00:29:28.201325 887464 cache.go:56] Caching tarball of preloaded images
I1210 00:29:28.201554 887464 preload.go:172] Found /home/jenkins/minikube-integration/20062-527107/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.2-containerd-overlay2-amd64.tar.lz4 in cache, skipping download
I1210 00:29:28.201573 887464 cache.go:59] Finished verifying existence of preloaded tar for v1.31.2 on containerd
I1210 00:29:28.201742 887464 profile.go:143] Saving config to /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/newest-cni-451721/config.json ...
I1210 00:29:28.225299 887464 image.go:98] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1730888964-19917@sha256:629a5748e3ec15a091fef12257eb3754b8ffc0c974ebcbb016451c65d1829615 in local docker daemon, skipping pull
I1210 00:29:28.225322 887464 cache.go:144] gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1730888964-19917@sha256:629a5748e3ec15a091fef12257eb3754b8ffc0c974ebcbb016451c65d1829615 exists in daemon, skipping load
I1210 00:29:28.225339 887464 cache.go:194] Successfully downloaded all kic artifacts
I1210 00:29:28.225381 887464 start.go:360] acquireMachinesLock for newest-cni-451721: {Name:mk18bdae31d39ddc90280f156a2e9122e8fc8159 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1210 00:29:28.225443 887464 start.go:364] duration metric: took 40.017µs to acquireMachinesLock for "newest-cni-451721"
I1210 00:29:28.225460 887464 start.go:96] Skipping create...Using existing machine configuration
I1210 00:29:28.225465 887464 fix.go:54] fixHost starting:
I1210 00:29:28.225678 887464 cli_runner.go:164] Run: docker container inspect newest-cni-451721 --format={{.State.Status}}
I1210 00:29:28.243713 887464 fix.go:112] recreateIfNeeded on newest-cni-451721: state=Stopped err=<nil>
W1210 00:29:28.243748 887464 fix.go:138] unexpected machine state, will restart: <nil>
I1210 00:29:28.245424 887464 out.go:177] * Restarting existing docker container for "newest-cni-451721" ...
I1210 00:29:25.285691 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:29:27.786048 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:29:28.246623 887464 cli_runner.go:164] Run: docker start newest-cni-451721
I1210 00:29:28.522403 887464 cli_runner.go:164] Run: docker container inspect newest-cni-451721 --format={{.State.Status}}
I1210 00:29:28.541982 887464 kic.go:430] container "newest-cni-451721" state is running.
I1210 00:29:28.542412 887464 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" newest-cni-451721
I1210 00:29:28.561718 887464 profile.go:143] Saving config to /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/newest-cni-451721/config.json ...
I1210 00:29:28.561949 887464 machine.go:93] provisionDockerMachine start ...
I1210 00:29:28.562009 887464 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" newest-cni-451721
I1210 00:29:28.581250 887464 main.go:141] libmachine: Using SSH client type: native
I1210 00:29:28.581490 887464 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x866ca0] 0x869980 <nil> [] 0s} 127.0.0.1 33635 <nil> <nil>}
I1210 00:29:28.581530 887464 main.go:141] libmachine: About to run SSH command:
hostname
I1210 00:29:28.582219 887464 main.go:141] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:53622->127.0.0.1:33635: read: connection reset by peer
I1210 00:29:31.718566 887464 main.go:141] libmachine: SSH cmd err, output: <nil>: newest-cni-451721
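[editor's note] The dial error above is expected right after docker start: sshd inside the freshly restarted container is not yet listening, so libmachine keeps retrying until the handshake succeeds (about three seconds later here). A minimal sketch of that kind of wait loop, assuming the forwarded host port 33635 from the log; libmachine layers a full SSH handshake on top of the raw TCP probe shown:

package main

import (
	"fmt"
	"net"
	"time"
)

// waitForSSH polls a TCP endpoint until it accepts connections or the
// deadline passes.
func waitForSSH(addr string, deadline time.Duration) error {
	stop := time.Now().Add(deadline)
	for time.Now().Before(stop) {
		conn, err := net.DialTimeout("tcp", addr, time.Second)
		if err == nil {
			conn.Close()
			return nil
		}
		time.Sleep(500 * time.Millisecond) // container still booting; retry
	}
	return fmt.Errorf("ssh endpoint %s not ready after %s", addr, deadline)
}

func main() {
	// 127.0.0.1:33635 is the host port Docker mapped to the container's 22/tcp.
	if err := waitForSSH("127.0.0.1:33635", 30*time.Second); err != nil {
		fmt.Println(err)
	}
}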
I1210 00:29:31.718598 887464 ubuntu.go:169] provisioning hostname "newest-cni-451721"
I1210 00:29:31.718678 887464 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" newest-cni-451721
I1210 00:29:31.739094 887464 main.go:141] libmachine: Using SSH client type: native
I1210 00:29:31.739288 887464 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x866ca0] 0x869980 <nil> [] 0s} 127.0.0.1 33635 <nil> <nil>}
I1210 00:29:31.739302 887464 main.go:141] libmachine: About to run SSH command:
sudo hostname newest-cni-451721 && echo "newest-cni-451721" | sudo tee /etc/hostname
I1210 00:29:31.878663 887464 main.go:141] libmachine: SSH cmd err, output: <nil>: newest-cni-451721
I1210 00:29:31.878749 887464 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" newest-cni-451721
I1210 00:29:31.898635 887464 main.go:141] libmachine: Using SSH client type: native
I1210 00:29:31.898817 887464 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x866ca0] 0x869980 <nil> [] 0s} 127.0.0.1 33635 <nil> <nil>}
I1210 00:29:31.898879 887464 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\snewest-cni-451721' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 newest-cni-451721/g' /etc/hosts;
else
echo '127.0.1.1 newest-cni-451721' | sudo tee -a /etc/hosts;
fi
fi
I1210 00:29:32.031354 887464 main.go:141] libmachine: SSH cmd err, output: <nil>:
I1210 00:29:32.031382 887464 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/20062-527107/.minikube CaCertPath:/home/jenkins/minikube-integration/20062-527107/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/20062-527107/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/20062-527107/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/20062-527107/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/20062-527107/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/20062-527107/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/20062-527107/.minikube}
I1210 00:29:32.031413 887464 ubuntu.go:177] setting up certificates
I1210 00:29:32.031427 887464 provision.go:84] configureAuth start
I1210 00:29:32.031490 887464 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" newest-cni-451721
I1210 00:29:32.049598 887464 provision.go:143] copyHostCerts
I1210 00:29:32.049667 887464 exec_runner.go:144] found /home/jenkins/minikube-integration/20062-527107/.minikube/ca.pem, removing ...
I1210 00:29:32.049688 887464 exec_runner.go:203] rm: /home/jenkins/minikube-integration/20062-527107/.minikube/ca.pem
I1210 00:29:32.049780 887464 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20062-527107/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/20062-527107/.minikube/ca.pem (1082 bytes)
I1210 00:29:32.049924 887464 exec_runner.go:144] found /home/jenkins/minikube-integration/20062-527107/.minikube/cert.pem, removing ...
I1210 00:29:32.049940 887464 exec_runner.go:203] rm: /home/jenkins/minikube-integration/20062-527107/.minikube/cert.pem
I1210 00:29:32.049978 887464 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20062-527107/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/20062-527107/.minikube/cert.pem (1123 bytes)
I1210 00:29:32.050077 887464 exec_runner.go:144] found /home/jenkins/minikube-integration/20062-527107/.minikube/key.pem, removing ...
I1210 00:29:32.050090 887464 exec_runner.go:203] rm: /home/jenkins/minikube-integration/20062-527107/.minikube/key.pem
I1210 00:29:32.050123 887464 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20062-527107/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/20062-527107/.minikube/key.pem (1679 bytes)
I1210 00:29:32.050196 887464 provision.go:117] generating server cert: /home/jenkins/minikube-integration/20062-527107/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/20062-527107/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/20062-527107/.minikube/certs/ca-key.pem org=jenkins.newest-cni-451721 san=[127.0.0.1 192.168.76.2 localhost minikube newest-cni-451721]
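[editor's note] provision.go regenerates the machine's server certificate with the SAN list shown above: the loopback address, the container IP, and the machine's host names. A sketch of how such a SAN set is expressed with Go's standard library; this is illustrative, not minikube's actual signing code, and the CA signing step itself is elided:

package main

import (
	"crypto/x509"
	"crypto/x509/pkix"
	"fmt"
	"math/big"
	"net"
	"time"
)

func main() {
	// SANs copied from the san=[...] list in the log line above.
	tmpl := x509.Certificate{
		SerialNumber: big.NewInt(1), // a real signer must use a unique serial
		Subject:      pkix.Name{Organization: []string{"jenkins.newest-cni-451721"}},
		IPAddresses:  []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("192.168.76.2")},
		DNSNames:     []string{"localhost", "minikube", "newest-cni-451721"},
		NotAfter:     time.Now().Add(26280 * time.Hour), // matches CertExpiration above
	}
	fmt.Println("server cert SANs:", tmpl.DNSNames, tmpl.IPAddresses)
}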
I1210 00:29:32.326147 887464 provision.go:177] copyRemoteCerts
I1210 00:29:32.326235 887464 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1210 00:29:32.326275 887464 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" newest-cni-451721
I1210 00:29:32.345347 887464 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33635 SSHKeyPath:/home/jenkins/minikube-integration/20062-527107/.minikube/machines/newest-cni-451721/id_rsa Username:docker}
I1210 00:29:32.440306 887464 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20062-527107/.minikube/machines/server.pem --> /etc/docker/server.pem (1220 bytes)
I1210 00:29:32.464567 887464 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20062-527107/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1210 00:29:32.489137 887464 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20062-527107/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1210 00:29:32.512700 887464 provision.go:87] duration metric: took 481.257458ms to configureAuth
I1210 00:29:32.512730 887464 ubuntu.go:193] setting minikube options for container-runtime
I1210 00:29:32.512986 887464 config.go:182] Loaded profile config "newest-cni-451721": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.31.2
I1210 00:29:32.513003 887464 machine.go:96] duration metric: took 3.951040589s to provisionDockerMachine
I1210 00:29:32.513012 887464 start.go:293] postStartSetup for "newest-cni-451721" (driver="docker")
I1210 00:29:32.513029 887464 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1210 00:29:32.513092 887464 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1210 00:29:32.513140 887464 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" newest-cni-451721
I1210 00:29:32.532331 887464 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33635 SSHKeyPath:/home/jenkins/minikube-integration/20062-527107/.minikube/machines/newest-cni-451721/id_rsa Username:docker}
I1210 00:29:32.627992 887464 ssh_runner.go:195] Run: cat /etc/os-release
I1210 00:29:32.631492 887464 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1210 00:29:32.631533 887464 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I1210 00:29:32.631543 887464 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I1210 00:29:32.631553 887464 info.go:137] Remote host: Ubuntu 22.04.5 LTS
I1210 00:29:32.631567 887464 filesync.go:126] Scanning /home/jenkins/minikube-integration/20062-527107/.minikube/addons for local assets ...
I1210 00:29:32.631629 887464 filesync.go:126] Scanning /home/jenkins/minikube-integration/20062-527107/.minikube/files for local assets ...
I1210 00:29:32.631731 887464 filesync.go:149] local asset: /home/jenkins/minikube-integration/20062-527107/.minikube/files/etc/ssl/certs/5339162.pem -> 5339162.pem in /etc/ssl/certs
I1210 00:29:32.631856 887464 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1210 00:29:32.640237 887464 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20062-527107/.minikube/files/etc/ssl/certs/5339162.pem --> /etc/ssl/certs/5339162.pem (1708 bytes)
I1210 00:29:32.663676 887464 start.go:296] duration metric: took 150.640477ms for postStartSetup
I1210 00:29:32.663763 887464 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1210 00:29:32.663811 887464 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" newest-cni-451721
I1210 00:29:32.682465 887464 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33635 SSHKeyPath:/home/jenkins/minikube-integration/20062-527107/.minikube/machines/newest-cni-451721/id_rsa Username:docker}
I1210 00:29:32.771807 887464 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1210 00:29:32.776386 887464 fix.go:56] duration metric: took 4.550910019s for fixHost
I1210 00:29:32.776419 887464 start.go:83] releasing machines lock for "newest-cni-451721", held for 4.550965314s
I1210 00:29:32.776510 887464 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" newest-cni-451721
I1210 00:29:32.796432 887464 ssh_runner.go:195] Run: cat /version.json
I1210 00:29:32.796486 887464 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" newest-cni-451721
I1210 00:29:32.796576 887464 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1210 00:29:32.796638 887464 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" newest-cni-451721
I1210 00:29:32.816364 887464 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33635 SSHKeyPath:/home/jenkins/minikube-integration/20062-527107/.minikube/machines/newest-cni-451721/id_rsa Username:docker}
I1210 00:29:32.816675 887464 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33635 SSHKeyPath:/home/jenkins/minikube-integration/20062-527107/.minikube/machines/newest-cni-451721/id_rsa Username:docker}
I1210 00:29:32.981168 887464 ssh_runner.go:195] Run: systemctl --version
I1210 00:29:32.985700 887464 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I1210 00:29:32.990213 887464 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I1210 00:29:33.007987 887464 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I1210 00:29:33.008087 887464 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1210 00:29:33.017598 887464 cni.go:259] no active bridge cni configs found in "/etc/cni/net.d" - nothing to disable
I1210 00:29:33.017635 887464 start.go:495] detecting cgroup driver to use...
I1210 00:29:33.017670 887464 detect.go:187] detected "cgroupfs" cgroup driver on host os
I1210 00:29:33.017763 887464 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1210 00:29:33.031659 887464 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1210 00:29:33.043074 887464 docker.go:217] disabling cri-docker service (if available) ...
I1210 00:29:33.043144 887464 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1210 00:29:33.056029 887464 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1210 00:29:33.067719 887464 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1210 00:29:33.145800 887464 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1210 00:29:33.226113 887464 docker.go:233] disabling docker service ...
I1210 00:29:33.226190 887464 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1210 00:29:33.240118 887464 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1210 00:29:33.252946 887464 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1210 00:29:33.338415 887464 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1210 00:29:33.424824 887464 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1210 00:29:33.438244 887464 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1210 00:29:33.458797 887464 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10"|' /etc/containerd/config.toml"
I1210 00:29:33.469817 887464 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1210 00:29:33.481614 887464 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I1210 00:29:33.481680 887464 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I1210 00:29:33.493683 887464 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1210 00:29:33.504187 887464 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1210 00:29:33.515028 887464 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1210 00:29:33.526651 887464 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1210 00:29:33.537394 887464 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1210 00:29:33.547664 887464 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1210 00:29:33.557993 887464 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
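[editor's note] The run of sed commands above rewrites /etc/containerd/config.toml in place: pinning the pause image, forcing SystemdCgroup = false to match the cgroupfs driver detected earlier, migrating old runtime names to io.containerd.runc.v2, and re-enabling unprivileged ports. A sketch of one of those edits done in Go instead of sed, using the same anchored pattern; the sample input is inline, whereas the real target is the file on the guest:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	sample := `[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
  SystemdCgroup = true`
	// Equivalent of: sed -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g'
	re := regexp.MustCompile(`(?m)^( *)SystemdCgroup = .*$`)
	fmt.Println(re.ReplaceAllString(sample, "${1}SystemdCgroup = false"))
}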
I1210 00:29:33.568674 887464 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1210 00:29:33.578498 887464 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1210 00:29:33.588050 887464 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1210 00:29:33.664722 887464 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1210 00:29:33.770599 887464 start.go:542] Will wait 60s for socket path /run/containerd/containerd.sock
I1210 00:29:33.770674 887464 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I1210 00:29:33.775072 887464 start.go:563] Will wait 60s for crictl version
I1210 00:29:33.775153 887464 ssh_runner.go:195] Run: which crictl
I1210 00:29:33.779199 887464 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I1210 00:29:33.816009 887464 start.go:579] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: 1.7.22
RuntimeApiVersion: v1
I1210 00:29:33.816096 887464 ssh_runner.go:195] Run: containerd --version
I1210 00:29:33.841408 887464 ssh_runner.go:195] Run: containerd --version
I1210 00:29:33.868731 887464 out.go:177] * Preparing Kubernetes v1.31.2 on containerd 1.7.22 ...
I1210 00:29:33.870484 887464 cli_runner.go:164] Run: docker network inspect newest-cni-451721 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1210 00:29:33.889181 887464 ssh_runner.go:195] Run: grep 192.168.76.1 host.minikube.internal$ /etc/hosts
I1210 00:29:33.893315 887464 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.76.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
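[editor's note] The bash one-liner above is minikube's idempotent /etc/hosts edit: filter out any existing host.minikube.internal line, append the current mapping, and copy the temp file back via sudo (since /etc/hosts is root-owned). The same filter-and-append, sketched in Go over an in-memory copy:

package main

import (
	"fmt"
	"strings"
)

// ensureHostsEntry drops any line ending in "<tab>name" and re-appends a
// fresh "ip<tab>name" mapping, mirroring the grep -v / echo pair above.
func ensureHostsEntry(hosts, ip, name string) string {
	var kept []string
	for _, line := range strings.Split(hosts, "\n") {
		if line != "" && !strings.HasSuffix(line, "\t"+name) {
			kept = append(kept, line)
		}
	}
	kept = append(kept, ip+"\t"+name)
	return strings.Join(kept, "\n") + "\n"
}

func main() {
	before := "127.0.0.1\tlocalhost\n192.168.76.1\thost.minikube.internal\n"
	fmt.Print(ensureHostsEntry(before, "192.168.76.1", "host.minikube.internal"))
}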
I1210 00:29:33.906416 887464 out.go:177] - kubeadm.pod-network-cidr=10.42.0.0/16
I1210 00:29:33.907756 887464 kubeadm.go:883] updating cluster {Name:newest-cni-451721 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1730888964-19917@sha256:629a5748e3ec15a091fef12257eb3754b8ffc0c974ebcbb016451c65d1829615 Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.2 ClusterName:newest-cni-451721 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates:ServerSideApply=true ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:kubeadm Key:pod-network-cidr Value:10.42.0.0/16}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.31.2 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:false default_sa:true extra:false kubelet:false node_ready:false system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1210 00:29:33.907938 887464 preload.go:131] Checking if preload exists for k8s version v1.31.2 and runtime containerd
I1210 00:29:33.908022 887464 ssh_runner.go:195] Run: sudo crictl images --output json
I1210 00:29:33.941650 887464 containerd.go:627] all images are preloaded for containerd runtime.
I1210 00:29:33.941676 887464 containerd.go:534] Images already preloaded, skipping extraction
I1210 00:29:33.941733 887464 ssh_runner.go:195] Run: sudo crictl images --output json
I1210 00:29:33.978652 887464 containerd.go:627] all images are preloaded for containerd runtime.
I1210 00:29:33.978680 887464 cache_images.go:84] Images are preloaded, skipping loading
I1210 00:29:33.978692 887464 kubeadm.go:934] updating node { 192.168.76.2 8443 v1.31.2 containerd true true} ...
I1210 00:29:33.978829 887464 kubeadm.go:946] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.31.2/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --feature-gates=ServerSideApply=true --hostname-override=newest-cni-451721 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.76.2
[Install]
config:
{KubernetesVersion:v1.31.2 ClusterName:newest-cni-451721 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates:ServerSideApply=true ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:kubeadm Key:pod-network-cidr Value:10.42.0.0/16}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1210 00:29:33.978962 887464 ssh_runner.go:195] Run: sudo crictl info
I1210 00:29:34.014053 887464 cni.go:84] Creating CNI manager for ""
I1210 00:29:34.014075 887464 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1210 00:29:34.014085 887464 kubeadm.go:84] Using pod CIDR: 10.42.0.0/16
I1210 00:29:34.014113 887464 kubeadm.go:189] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.42.0.0/16 AdvertiseAddress:192.168.76.2 APIServerPort:8443 KubernetesVersion:v1.31.2 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:newest-cni-451721 NodeName:newest-cni-451721 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota feature-gates:ServerSideApply=true] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.76.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true feature-gates:ServerSideApply=true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[feature-gates:ServerSideApply=true leader-elect:false] Pairs:map[]}] FeatureArg
s:map[] NodeIP:192.168.76.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1210 00:29:34.014227 887464 kubeadm.go:195] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.76.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///run/containerd/containerd.sock
name: "newest-cni-451721"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.76.2"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.76.2"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
- name: "feature-gates"
value: "ServerSideApply=true"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "feature-gates"
value: "ServerSideApply=true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "feature-gates"
value: "ServerSideApply=true"
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
- name: "proxy-refresh-interval"
value: "70000"
kubernetesVersion: v1.31.2
networking:
dnsDomain: cluster.local
podSubnet: "10.42.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.42.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
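[editor's note] The kubeadm config printed above is not hand-written; minikube renders it from a Go text/template filled with the kubeadm options struct logged at kubeadm.go:189, then ships the result to /var/tmp/minikube/kubeadm.yaml.new (see the scp a few lines below). A much-reduced sketch of that render step, with a hypothetical two-field template standing in for the real v1beta4 one:

package main

import (
	"os"
	"text/template"
)

// A hypothetical fragment of the InitConfiguration template; the real
// template covers everything printed in the log above.
const initTmpl = `apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: {{.AdvertiseAddress}}
  bindPort: {{.APIServerPort}}
`

func main() {
	t := template.Must(template.New("init").Parse(initTmpl))
	// Values taken from the kubeadm options struct logged above.
	t.Execute(os.Stdout, struct {
		AdvertiseAddress string
		APIServerPort    int
	}{"192.168.76.2", 8443})
}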
I1210 00:29:34.014287 887464 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.31.2
I1210 00:29:34.023288 887464 binaries.go:44] Found k8s binaries, skipping transfer
I1210 00:29:34.023375 887464 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1210 00:29:34.032305 887464 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (358 bytes)
I1210 00:29:34.049707 887464 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1210 00:29:34.068733 887464 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2497 bytes)
I1210 00:29:34.086908 887464 ssh_runner.go:195] Run: grep 192.168.76.2 control-plane.minikube.internal$ /etc/hosts
I1210 00:29:34.091099 887464 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.76.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1210 00:29:34.102305 887464 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1210 00:29:34.181346 887464 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1210 00:29:34.195374 887464 certs.go:68] Setting up /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/newest-cni-451721 for IP: 192.168.76.2
I1210 00:29:34.195403 887464 certs.go:194] generating shared ca certs ...
I1210 00:29:34.195424 887464 certs.go:226] acquiring lock for ca certs: {Name:mk98ae8901439369b17532a89b5c8e73a55c28a4 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1210 00:29:34.195593 887464 certs.go:235] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/20062-527107/.minikube/ca.key
I1210 00:29:34.195656 887464 certs.go:235] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/20062-527107/.minikube/proxy-client-ca.key
I1210 00:29:34.195673 887464 certs.go:256] generating profile certs ...
I1210 00:29:34.195789 887464 certs.go:359] skipping valid signed profile cert regeneration for "minikube-user": /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/newest-cni-451721/client.key
I1210 00:29:34.195881 887464 certs.go:359] skipping valid signed profile cert regeneration for "minikube": /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/newest-cni-451721/apiserver.key.9d7ec933
I1210 00:29:34.195944 887464 certs.go:359] skipping valid signed profile cert regeneration for "aggregator": /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/newest-cni-451721/proxy-client.key
I1210 00:29:34.196096 887464 certs.go:484] found cert: /home/jenkins/minikube-integration/20062-527107/.minikube/certs/533916.pem (1338 bytes)
W1210 00:29:34.196135 887464 certs.go:480] ignoring /home/jenkins/minikube-integration/20062-527107/.minikube/certs/533916_empty.pem, impossibly tiny 0 bytes
I1210 00:29:34.196148 887464 certs.go:484] found cert: /home/jenkins/minikube-integration/20062-527107/.minikube/certs/ca-key.pem (1675 bytes)
I1210 00:29:34.196181 887464 certs.go:484] found cert: /home/jenkins/minikube-integration/20062-527107/.minikube/certs/ca.pem (1082 bytes)
I1210 00:29:34.196218 887464 certs.go:484] found cert: /home/jenkins/minikube-integration/20062-527107/.minikube/certs/cert.pem (1123 bytes)
I1210 00:29:34.196249 887464 certs.go:484] found cert: /home/jenkins/minikube-integration/20062-527107/.minikube/certs/key.pem (1679 bytes)
I1210 00:29:34.196302 887464 certs.go:484] found cert: /home/jenkins/minikube-integration/20062-527107/.minikube/files/etc/ssl/certs/5339162.pem (1708 bytes)
I1210 00:29:34.197251 887464 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20062-527107/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1210 00:29:34.222765 887464 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20062-527107/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1210 00:29:34.248903 887464 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20062-527107/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1210 00:29:34.329829 887464 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20062-527107/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I1210 00:29:34.359022 887464 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/newest-cni-451721/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
I1210 00:29:34.383488 887464 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/newest-cni-451721/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I1210 00:29:34.408011 887464 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/newest-cni-451721/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1210 00:29:34.431914 887464 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20062-527107/.minikube/profiles/newest-cni-451721/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I1210 00:29:34.457411 887464 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20062-527107/.minikube/files/etc/ssl/certs/5339162.pem --> /usr/share/ca-certificates/5339162.pem (1708 bytes)
I1210 00:29:34.485273 887464 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20062-527107/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1210 00:29:34.509422 887464 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20062-527107/.minikube/certs/533916.pem --> /usr/share/ca-certificates/533916.pem (1338 bytes)
I1210 00:29:34.532168 887464 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1210 00:29:34.549326 887464 ssh_runner.go:195] Run: openssl version
I1210 00:29:34.554726 887464 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/533916.pem && ln -fs /usr/share/ca-certificates/533916.pem /etc/ssl/certs/533916.pem"
I1210 00:29:34.564061 887464 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/533916.pem
I1210 00:29:34.567795 887464 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Dec 9 23:51 /usr/share/ca-certificates/533916.pem
I1210 00:29:34.567868 887464 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/533916.pem
I1210 00:29:34.574598 887464 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/533916.pem /etc/ssl/certs/51391683.0"
I1210 00:29:34.583338 887464 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/5339162.pem && ln -fs /usr/share/ca-certificates/5339162.pem /etc/ssl/certs/5339162.pem"
I1210 00:29:34.592492 887464 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/5339162.pem
I1210 00:29:34.595953 887464 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Dec 9 23:51 /usr/share/ca-certificates/5339162.pem
I1210 00:29:34.596010 887464 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/5339162.pem
I1210 00:29:34.602708 887464 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/5339162.pem /etc/ssl/certs/3ec20f2e.0"
I1210 00:29:34.611894 887464 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1210 00:29:34.621005 887464 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1210 00:29:34.624374 887464 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Dec 9 23:44 /usr/share/ca-certificates/minikubeCA.pem
I1210 00:29:34.624434 887464 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1210 00:29:34.630826 887464 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1210 00:29:34.639526 887464 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1210 00:29:34.643259 887464 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-etcd-client.crt -checkend 86400
I1210 00:29:34.649873 887464 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-kubelet-client.crt -checkend 86400
I1210 00:29:34.656724 887464 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/server.crt -checkend 86400
I1210 00:29:34.663508 887464 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/healthcheck-client.crt -checkend 86400
I1210 00:29:34.670632 887464 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/peer.crt -checkend 86400
I1210 00:29:34.677777 887464 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/front-proxy-client.crt -checkend 86400
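[editor's note] Each openssl x509 -checkend 86400 above asks whether a certificate expires within the next 24 hours; a non-zero exit would trigger regeneration before the cluster restart proceeds. The same check in Go, parsing a PEM file passed on the command line (the path is a placeholder; the log checks the files under /var/lib/minikube/certs):

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	data, err := os.ReadFile(os.Args[1]) // e.g. a cert under /var/lib/minikube/certs
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(data) // assumes the file is PEM-encoded
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}
	// Equivalent of: openssl x509 -noout -checkend 86400
	if time.Until(cert.NotAfter) < 24*time.Hour {
		fmt.Println("certificate will expire within 86400s; regenerate")
	} else {
		fmt.Println("certificate is valid for at least another day")
	}
}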
I1210 00:29:34.684421 887464 kubeadm.go:392] StartCluster: {Name:newest-cni-451721 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1730888964-19917@sha256:629a5748e3ec15a091fef12257eb3754b8ffc0c974ebcbb016451c65d1829615 Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.2 ClusterName:newest-cni-451721 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates:ServerSideApply=true ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:kubeadm Key:pod-network-cidr Value:10.42.0.0/16}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.31.2 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:false default_sa:true extra:false kubelet:false node_ready:false system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1210 00:29:34.684548 887464 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I1210 00:29:34.684597 887464 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I1210 00:29:34.720425 887464 cri.go:89] found id: "f45296fe00e1ba175b8a13e74278866fe94fb00fe2e40d2ded889f6424bcd2f6"
I1210 00:29:34.720455 887464 cri.go:89] found id: "7240dea8c0bf6b0e231897a661459ab1e0f16e81578a218747c803ee5c62c882"
I1210 00:29:34.720459 887464 cri.go:89] found id: "f10b4481bb2c33d26ae79853dfb882ccfea515ba70ee60ce80479fac7a775d64"
I1210 00:29:34.720462 887464 cri.go:89] found id: "29bd971069607c5d70cab703e88ecb05ffab6991df6d9130d3abd3f0e48f54fb"
I1210 00:29:34.720464 887464 cri.go:89] found id: "990a3b2339579634ce6a297e958b50d16dd4d62aa467d77c0f621f7de7b20e5e"
I1210 00:29:34.720467 887464 cri.go:89] found id: "1cee27fc4429682ec492552b820dbbe6d9126f776c89400dda68d38002ed05d4"
I1210 00:29:34.720469 887464 cri.go:89] found id: ""
I1210 00:29:34.720569 887464 ssh_runner.go:195] Run: sudo runc --root /run/containerd/runc/k8s.io list -f json
I1210 00:29:34.732756 887464 cri.go:116] JSON = null
W1210 00:29:34.732817 887464 kubeadm.go:399] unpause failed: list paused: list returned 0 containers, but ps returned 6
I1210 00:29:34.732887 887464 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1210 00:29:34.741532 887464 kubeadm.go:408] found existing configuration files, will attempt cluster restart
I1210 00:29:34.741556 887464 kubeadm.go:593] restartPrimaryControlPlane start ...
I1210 00:29:34.741611 887464 ssh_runner.go:195] Run: sudo test -d /data/minikube
I1210 00:29:34.750959 887464 kubeadm.go:130] /data/minikube skipping compat symlinks: sudo test -d /data/minikube: Process exited with status 1
stdout:
stderr:
I1210 00:29:34.751556 887464 kubeconfig.go:47] verify endpoint returned: get endpoint: "newest-cni-451721" does not appear in /home/jenkins/minikube-integration/20062-527107/kubeconfig
I1210 00:29:34.751862 887464 kubeconfig.go:62] /home/jenkins/minikube-integration/20062-527107/kubeconfig needs updating (will repair): [kubeconfig missing "newest-cni-451721" cluster setting kubeconfig missing "newest-cni-451721" context setting]
I1210 00:29:34.752462 887464 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20062-527107/kubeconfig: {Name:mk47c0b52ce4821be2777fdd40884aa11f573a8b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1210 00:29:34.754198 887464 ssh_runner.go:195] Run: sudo diff -u /var/tmp/minikube/kubeadm.yaml /var/tmp/minikube/kubeadm.yaml.new
I1210 00:29:34.764702 887464 kubeadm.go:630] The running cluster does not require reconfiguration: 192.168.76.2
I1210 00:29:34.764766 887464 kubeadm.go:597] duration metric: took 23.189594ms to restartPrimaryControlPlane
I1210 00:29:34.764780 887464 kubeadm.go:394] duration metric: took 80.376259ms to StartCluster
I1210 00:29:34.764801 887464 settings.go:142] acquiring lock: {Name:mk0114e7c414efdfe48670d68c91542cc6018bea Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1210 00:29:34.764879 887464 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/20062-527107/kubeconfig
I1210 00:29:34.765807 887464 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20062-527107/kubeconfig: {Name:mk47c0b52ce4821be2777fdd40884aa11f573a8b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1210 00:29:34.766059 887464 start.go:235] Will wait 6m0s for node &{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.31.2 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1210 00:29:34.766237 887464 addons.go:507] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:true default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:true nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1210 00:29:34.766342 887464 addons.go:69] Setting storage-provisioner=true in profile "newest-cni-451721"
I1210 00:29:34.766386 887464 addons.go:69] Setting default-storageclass=true in profile "newest-cni-451721"
I1210 00:29:34.766403 887464 addons.go:69] Setting metrics-server=true in profile "newest-cni-451721"
I1210 00:29:34.766421 887464 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "newest-cni-451721"
I1210 00:29:34.766434 887464 addons.go:234] Setting addon metrics-server=true in "newest-cni-451721"
I1210 00:29:34.766434 887464 config.go:182] Loaded profile config "newest-cni-451721": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.31.2
W1210 00:29:34.766448 887464 addons.go:243] addon metrics-server should already be in state true
I1210 00:29:34.766434 887464 addons.go:69] Setting dashboard=true in profile "newest-cni-451721"
I1210 00:29:34.766492 887464 addons.go:234] Setting addon dashboard=true in "newest-cni-451721"
I1210 00:29:34.766497 887464 host.go:66] Checking if "newest-cni-451721" exists ...
W1210 00:29:34.766509 887464 addons.go:243] addon dashboard should already be in state true
I1210 00:29:34.766550 887464 host.go:66] Checking if "newest-cni-451721" exists ...
I1210 00:29:34.766800 887464 cli_runner.go:164] Run: docker container inspect newest-cni-451721 --format={{.State.Status}}
I1210 00:29:34.767072 887464 cli_runner.go:164] Run: docker container inspect newest-cni-451721 --format={{.State.Status}}
I1210 00:29:34.767090 887464 addons.go:234] Setting addon storage-provisioner=true in "newest-cni-451721"
W1210 00:29:34.767105 887464 addons.go:243] addon storage-provisioner should already be in state true
I1210 00:29:34.767133 887464 host.go:66] Checking if "newest-cni-451721" exists ...
I1210 00:29:34.767072 887464 cli_runner.go:164] Run: docker container inspect newest-cni-451721 --format={{.State.Status}}
I1210 00:29:34.767642 887464 cli_runner.go:164] Run: docker container inspect newest-cni-451721 --format={{.State.Status}}
I1210 00:29:34.775231 887464 out.go:177] * Verifying Kubernetes components...
I1210 00:29:34.777185 887464 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1210 00:29:34.796791 887464 addons.go:234] Setting addon default-storageclass=true in "newest-cni-451721"
W1210 00:29:34.796822 887464 addons.go:243] addon default-storageclass should already be in state true
I1210 00:29:34.796855 887464 host.go:66] Checking if "newest-cni-451721" exists ...
I1210 00:29:34.797358 887464 cli_runner.go:164] Run: docker container inspect newest-cni-451721 --format={{.State.Status}}
I1210 00:29:34.803100 887464 out.go:177] - Using image docker.io/kubernetesui/dashboard:v2.7.0
I1210 00:29:34.803250 887464 out.go:177] - Using image fake.domain/registry.k8s.io/echoserver:1.4
I1210 00:29:34.803345 887464 out.go:177] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1210 00:29:34.804400 887464 addons.go:431] installing /etc/kubernetes/addons/metrics-apiservice.yaml
I1210 00:29:34.805594 887464 ssh_runner.go:362] scp metrics-server/metrics-apiservice.yaml --> /etc/kubernetes/addons/metrics-apiservice.yaml (424 bytes)
I1210 00:29:34.805664 887464 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" newest-cni-451721
I1210 00:29:34.806737 887464 out.go:177] - Using image registry.k8s.io/echoserver:1.4
I1210 00:29:30.284394 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:29:32.285567 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:29:34.789978 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:29:34.806919 887464 addons.go:431] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1210 00:29:34.806941 887464 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1210 00:29:34.806995 887464 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" newest-cni-451721
I1210 00:29:34.810379 887464 addons.go:431] installing /etc/kubernetes/addons/dashboard-ns.yaml
I1210 00:29:34.810407 887464 ssh_runner.go:362] scp dashboard/dashboard-ns.yaml --> /etc/kubernetes/addons/dashboard-ns.yaml (759 bytes)
I1210 00:29:34.810471 887464 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" newest-cni-451721
I1210 00:29:34.822121 887464 addons.go:431] installing /etc/kubernetes/addons/storageclass.yaml
I1210 00:29:34.822146 887464 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1210 00:29:34.822211 887464 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" newest-cni-451721
I1210 00:29:34.832359 887464 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33635 SSHKeyPath:/home/jenkins/minikube-integration/20062-527107/.minikube/machines/newest-cni-451721/id_rsa Username:docker}
I1210 00:29:34.837230 887464 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33635 SSHKeyPath:/home/jenkins/minikube-integration/20062-527107/.minikube/machines/newest-cni-451721/id_rsa Username:docker}
I1210 00:29:34.848827 887464 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33635 SSHKeyPath:/home/jenkins/minikube-integration/20062-527107/.minikube/machines/newest-cni-451721/id_rsa Username:docker}
I1210 00:29:34.866146 887464 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33635 SSHKeyPath:/home/jenkins/minikube-integration/20062-527107/.minikube/machines/newest-cni-451721/id_rsa Username:docker}
I1210 00:29:35.053589 887464 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1210 00:29:35.130427 887464 api_server.go:52] waiting for apiserver process to appear ...
I1210 00:29:35.130519 887464 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1210 00:29:35.155921 887464 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1210 00:29:35.228590 887464 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1210 00:29:35.230681 887464 addons.go:431] installing /etc/kubernetes/addons/dashboard-clusterrole.yaml
I1210 00:29:35.230714 887464 ssh_runner.go:362] scp dashboard/dashboard-clusterrole.yaml --> /etc/kubernetes/addons/dashboard-clusterrole.yaml (1001 bytes)
I1210 00:29:35.234672 887464 addons.go:431] installing /etc/kubernetes/addons/metrics-server-deployment.yaml
I1210 00:29:35.234700 887464 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-deployment.yaml (1825 bytes)
I1210 00:29:35.259848 887464 addons.go:431] installing /etc/kubernetes/addons/metrics-server-rbac.yaml
I1210 00:29:35.259882 887464 ssh_runner.go:362] scp metrics-server/metrics-server-rbac.yaml --> /etc/kubernetes/addons/metrics-server-rbac.yaml (2175 bytes)
I1210 00:29:35.327740 887464 addons.go:431] installing /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml
I1210 00:29:35.327780 887464 ssh_runner.go:362] scp dashboard/dashboard-clusterrolebinding.yaml --> /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml (1018 bytes)
I1210 00:29:35.433374 887464 addons.go:431] installing /etc/kubernetes/addons/metrics-server-service.yaml
I1210 00:29:35.433420 887464 ssh_runner.go:362] scp metrics-server/metrics-server-service.yaml --> /etc/kubernetes/addons/metrics-server-service.yaml (446 bytes)
I1210 00:29:35.439815 887464 addons.go:431] installing /etc/kubernetes/addons/dashboard-configmap.yaml
I1210 00:29:35.439855 887464 ssh_runner.go:362] scp dashboard/dashboard-configmap.yaml --> /etc/kubernetes/addons/dashboard-configmap.yaml (837 bytes)
I1210 00:29:35.546184 887464 addons.go:431] installing /etc/kubernetes/addons/dashboard-dp.yaml
I1210 00:29:35.546214 887464 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/dashboard-dp.yaml (4201 bytes)
I1210 00:29:35.547308 887464 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
I1210 00:29:35.630764 887464 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
W1210 00:29:35.645619 887464 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
stdout:
stderr:
error: error validating "/etc/kubernetes/addons/storage-provisioner.yaml": error validating data: failed to download openapi: Get "https://localhost:8443/openapi/v2?timeout=32s": dial tcp [::1]:8443: connect: connection refused; if you choose to ignore these errors, turn validation off with --validate=false
I1210 00:29:35.645675 887464 retry.go:31] will retry after 353.055628ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
stdout:
stderr:
error: error validating "/etc/kubernetes/addons/storage-provisioner.yaml": error validating data: failed to download openapi: Get "https://localhost:8443/openapi/v2?timeout=32s": dial tcp [::1]:8443: connect: connection refused; if you choose to ignore these errors, turn validation off with --validate=false
W1210 00:29:35.645766 887464 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
stdout:
stderr:
error: error validating "/etc/kubernetes/addons/storageclass.yaml": error validating data: failed to download openapi: Get "https://localhost:8443/openapi/v2?timeout=32s": dial tcp [::1]:8443: connect: connection refused; if you choose to ignore these errors, turn validation off with --validate=false
I1210 00:29:35.645792 887464 retry.go:31] will retry after 311.554382ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
stdout:
stderr:
error: error validating "/etc/kubernetes/addons/storageclass.yaml": error validating data: failed to download openapi: Get "https://localhost:8443/openapi/v2?timeout=32s": dial tcp [::1]:8443: connect: connection refused; if you choose to ignore these errors, turn validation off with --validate=false
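
Both applies above failed because the apiserver was not yet answering on localhost:8443, and retry.go responds by scheduling another attempt after a sub-second delay ("will retry after 353.055628ms"). A minimal sketch of that retry shape, assuming a jittered fixed base delay; retryWithJitter is illustrative, not minikube's actual retry package.

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// retryWithJitter runs fn up to attempts times, sleeping a jittered delay
// between failures so concurrent appliers do not retry in lockstep.
func retryWithJitter(attempts int, base time.Duration, fn func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = fn(); err == nil {
			return nil
		}
		d := base + time.Duration(rand.Int63n(int64(base)))
		fmt.Printf("will retry after %s: %v\n", d, err)
		time.Sleep(d)
	}
	return err
}

func main() {
	err := retryWithJitter(3, 200*time.Millisecond, func() error {
		return fmt.Errorf("connection refused")
	})
	fmt.Println("gave up:", err)
}
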
I1210 00:29:35.648495 887464 addons.go:431] installing /etc/kubernetes/addons/dashboard-role.yaml
I1210 00:29:35.648527 887464 ssh_runner.go:362] scp dashboard/dashboard-role.yaml --> /etc/kubernetes/addons/dashboard-role.yaml (1724 bytes)
I1210 00:29:35.746716 887464 addons.go:431] installing /etc/kubernetes/addons/dashboard-rolebinding.yaml
I1210 00:29:35.746752 887464 ssh_runner.go:362] scp dashboard/dashboard-rolebinding.yaml --> /etc/kubernetes/addons/dashboard-rolebinding.yaml (1046 bytes)
I1210 00:29:35.834393 887464 addons.go:431] installing /etc/kubernetes/addons/dashboard-sa.yaml
I1210 00:29:35.834427 887464 ssh_runner.go:362] scp dashboard/dashboard-sa.yaml --> /etc/kubernetes/addons/dashboard-sa.yaml (837 bytes)
I1210 00:29:35.858323 887464 addons.go:431] installing /etc/kubernetes/addons/dashboard-secret.yaml
I1210 00:29:35.858353 887464 ssh_runner.go:362] scp dashboard/dashboard-secret.yaml --> /etc/kubernetes/addons/dashboard-secret.yaml (1389 bytes)
I1210 00:29:35.939882 887464 addons.go:431] installing /etc/kubernetes/addons/dashboard-svc.yaml
I1210 00:29:35.939911 887464 ssh_runner.go:362] scp dashboard/dashboard-svc.yaml --> /etc/kubernetes/addons/dashboard-svc.yaml (1294 bytes)
I1210 00:29:35.957491 887464 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
I1210 00:29:35.957605 887464 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml
I1210 00:29:35.999237 887464 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml
I1210 00:29:37.285571 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:29:39.785467 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:29:40.933317 887464 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: (5.385951285s)
I1210 00:29:40.933376 887464 addons.go:475] Verifying addon metrics-server=true in "newest-cni-451721"
I1210 00:29:40.933328 887464 ssh_runner.go:235] Completed: sudo pgrep -xnf kube-apiserver.*minikube.*: (5.302534863s)
I1210 00:29:40.933410 887464 api_server.go:72] duration metric: took 6.167314054s to wait for apiserver process to appear ...
I1210 00:29:40.933423 887464 api_server.go:88] waiting for apiserver healthz status ...
I1210 00:29:40.933446 887464 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1210 00:29:40.938900 887464 api_server.go:279] https://192.168.76.2:8443/healthz returned 200:
ok
I1210 00:29:40.945648 887464 api_server.go:141] control plane version: v1.31.2
I1210 00:29:40.945689 887464 api_server.go:131] duration metric: took 12.258143ms to wait for apiserver health ...
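
The healthz probe above is a plain HTTPS GET that counts as healthy on a 200 with body "ok". A sketch of that check, assuming certificate verification can be skipped for a probe (the real minikube client authenticates with the cluster's own certs):

package main

import (
	"crypto/tls"
	"fmt"
	"io"
	"net/http"
	"time"
)

// checkHealthz GETs the apiserver healthz endpoint and reports non-200s.
func checkHealthz(url string) error {
	client := &http.Client{
		Timeout:   5 * time.Second,
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}
	resp, err := client.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("healthz returned %d: %s", resp.StatusCode, body)
	}
	fmt.Printf("%s returned 200:\n%s\n", url, body)
	return nil
}

func main() {
	_ = checkHealthz("https://192.168.76.2:8443/healthz")
}
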
I1210 00:29:40.945702 887464 system_pods.go:43] waiting for kube-system pods to appear ...
I1210 00:29:40.955030 887464 system_pods.go:59] 9 kube-system pods found
I1210 00:29:40.955077 887464 system_pods.go:61] "coredns-7c65d6cfc9-4g9ws" [d880636c-3f52-4266-b53b-588922ffa1a7] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1210 00:29:40.955107 887464 system_pods.go:61] "etcd-newest-cni-451721" [23b0abb1-873a-4c7e-9b2d-d37c891d429b] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I1210 00:29:40.955119 887464 system_pods.go:61] "kindnet-bgv7c" [9a246ce8-7f88-4a61-99b8-5003d2988222] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I1210 00:29:40.955138 887464 system_pods.go:61] "kube-apiserver-newest-cni-451721" [0345a79b-8c71-45ac-b297-08882c7c9420] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I1210 00:29:40.955150 887464 system_pods.go:61] "kube-controller-manager-newest-cni-451721" [5775aac2-64af-4cbb-a8b9-93c7f5c9f7d3] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I1210 00:29:40.955156 887464 system_pods.go:61] "kube-proxy-6xl4q" [c7d3ada4-2dd1-433a-a2ec-93f2633cca61] Running
I1210 00:29:40.955165 887464 system_pods.go:61] "kube-scheduler-newest-cni-451721" [4c415b07-95d5-47f6-b7f3-ed552712e94f] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I1210 00:29:40.955177 887464 system_pods.go:61] "metrics-server-6867b74b74-zftqz" [b1947c40-5f55-435f-ba46-caae73951f90] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
I1210 00:29:40.955186 887464 system_pods.go:61] "storage-provisioner" [a9dbb794-3767-4fc5-8ea5-4fffbb6105d3] Running
I1210 00:29:40.955195 887464 system_pods.go:74] duration metric: took 9.485295ms to wait for pod list to return data ...
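
The pod survey above (phase plus per-container readiness) is what a client-go list of kube-system pods yields. A minimal sketch, assuming the kubeconfig path from the log and with error handling pared down to panics for brevity:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/var/lib/minikube/kubeconfig")
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	pods, err := cs.CoreV1().Pods("kube-system").List(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d kube-system pods found\n", len(pods.Items))
	for _, p := range pods.Items {
		ready := "Unknown"
		for _, c := range p.Status.Conditions {
			if c.Type == "Ready" {
				ready = string(c.Status)
			}
		}
		fmt.Printf("%q %s / Ready:%s\n", p.Name, p.Status.Phase, ready)
	}
}
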
I1210 00:29:40.955209 887464 default_sa.go:34] waiting for default service account to be created ...
I1210 00:29:40.958794 887464 default_sa.go:45] found service account: "default"
I1210 00:29:40.958827 887464 default_sa.go:55] duration metric: took 3.606033ms for default service account to be created ...
I1210 00:29:40.958889 887464 kubeadm.go:582] duration metric: took 6.192791752s to wait for: map[apiserver:true apps_running:false default_sa:true extra:false kubelet:false node_ready:false system_pods:true]
I1210 00:29:40.958912 887464 node_conditions.go:102] verifying NodePressure condition ...
I1210 00:29:40.962661 887464 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I1210 00:29:40.962695 887464 node_conditions.go:123] node cpu capacity is 8
I1210 00:29:40.962721 887464 node_conditions.go:105] duration metric: took 3.791678ms to run NodePressure ...
I1210 00:29:40.962735 887464 start.go:241] waiting for startup goroutines ...
I1210 00:29:41.042991 887464 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: (5.085448346s)
I1210 00:29:41.043039 887464 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: (5.085379809s)
I1210 00:29:41.043119 887464 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: (5.043845229s)
I1210 00:29:41.044774 887464 out.go:177] * Some dashboard features require the metrics-server addon. To enable all features please run:
minikube -p newest-cni-451721 addons enable metrics-server
I1210 00:29:41.049622 887464 out.go:177] * Enabled addons: metrics-server, storage-provisioner, dashboard, default-storageclass
I1210 00:29:41.051006 887464 addons.go:510] duration metric: took 6.284770091s for enable addons: enabled=[metrics-server storage-provisioner dashboard default-storageclass]
I1210 00:29:41.051059 887464 start.go:246] waiting for cluster config update ...
I1210 00:29:41.051074 887464 start.go:255] writing updated cluster config ...
I1210 00:29:41.051387 887464 ssh_runner.go:195] Run: rm -f paused
I1210 00:29:41.102339 887464 start.go:600] kubectl: 1.31.3, cluster: 1.31.2 (minor skew: 0)
I1210 00:29:41.104014 887464 out.go:177] * Done! kubectl is now configured to use "newest-cni-451721" cluster and "default" namespace by default
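
The last line of this start records the client/cluster version skew; kubectl officially supports one minor version of skew against the apiserver, so minikube surfaces the minor delta. A toy sketch of that comparison, assuming well-formed "major.minor.patch" input (minorSkew is illustrative; real version parsing should use a semver library):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// minorSkew compares only the minor component of two version strings.
func minorSkew(kubectlVer, clusterVer string) int {
	minor := func(v string) int {
		m, _ := strconv.Atoi(strings.Split(v, ".")[1])
		return m
	}
	d := minor(kubectlVer) - minor(clusterVer)
	if d < 0 {
		d = -d
	}
	return d
}

func main() {
	fmt.Println(minorSkew("1.31.3", "1.31.2")) // prints 0, matching the log
}
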
I1210 00:29:41.786195 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:29:44.285629 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:29:46.286997 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:29:48.784966 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:29:50.785565 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:29:53.287484 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:29:55.785238 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:29:58.285947 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:00.784947 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:02.785627 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:05.285425 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:07.785233 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:10.285053 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:12.785346 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:15.284585 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:17.285141 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:19.785686 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:21.786049 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:24.285409 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:26.286022 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:28.785097 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:30.785142 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:33.285545 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:35.785510 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:38.284978 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:40.784638 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:42.784707 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:44.784825 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:47.285489 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:49.286104 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:51.785143 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:53.786017 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:56.285495 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:30:58.285964 869958 pod_ready.go:103] pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace has status "Ready":"False"
I1210 00:31:00.285755 869958 pod_ready.go:82] duration metric: took 4m0.006380848s for pod "metrics-server-9975d5f86-9wg6p" in "kube-system" namespace to be "Ready" ...
E1210 00:31:00.285781 869958 pod_ready.go:67] WaitExtra: waitPodCondition: context deadline exceeded
I1210 00:31:00.285790 869958 pod_ready.go:39] duration metric: took 5m30.751897187s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
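
The failed wait above is the standard bounded-poll shape: check a condition on a ticker until it holds or the context's 4-minute deadline fires, at which point ctx.Err() is the "context deadline exceeded" seen in the log. A sketch with an illustrative predicate (waitPodReady is not minikube's API):

package main

import (
	"context"
	"fmt"
	"time"
)

// waitPodReady polls isReady every two seconds until it returns true or the
// context expires, wrapping the context error the way the log line does.
func waitPodReady(ctx context.Context, isReady func() bool) error {
	tick := time.NewTicker(2 * time.Second)
	defer tick.Stop()
	for {
		if isReady() {
			return nil
		}
		select {
		case <-ctx.Done():
			return fmt.Errorf("WaitExtra: waitPodCondition: %w", ctx.Err())
		case <-tick.C:
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 4*time.Minute)
	defer cancel()
	_ = waitPodReady(ctx, func() bool { return false })
}
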
I1210 00:31:00.285822 869958 api_server.go:52] waiting for apiserver process to appear ...
I1210 00:31:00.285858 869958 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1210 00:31:00.285917 869958 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1210 00:31:00.324417 869958 cri.go:89] found id: "9be25993b65b8cdca34c64615c37d67ed96191f7e935d4aa5f3f20b8a71af72d"
I1210 00:31:00.324440 869958 cri.go:89] found id: ""
I1210 00:31:00.324448 869958 logs.go:282] 1 containers: [9be25993b65b8cdca34c64615c37d67ed96191f7e935d4aa5f3f20b8a71af72d]
I1210 00:31:00.324499 869958 ssh_runner.go:195] Run: which crictl
I1210 00:31:00.328595 869958 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1210 00:31:00.328691 869958 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1210 00:31:00.364828 869958 cri.go:89] found id: "de4e779f2f1e9dbb4a147473498f66677a264e01c0c74453fc0137f378cf8ae2"
I1210 00:31:00.364857 869958 cri.go:89] found id: ""
I1210 00:31:00.364868 869958 logs.go:282] 1 containers: [de4e779f2f1e9dbb4a147473498f66677a264e01c0c74453fc0137f378cf8ae2]
I1210 00:31:00.364938 869958 ssh_runner.go:195] Run: which crictl
I1210 00:31:00.368615 869958 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1210 00:31:00.368696 869958 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1210 00:31:00.403140 869958 cri.go:89] found id: "d00996a4380040decb2e6f3c9bcc65ff7f12c74a6f6817167177166144b883f0"
I1210 00:31:00.403164 869958 cri.go:89] found id: ""
I1210 00:31:00.403174 869958 logs.go:282] 1 containers: [d00996a4380040decb2e6f3c9bcc65ff7f12c74a6f6817167177166144b883f0]
I1210 00:31:00.403233 869958 ssh_runner.go:195] Run: which crictl
I1210 00:31:00.406693 869958 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1210 00:31:00.406754 869958 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1210 00:31:00.440261 869958 cri.go:89] found id: "e24a785fdbd96316943085ea3d97c2bbf5698967fcac01b25a6a185f04e80b07"
I1210 00:31:00.440286 869958 cri.go:89] found id: ""
I1210 00:31:00.440294 869958 logs.go:282] 1 containers: [e24a785fdbd96316943085ea3d97c2bbf5698967fcac01b25a6a185f04e80b07]
I1210 00:31:00.440356 869958 ssh_runner.go:195] Run: which crictl
I1210 00:31:00.443836 869958 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1210 00:31:00.443908 869958 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1210 00:31:00.478920 869958 cri.go:89] found id: "930a4290304a3700a3b34bb588be1a8cb0fc8fc88f3c3adb4bc89453498c1ba1"
I1210 00:31:00.478945 869958 cri.go:89] found id: ""
I1210 00:31:00.478955 869958 logs.go:282] 1 containers: [930a4290304a3700a3b34bb588be1a8cb0fc8fc88f3c3adb4bc89453498c1ba1]
I1210 00:31:00.479020 869958 ssh_runner.go:195] Run: which crictl
I1210 00:31:00.482648 869958 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1210 00:31:00.482713 869958 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1210 00:31:00.517931 869958 cri.go:89] found id: "7f21b5ae0b202880d6305e4c384b15e18f96a0e7d3fdf3efa45355a2af113e82"
I1210 00:31:00.517959 869958 cri.go:89] found id: ""
I1210 00:31:00.517969 869958 logs.go:282] 1 containers: [7f21b5ae0b202880d6305e4c384b15e18f96a0e7d3fdf3efa45355a2af113e82]
I1210 00:31:00.518027 869958 ssh_runner.go:195] Run: which crictl
I1210 00:31:00.522393 869958 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1210 00:31:00.522470 869958 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1210 00:31:00.558076 869958 cri.go:89] found id: "1436cfab0a61117409dfbc149a9ed46cffc35222a59e469b4357eb8eeb006a1a"
I1210 00:31:00.558099 869958 cri.go:89] found id: ""
I1210 00:31:00.558107 869958 logs.go:282] 1 containers: [1436cfab0a61117409dfbc149a9ed46cffc35222a59e469b4357eb8eeb006a1a]
I1210 00:31:00.558159 869958 ssh_runner.go:195] Run: which crictl
I1210 00:31:00.561741 869958 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1210 00:31:00.561812 869958 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1210 00:31:00.598626 869958 cri.go:89] found id: "b9cf656a0d778fa858636c57f7ed856932e9c797614d0b2a0bb2b7b183d0444e"
I1210 00:31:00.598664 869958 cri.go:89] found id: "5ef64915e71a0a82e907f051bd349d35990910b7707e2189239897f76b8fcf24"
I1210 00:31:00.598674 869958 cri.go:89] found id: ""
I1210 00:31:00.598682 869958 logs.go:282] 2 containers: [b9cf656a0d778fa858636c57f7ed856932e9c797614d0b2a0bb2b7b183d0444e 5ef64915e71a0a82e907f051bd349d35990910b7707e2189239897f76b8fcf24]
I1210 00:31:00.598746 869958 ssh_runner.go:195] Run: which crictl
I1210 00:31:00.602345 869958 ssh_runner.go:195] Run: which crictl
I1210 00:31:00.605648 869958 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kubernetes-dashboard Namespaces:[]}
I1210 00:31:00.605713 869958 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kubernetes-dashboard
I1210 00:31:00.638537 869958 cri.go:89] found id: "71efdd7b3ff7301fc234121aa9b522c561e17518296c9ac0f096808fd717194e"
I1210 00:31:00.638564 869958 cri.go:89] found id: ""
I1210 00:31:00.638574 869958 logs.go:282] 1 containers: [71efdd7b3ff7301fc234121aa9b522c561e17518296c9ac0f096808fd717194e]
I1210 00:31:00.638635 869958 ssh_runner.go:195] Run: which crictl
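
Each discovery step above is the same two-command dance: crictl ps -a --quiet --name=X prints zero or more container IDs, one per line, and each non-empty line becomes a "found id" entry. A local sketch of that listing (listCRIContainers is an illustrative name):

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// listCRIContainers collects the container IDs that crictl prints one per
// line; empty output means no matching containers.
func listCRIContainers(name string) ([]string, error) {
	out, err := exec.Command("sudo", "crictl", "ps", "-a", "--quiet", "--name="+name).Output()
	if err != nil {
		return nil, err
	}
	var ids []string
	for _, line := range strings.Split(strings.TrimSpace(string(out)), "\n") {
		if line != "" {
			ids = append(ids, line)
		}
	}
	return ids, nil
}

func main() {
	ids, err := listCRIContainers("kube-apiserver")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("%d containers: %v\n", len(ids), ids)
}
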
I1210 00:31:00.642267 869958 logs.go:123] Gathering logs for kubelet ...
I1210 00:31:00.642297 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
W1210 00:31:00.684072 869958 logs.go:138] Found kubelet problem: Dec 10 00:25:36 old-k8s-version-280963 kubelet[1066]: E1210 00:25:36.013371 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = failed to pull and unpack image \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to resolve reference \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to do request: Head \"https://fake.domain/v2/registry.k8s.io/echoserver/manifests/1.4\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
W1210 00:31:00.684251 869958 logs.go:138] Found kubelet problem: Dec 10 00:25:36 old-k8s-version-280963 kubelet[1066]: E1210 00:25:36.276847 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.686239 869958 logs.go:138] Found kubelet problem: Dec 10 00:25:49 old-k8s-version-280963 kubelet[1066]: E1210 00:25:49.092116 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = failed to pull and unpack image \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to resolve reference \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to do request: Head \"https://fake.domain/v2/registry.k8s.io/echoserver/manifests/1.4\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
W1210 00:31:00.687741 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:00 old-k8s-version-280963 kubelet[1066]: E1210 00:26:00.341400 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 10s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.687978 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:01 old-k8s-version-280963 kubelet[1066]: E1210 00:26:01.348361 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 10s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.688111 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:02 old-k8s-version-280963 kubelet[1066]: E1210 00:26:02.063829 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.688445 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:02 old-k8s-version-280963 kubelet[1066]: E1210 00:26:02.351893 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 10s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.690436 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:14 old-k8s-version-280963 kubelet[1066]: E1210 00:26:14.082796 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = failed to pull and unpack image \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to resolve reference \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to do request: Head \"https://fake.domain/v2/registry.k8s.io/echoserver/manifests/1.4\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
W1210 00:31:00.691134 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:16 old-k8s-version-280963 kubelet[1066]: E1210 00:26:16.385007 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.691375 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:20 old-k8s-version-280963 kubelet[1066]: E1210 00:26:20.929425 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.691523 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:25 old-k8s-version-280963 kubelet[1066]: E1210 00:26:25.063820 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.691758 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:32 old-k8s-version-280963 kubelet[1066]: E1210 00:26:32.063614 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.691889 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:37 old-k8s-version-280963 kubelet[1066]: E1210 00:26:37.063859 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.692313 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:45 old-k8s-version-280963 kubelet[1066]: E1210 00:26:45.451805 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.692572 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:50 old-k8s-version-280963 kubelet[1066]: E1210 00:26:50.929571 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.692717 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:51 old-k8s-version-280963 kubelet[1066]: E1210 00:26:51.063691 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.692950 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:04 old-k8s-version-280963 kubelet[1066]: E1210 00:27:04.063486 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.694659 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:06 old-k8s-version-280963 kubelet[1066]: E1210 00:27:06.100485 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = failed to pull and unpack image \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to resolve reference \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to do request: Head \"https://fake.domain/v2/registry.k8s.io/echoserver/manifests/1.4\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
W1210 00:31:00.694960 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:16 old-k8s-version-280963 kubelet[1066]: E1210 00:27:16.063301 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.695110 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:18 old-k8s-version-280963 kubelet[1066]: E1210 00:27:18.063936 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.695245 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:29 old-k8s-version-280963 kubelet[1066]: E1210 00:27:29.063910 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.695668 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:30 old-k8s-version-280963 kubelet[1066]: E1210 00:27:30.551122 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 1m20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.695901 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:31 old-k8s-version-280963 kubelet[1066]: E1210 00:27:31.554624 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 1m20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.696137 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:42 old-k8s-version-280963 kubelet[1066]: E1210 00:27:42.063651 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 1m20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.696291 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:43 old-k8s-version-280963 kubelet[1066]: E1210 00:27:43.063770 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.696535 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:54 old-k8s-version-280963 kubelet[1066]: E1210 00:27:54.063558 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 1m20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.696667 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:55 old-k8s-version-280963 kubelet[1066]: E1210 00:27:55.063561 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.696899 869958 logs.go:138] Found kubelet problem: Dec 10 00:28:05 old-k8s-version-280963 kubelet[1066]: E1210 00:28:05.063379 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 1m20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.697036 869958 logs.go:138] Found kubelet problem: Dec 10 00:28:10 old-k8s-version-280963 kubelet[1066]: E1210 00:28:10.063837 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.697268 869958 logs.go:138] Found kubelet problem: Dec 10 00:28:18 old-k8s-version-280963 kubelet[1066]: E1210 00:28:18.063477 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 1m20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.697399 869958 logs.go:138] Found kubelet problem: Dec 10 00:28:23 old-k8s-version-280963 kubelet[1066]: E1210 00:28:23.063704 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.697631 869958 logs.go:138] Found kubelet problem: Dec 10 00:28:29 old-k8s-version-280963 kubelet[1066]: E1210 00:28:29.063218 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 1m20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.699384 869958 logs.go:138] Found kubelet problem: Dec 10 00:28:36 old-k8s-version-280963 kubelet[1066]: E1210 00:28:36.089234 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = failed to pull and unpack image \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to resolve reference \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to do request: Head \"https://fake.domain/v2/registry.k8s.io/echoserver/manifests/1.4\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
W1210 00:31:00.699619 869958 logs.go:138] Found kubelet problem: Dec 10 00:28:40 old-k8s-version-280963 kubelet[1066]: E1210 00:28:40.063266 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 1m20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.699750 869958 logs.go:138] Found kubelet problem: Dec 10 00:28:50 old-k8s-version-280963 kubelet[1066]: E1210 00:28:50.063870 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.700169 869958 logs.go:138] Found kubelet problem: Dec 10 00:28:55 old-k8s-version-280963 kubelet[1066]: E1210 00:28:55.726230 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.700403 869958 logs.go:138] Found kubelet problem: Dec 10 00:29:00 old-k8s-version-280963 kubelet[1066]: E1210 00:29:00.929346 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.700534 869958 logs.go:138] Found kubelet problem: Dec 10 00:29:01 old-k8s-version-280963 kubelet[1066]: E1210 00:29:01.063931 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.700665 869958 logs.go:138] Found kubelet problem: Dec 10 00:29:12 old-k8s-version-280963 kubelet[1066]: E1210 00:29:12.063883 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.700897 869958 logs.go:138] Found kubelet problem: Dec 10 00:29:13 old-k8s-version-280963 kubelet[1066]: E1210 00:29:13.063415 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.701157 869958 logs.go:138] Found kubelet problem: Dec 10 00:29:25 old-k8s-version-280963 kubelet[1066]: E1210 00:29:25.063471 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.701316 869958 logs.go:138] Found kubelet problem: Dec 10 00:29:25 old-k8s-version-280963 kubelet[1066]: E1210 00:29:25.063913 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.701550 869958 logs.go:138] Found kubelet problem: Dec 10 00:29:36 old-k8s-version-280963 kubelet[1066]: E1210 00:29:36.063693 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.701682 869958 logs.go:138] Found kubelet problem: Dec 10 00:29:36 old-k8s-version-280963 kubelet[1066]: E1210 00:29:36.063872 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.701914 869958 logs.go:138] Found kubelet problem: Dec 10 00:29:49 old-k8s-version-280963 kubelet[1066]: E1210 00:29:49.063306 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.702050 869958 logs.go:138] Found kubelet problem: Dec 10 00:29:50 old-k8s-version-280963 kubelet[1066]: E1210 00:29:50.063838 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.702287 869958 logs.go:138] Found kubelet problem: Dec 10 00:30:03 old-k8s-version-280963 kubelet[1066]: E1210 00:30:03.063224 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.702419 869958 logs.go:138] Found kubelet problem: Dec 10 00:30:05 old-k8s-version-280963 kubelet[1066]: E1210 00:30:05.063807 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.702550 869958 logs.go:138] Found kubelet problem: Dec 10 00:30:17 old-k8s-version-280963 kubelet[1066]: E1210 00:30:17.063774 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.702784 869958 logs.go:138] Found kubelet problem: Dec 10 00:30:18 old-k8s-version-280963 kubelet[1066]: E1210 00:30:18.063380 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.703080 869958 logs.go:138] Found kubelet problem: Dec 10 00:30:29 old-k8s-version-280963 kubelet[1066]: E1210 00:30:29.063392 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.703219 869958 logs.go:138] Found kubelet problem: Dec 10 00:30:32 old-k8s-version-280963 kubelet[1066]: E1210 00:30:32.063891 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.703456 869958 logs.go:138] Found kubelet problem: Dec 10 00:30:41 old-k8s-version-280963 kubelet[1066]: E1210 00:30:41.063206 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.703587 869958 logs.go:138] Found kubelet problem: Dec 10 00:30:44 old-k8s-version-280963 kubelet[1066]: E1210 00:30:44.064113 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:00.703818 869958 logs.go:138] Found kubelet problem: Dec 10 00:30:54 old-k8s-version-280963 kubelet[1066]: E1210 00:30:54.063639 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:00.703952 869958 logs.go:138] Found kubelet problem: Dec 10 00:30:56 old-k8s-version-280963 kubelet[1066]: E1210 00:30:56.063846 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
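
The block above comes from scanning the kubelet journal for lines that match known-problem patterns. A reduced sketch, assuming a single pattern for pod sync errors where minikube's logs.go maintains a longer list of expressions:

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"os/exec"
	"regexp"
)

// findKubeletProblems dumps the last 400 kubelet journal lines and returns
// the ones that look like pod sync failures.
func findKubeletProblems() ([]string, error) {
	out, err := exec.Command("/bin/bash", "-c", "sudo journalctl -u kubelet -n 400").Output()
	if err != nil {
		return nil, err
	}
	problem := regexp.MustCompile(`Error syncing pod`)
	var found []string
	sc := bufio.NewScanner(bytes.NewReader(out))
	for sc.Scan() {
		if problem.MatchString(sc.Text()) {
			found = append(found, sc.Text())
		}
	}
	return found, sc.Err()
}

func main() {
	problems, err := findKubeletProblems()
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, p := range problems {
		fmt.Println("Found kubelet problem:", p)
	}
}
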
I1210 00:31:00.703964 869958 logs.go:123] Gathering logs for kube-proxy [930a4290304a3700a3b34bb588be1a8cb0fc8fc88f3c3adb4bc89453498c1ba1] ...
I1210 00:31:00.703989 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 930a4290304a3700a3b34bb588be1a8cb0fc8fc88f3c3adb4bc89453498c1ba1"
I1210 00:31:00.739278 869958 logs.go:123] Gathering logs for kube-controller-manager [7f21b5ae0b202880d6305e4c384b15e18f96a0e7d3fdf3efa45355a2af113e82] ...
I1210 00:31:00.739323 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 7f21b5ae0b202880d6305e4c384b15e18f96a0e7d3fdf3efa45355a2af113e82"
I1210 00:31:00.809749 869958 logs.go:123] Gathering logs for storage-provisioner [5ef64915e71a0a82e907f051bd349d35990910b7707e2189239897f76b8fcf24] ...
I1210 00:31:00.809793 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 5ef64915e71a0a82e907f051bd349d35990910b7707e2189239897f76b8fcf24"
I1210 00:31:00.843898 869958 logs.go:123] Gathering logs for containerd ...
I1210 00:31:00.843932 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1210 00:31:00.905189 869958 logs.go:123] Gathering logs for kube-apiserver [9be25993b65b8cdca34c64615c37d67ed96191f7e935d4aa5f3f20b8a71af72d] ...
I1210 00:31:00.905248 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 9be25993b65b8cdca34c64615c37d67ed96191f7e935d4aa5f3f20b8a71af72d"
I1210 00:31:00.975171 869958 logs.go:123] Gathering logs for kube-scheduler [e24a785fdbd96316943085ea3d97c2bbf5698967fcac01b25a6a185f04e80b07] ...
I1210 00:31:00.975214 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 e24a785fdbd96316943085ea3d97c2bbf5698967fcac01b25a6a185f04e80b07"
I1210 00:31:01.018685 869958 logs.go:123] Gathering logs for kubernetes-dashboard [71efdd7b3ff7301fc234121aa9b522c561e17518296c9ac0f096808fd717194e] ...
I1210 00:31:01.018727 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 71efdd7b3ff7301fc234121aa9b522c561e17518296c9ac0f096808fd717194e"
I1210 00:31:01.055194 869958 logs.go:123] Gathering logs for dmesg ...
I1210 00:31:01.055228 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1210 00:31:01.082490 869958 logs.go:123] Gathering logs for describe nodes ...
I1210 00:31:01.082531 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.20.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
I1210 00:31:01.188477 869958 logs.go:123] Gathering logs for etcd [de4e779f2f1e9dbb4a147473498f66677a264e01c0c74453fc0137f378cf8ae2] ...
I1210 00:31:01.188515 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 de4e779f2f1e9dbb4a147473498f66677a264e01c0c74453fc0137f378cf8ae2"
I1210 00:31:01.231162 869958 logs.go:123] Gathering logs for kindnet [1436cfab0a61117409dfbc149a9ed46cffc35222a59e469b4357eb8eeb006a1a] ...
I1210 00:31:01.231200 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 1436cfab0a61117409dfbc149a9ed46cffc35222a59e469b4357eb8eeb006a1a"
I1210 00:31:01.270495 869958 logs.go:123] Gathering logs for storage-provisioner [b9cf656a0d778fa858636c57f7ed856932e9c797614d0b2a0bb2b7b183d0444e] ...
I1210 00:31:01.270532 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 b9cf656a0d778fa858636c57f7ed856932e9c797614d0b2a0bb2b7b183d0444e"
I1210 00:31:01.304676 869958 logs.go:123] Gathering logs for container status ...
I1210 00:31:01.304717 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1210 00:31:01.342082 869958 logs.go:123] Gathering logs for coredns [d00996a4380040decb2e6f3c9bcc65ff7f12c74a6f6817167177166144b883f0] ...
I1210 00:31:01.342114 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 d00996a4380040decb2e6f3c9bcc65ff7f12c74a6f6817167177166144b883f0"
I1210 00:31:01.377229 869958 out.go:358] Setting ErrFile to fd 2...
I1210 00:31:01.377257 869958 out.go:392] TERM=,COLORTERM=, which probably does not support color
W1210 00:31:01.377336 869958 out.go:270] X Problems detected in kubelet:
W1210 00:31:01.377354 869958 out.go:270] Dec 10 00:30:32 old-k8s-version-280963 kubelet[1066]: E1210 00:30:32.063891 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:01.377363 869958 out.go:270] Dec 10 00:30:41 old-k8s-version-280963 kubelet[1066]: E1210 00:30:41.063206 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:01.377375 869958 out.go:270] Dec 10 00:30:44 old-k8s-version-280963 kubelet[1066]: E1210 00:30:44.064113 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:01.377384 869958 out.go:270] Dec 10 00:30:54 old-k8s-version-280963 kubelet[1066]: E1210 00:30:54.063639 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:01.377397 869958 out.go:270] Dec 10 00:30:56 old-k8s-version-280963 kubelet[1066]: E1210 00:30:56.063846 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
I1210 00:31:01.377405 869958 out.go:358] Setting ErrFile to fd 2...
I1210 00:31:01.377416 869958 out.go:392] TERM=,COLORTERM=, which probably does not support color
I1210 00:31:11.378266 869958 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1210 00:31:11.390994 869958 api_server.go:72] duration metric: took 5m52.278015509s to wait for apiserver process to appear ...
I1210 00:31:11.391028 869958 api_server.go:88] waiting for apiserver healthz status ...
I1210 00:31:11.391084 869958 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1210 00:31:11.391155 869958 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1210 00:31:11.425078 869958 cri.go:89] found id: "9be25993b65b8cdca34c64615c37d67ed96191f7e935d4aa5f3f20b8a71af72d"
I1210 00:31:11.425104 869958 cri.go:89] found id: ""
I1210 00:31:11.425113 869958 logs.go:282] 1 containers: [9be25993b65b8cdca34c64615c37d67ed96191f7e935d4aa5f3f20b8a71af72d]
I1210 00:31:11.425183 869958 ssh_runner.go:195] Run: which crictl
I1210 00:31:11.428759 869958 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1210 00:31:11.428836 869958 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1210 00:31:11.463276 869958 cri.go:89] found id: "de4e779f2f1e9dbb4a147473498f66677a264e01c0c74453fc0137f378cf8ae2"
I1210 00:31:11.463305 869958 cri.go:89] found id: ""
I1210 00:31:11.463313 869958 logs.go:282] 1 containers: [de4e779f2f1e9dbb4a147473498f66677a264e01c0c74453fc0137f378cf8ae2]
I1210 00:31:11.463360 869958 ssh_runner.go:195] Run: which crictl
I1210 00:31:11.467102 869958 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1210 00:31:11.467171 869958 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1210 00:31:11.503957 869958 cri.go:89] found id: "d00996a4380040decb2e6f3c9bcc65ff7f12c74a6f6817167177166144b883f0"
I1210 00:31:11.504006 869958 cri.go:89] found id: ""
I1210 00:31:11.504016 869958 logs.go:282] 1 containers: [d00996a4380040decb2e6f3c9bcc65ff7f12c74a6f6817167177166144b883f0]
I1210 00:31:11.504079 869958 ssh_runner.go:195] Run: which crictl
I1210 00:31:11.507966 869958 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1210 00:31:11.508041 869958 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1210 00:31:11.542392 869958 cri.go:89] found id: "e24a785fdbd96316943085ea3d97c2bbf5698967fcac01b25a6a185f04e80b07"
I1210 00:31:11.542415 869958 cri.go:89] found id: ""
I1210 00:31:11.542422 869958 logs.go:282] 1 containers: [e24a785fdbd96316943085ea3d97c2bbf5698967fcac01b25a6a185f04e80b07]
I1210 00:31:11.542484 869958 ssh_runner.go:195] Run: which crictl
I1210 00:31:11.546043 869958 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1210 00:31:11.546105 869958 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1210 00:31:11.583274 869958 cri.go:89] found id: "930a4290304a3700a3b34bb588be1a8cb0fc8fc88f3c3adb4bc89453498c1ba1"
I1210 00:31:11.583305 869958 cri.go:89] found id: ""
I1210 00:31:11.583316 869958 logs.go:282] 1 containers: [930a4290304a3700a3b34bb588be1a8cb0fc8fc88f3c3adb4bc89453498c1ba1]
I1210 00:31:11.583376 869958 ssh_runner.go:195] Run: which crictl
I1210 00:31:11.587533 869958 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1210 00:31:11.587622 869958 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1210 00:31:11.622287 869958 cri.go:89] found id: "7f21b5ae0b202880d6305e4c384b15e18f96a0e7d3fdf3efa45355a2af113e82"
I1210 00:31:11.622329 869958 cri.go:89] found id: ""
I1210 00:31:11.622338 869958 logs.go:282] 1 containers: [7f21b5ae0b202880d6305e4c384b15e18f96a0e7d3fdf3efa45355a2af113e82]
I1210 00:31:11.622399 869958 ssh_runner.go:195] Run: which crictl
I1210 00:31:11.626227 869958 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1210 00:31:11.626300 869958 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1210 00:31:11.661096 869958 cri.go:89] found id: "1436cfab0a61117409dfbc149a9ed46cffc35222a59e469b4357eb8eeb006a1a"
I1210 00:31:11.661119 869958 cri.go:89] found id: ""
I1210 00:31:11.661126 869958 logs.go:282] 1 containers: [1436cfab0a61117409dfbc149a9ed46cffc35222a59e469b4357eb8eeb006a1a]
I1210 00:31:11.661173 869958 ssh_runner.go:195] Run: which crictl
I1210 00:31:11.664907 869958 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kubernetes-dashboard Namespaces:[]}
I1210 00:31:11.664974 869958 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kubernetes-dashboard
I1210 00:31:11.701413 869958 cri.go:89] found id: "71efdd7b3ff7301fc234121aa9b522c561e17518296c9ac0f096808fd717194e"
I1210 00:31:11.701439 869958 cri.go:89] found id: ""
I1210 00:31:11.701448 869958 logs.go:282] 1 containers: [71efdd7b3ff7301fc234121aa9b522c561e17518296c9ac0f096808fd717194e]
I1210 00:31:11.701498 869958 ssh_runner.go:195] Run: which crictl
I1210 00:31:11.705199 869958 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1210 00:31:11.705268 869958 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1210 00:31:11.739637 869958 cri.go:89] found id: "b9cf656a0d778fa858636c57f7ed856932e9c797614d0b2a0bb2b7b183d0444e"
I1210 00:31:11.739669 869958 cri.go:89] found id: "5ef64915e71a0a82e907f051bd349d35990910b7707e2189239897f76b8fcf24"
I1210 00:31:11.739674 869958 cri.go:89] found id: ""
I1210 00:31:11.739682 869958 logs.go:282] 2 containers: [b9cf656a0d778fa858636c57f7ed856932e9c797614d0b2a0bb2b7b183d0444e 5ef64915e71a0a82e907f051bd349d35990910b7707e2189239897f76b8fcf24]
I1210 00:31:11.739748 869958 ssh_runner.go:195] Run: which crictl
I1210 00:31:11.743857 869958 ssh_runner.go:195] Run: which crictl
I1210 00:31:11.747864 869958 logs.go:123] Gathering logs for kube-scheduler [e24a785fdbd96316943085ea3d97c2bbf5698967fcac01b25a6a185f04e80b07] ...
I1210 00:31:11.747897 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 e24a785fdbd96316943085ea3d97c2bbf5698967fcac01b25a6a185f04e80b07"
I1210 00:31:11.787539 869958 logs.go:123] Gathering logs for kube-controller-manager [7f21b5ae0b202880d6305e4c384b15e18f96a0e7d3fdf3efa45355a2af113e82] ...
I1210 00:31:11.787577 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 7f21b5ae0b202880d6305e4c384b15e18f96a0e7d3fdf3efa45355a2af113e82"
I1210 00:31:11.854239 869958 logs.go:123] Gathering logs for kubernetes-dashboard [71efdd7b3ff7301fc234121aa9b522c561e17518296c9ac0f096808fd717194e] ...
I1210 00:31:11.854286 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 71efdd7b3ff7301fc234121aa9b522c561e17518296c9ac0f096808fd717194e"
I1210 00:31:11.890628 869958 logs.go:123] Gathering logs for storage-provisioner [5ef64915e71a0a82e907f051bd349d35990910b7707e2189239897f76b8fcf24] ...
I1210 00:31:11.890659 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 5ef64915e71a0a82e907f051bd349d35990910b7707e2189239897f76b8fcf24"
I1210 00:31:11.924933 869958 logs.go:123] Gathering logs for dmesg ...
I1210 00:31:11.924977 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1210 00:31:11.952597 869958 logs.go:123] Gathering logs for kube-apiserver [9be25993b65b8cdca34c64615c37d67ed96191f7e935d4aa5f3f20b8a71af72d] ...
I1210 00:31:11.952639 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 9be25993b65b8cdca34c64615c37d67ed96191f7e935d4aa5f3f20b8a71af72d"
I1210 00:31:12.008186 869958 logs.go:123] Gathering logs for etcd [de4e779f2f1e9dbb4a147473498f66677a264e01c0c74453fc0137f378cf8ae2] ...
I1210 00:31:12.008225 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 de4e779f2f1e9dbb4a147473498f66677a264e01c0c74453fc0137f378cf8ae2"
I1210 00:31:12.050981 869958 logs.go:123] Gathering logs for kindnet [1436cfab0a61117409dfbc149a9ed46cffc35222a59e469b4357eb8eeb006a1a] ...
I1210 00:31:12.051019 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 1436cfab0a61117409dfbc149a9ed46cffc35222a59e469b4357eb8eeb006a1a"
I1210 00:31:12.092306 869958 logs.go:123] Gathering logs for storage-provisioner [b9cf656a0d778fa858636c57f7ed856932e9c797614d0b2a0bb2b7b183d0444e] ...
I1210 00:31:12.092348 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 b9cf656a0d778fa858636c57f7ed856932e9c797614d0b2a0bb2b7b183d0444e"
I1210 00:31:12.126824 869958 logs.go:123] Gathering logs for kubelet ...
I1210 00:31:12.126877 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
W1210 00:31:12.167149 869958 logs.go:138] Found kubelet problem: Dec 10 00:25:36 old-k8s-version-280963 kubelet[1066]: E1210 00:25:36.013371 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = failed to pull and unpack image \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to resolve reference \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to do request: Head \"https://fake.domain/v2/registry.k8s.io/echoserver/manifests/1.4\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
W1210 00:31:12.167339 869958 logs.go:138] Found kubelet problem: Dec 10 00:25:36 old-k8s-version-280963 kubelet[1066]: E1210 00:25:36.276847 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.169400 869958 logs.go:138] Found kubelet problem: Dec 10 00:25:49 old-k8s-version-280963 kubelet[1066]: E1210 00:25:49.092116 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = failed to pull and unpack image \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to resolve reference \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to do request: Head \"https://fake.domain/v2/registry.k8s.io/echoserver/manifests/1.4\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
W1210 00:31:12.170983 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:00 old-k8s-version-280963 kubelet[1066]: E1210 00:26:00.341400 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 10s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.171225 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:01 old-k8s-version-280963 kubelet[1066]: E1210 00:26:01.348361 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 10s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.171364 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:02 old-k8s-version-280963 kubelet[1066]: E1210 00:26:02.063829 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.171704 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:02 old-k8s-version-280963 kubelet[1066]: E1210 00:26:02.351893 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 10s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.173755 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:14 old-k8s-version-280963 kubelet[1066]: E1210 00:26:14.082796 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = failed to pull and unpack image \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to resolve reference \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to do request: Head \"https://fake.domain/v2/registry.k8s.io/echoserver/manifests/1.4\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
W1210 00:31:12.174505 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:16 old-k8s-version-280963 kubelet[1066]: E1210 00:26:16.385007 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.174745 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:20 old-k8s-version-280963 kubelet[1066]: E1210 00:26:20.929425 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.174905 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:25 old-k8s-version-280963 kubelet[1066]: E1210 00:26:25.063820 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.175143 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:32 old-k8s-version-280963 kubelet[1066]: E1210 00:26:32.063614 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.175321 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:37 old-k8s-version-280963 kubelet[1066]: E1210 00:26:37.063859 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.175745 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:45 old-k8s-version-280963 kubelet[1066]: E1210 00:26:45.451805 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.175980 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:50 old-k8s-version-280963 kubelet[1066]: E1210 00:26:50.929571 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.176120 869958 logs.go:138] Found kubelet problem: Dec 10 00:26:51 old-k8s-version-280963 kubelet[1066]: E1210 00:26:51.063691 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.176358 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:04 old-k8s-version-280963 kubelet[1066]: E1210 00:27:04.063486 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.178089 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:06 old-k8s-version-280963 kubelet[1066]: E1210 00:27:06.100485 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = failed to pull and unpack image \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to resolve reference \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to do request: Head \"https://fake.domain/v2/registry.k8s.io/echoserver/manifests/1.4\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
W1210 00:31:12.178356 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:16 old-k8s-version-280963 kubelet[1066]: E1210 00:27:16.063301 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.178495 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:18 old-k8s-version-280963 kubelet[1066]: E1210 00:27:18.063936 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.178628 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:29 old-k8s-version-280963 kubelet[1066]: E1210 00:27:29.063910 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.179087 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:30 old-k8s-version-280963 kubelet[1066]: E1210 00:27:30.551122 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 1m20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.179328 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:31 old-k8s-version-280963 kubelet[1066]: E1210 00:27:31.554624 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 1m20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.179563 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:42 old-k8s-version-280963 kubelet[1066]: E1210 00:27:42.063651 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 1m20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.179696 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:43 old-k8s-version-280963 kubelet[1066]: E1210 00:27:43.063770 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.179934 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:54 old-k8s-version-280963 kubelet[1066]: E1210 00:27:54.063558 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 1m20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.180068 869958 logs.go:138] Found kubelet problem: Dec 10 00:27:55 old-k8s-version-280963 kubelet[1066]: E1210 00:27:55.063561 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.180308 869958 logs.go:138] Found kubelet problem: Dec 10 00:28:05 old-k8s-version-280963 kubelet[1066]: E1210 00:28:05.063379 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 1m20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.180463 869958 logs.go:138] Found kubelet problem: Dec 10 00:28:10 old-k8s-version-280963 kubelet[1066]: E1210 00:28:10.063837 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.180701 869958 logs.go:138] Found kubelet problem: Dec 10 00:28:18 old-k8s-version-280963 kubelet[1066]: E1210 00:28:18.063477 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 1m20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.180836 869958 logs.go:138] Found kubelet problem: Dec 10 00:28:23 old-k8s-version-280963 kubelet[1066]: E1210 00:28:23.063704 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.181073 869958 logs.go:138] Found kubelet problem: Dec 10 00:28:29 old-k8s-version-280963 kubelet[1066]: E1210 00:28:29.063218 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 1m20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.182823 869958 logs.go:138] Found kubelet problem: Dec 10 00:28:36 old-k8s-version-280963 kubelet[1066]: E1210 00:28:36.089234 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = failed to pull and unpack image \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to resolve reference \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to do request: Head \"https://fake.domain/v2/registry.k8s.io/echoserver/manifests/1.4\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
W1210 00:31:12.183092 869958 logs.go:138] Found kubelet problem: Dec 10 00:28:40 old-k8s-version-280963 kubelet[1066]: E1210 00:28:40.063266 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 1m20s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.183227 869958 logs.go:138] Found kubelet problem: Dec 10 00:28:50 old-k8s-version-280963 kubelet[1066]: E1210 00:28:50.063870 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.183655 869958 logs.go:138] Found kubelet problem: Dec 10 00:28:55 old-k8s-version-280963 kubelet[1066]: E1210 00:28:55.726230 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.183890 869958 logs.go:138] Found kubelet problem: Dec 10 00:29:00 old-k8s-version-280963 kubelet[1066]: E1210 00:29:00.929346 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.184024 869958 logs.go:138] Found kubelet problem: Dec 10 00:29:01 old-k8s-version-280963 kubelet[1066]: E1210 00:29:01.063931 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.184157 869958 logs.go:138] Found kubelet problem: Dec 10 00:29:12 old-k8s-version-280963 kubelet[1066]: E1210 00:29:12.063883 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.184400 869958 logs.go:138] Found kubelet problem: Dec 10 00:29:13 old-k8s-version-280963 kubelet[1066]: E1210 00:29:13.063415 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.184636 869958 logs.go:138] Found kubelet problem: Dec 10 00:29:25 old-k8s-version-280963 kubelet[1066]: E1210 00:29:25.063471 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.184769 869958 logs.go:138] Found kubelet problem: Dec 10 00:29:25 old-k8s-version-280963 kubelet[1066]: E1210 00:29:25.063913 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.185004 869958 logs.go:138] Found kubelet problem: Dec 10 00:29:36 old-k8s-version-280963 kubelet[1066]: E1210 00:29:36.063693 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.185138 869958 logs.go:138] Found kubelet problem: Dec 10 00:29:36 old-k8s-version-280963 kubelet[1066]: E1210 00:29:36.063872 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.185382 869958 logs.go:138] Found kubelet problem: Dec 10 00:29:49 old-k8s-version-280963 kubelet[1066]: E1210 00:29:49.063306 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.185515 869958 logs.go:138] Found kubelet problem: Dec 10 00:29:50 old-k8s-version-280963 kubelet[1066]: E1210 00:29:50.063838 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.185750 869958 logs.go:138] Found kubelet problem: Dec 10 00:30:03 old-k8s-version-280963 kubelet[1066]: E1210 00:30:03.063224 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.185883 869958 logs.go:138] Found kubelet problem: Dec 10 00:30:05 old-k8s-version-280963 kubelet[1066]: E1210 00:30:05.063807 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.186018 869958 logs.go:138] Found kubelet problem: Dec 10 00:30:17 old-k8s-version-280963 kubelet[1066]: E1210 00:30:17.063774 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.186253 869958 logs.go:138] Found kubelet problem: Dec 10 00:30:18 old-k8s-version-280963 kubelet[1066]: E1210 00:30:18.063380 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.186506 869958 logs.go:138] Found kubelet problem: Dec 10 00:30:29 old-k8s-version-280963 kubelet[1066]: E1210 00:30:29.063392 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.186644 869958 logs.go:138] Found kubelet problem: Dec 10 00:30:32 old-k8s-version-280963 kubelet[1066]: E1210 00:30:32.063891 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.186900 869958 logs.go:138] Found kubelet problem: Dec 10 00:30:41 old-k8s-version-280963 kubelet[1066]: E1210 00:30:41.063206 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.187083 869958 logs.go:138] Found kubelet problem: Dec 10 00:30:44 old-k8s-version-280963 kubelet[1066]: E1210 00:30:44.064113 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.187488 869958 logs.go:138] Found kubelet problem: Dec 10 00:30:54 old-k8s-version-280963 kubelet[1066]: E1210 00:30:54.063639 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.187699 869958 logs.go:138] Found kubelet problem: Dec 10 00:30:56 old-k8s-version-280963 kubelet[1066]: E1210 00:30:56.063846 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.188009 869958 logs.go:138] Found kubelet problem: Dec 10 00:31:06 old-k8s-version-280963 kubelet[1066]: E1210 00:31:06.063272 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.188179 869958 logs.go:138] Found kubelet problem: Dec 10 00:31:09 old-k8s-version-280963 kubelet[1066]: E1210 00:31:09.063618 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
I1210 00:31:12.188199 869958 logs.go:123] Gathering logs for describe nodes ...
I1210 00:31:12.188219 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.20.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
I1210 00:31:12.291065 869958 logs.go:123] Gathering logs for kube-proxy [930a4290304a3700a3b34bb588be1a8cb0fc8fc88f3c3adb4bc89453498c1ba1] ...
I1210 00:31:12.291103 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 930a4290304a3700a3b34bb588be1a8cb0fc8fc88f3c3adb4bc89453498c1ba1"
I1210 00:31:12.325400 869958 logs.go:123] Gathering logs for containerd ...
I1210 00:31:12.325437 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1210 00:31:12.385096 869958 logs.go:123] Gathering logs for coredns [d00996a4380040decb2e6f3c9bcc65ff7f12c74a6f6817167177166144b883f0] ...
I1210 00:31:12.385143 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 d00996a4380040decb2e6f3c9bcc65ff7f12c74a6f6817167177166144b883f0"
I1210 00:31:12.421781 869958 logs.go:123] Gathering logs for container status ...
I1210 00:31:12.421815 869958 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1210 00:31:12.458769 869958 out.go:358] Setting ErrFile to fd 2...
I1210 00:31:12.458797 869958 out.go:392] TERM=,COLORTERM=, which probably does not support color
W1210 00:31:12.458963 869958 out.go:270] X Problems detected in kubelet:
W1210 00:31:12.458980 869958 out.go:270] Dec 10 00:30:44 old-k8s-version-280963 kubelet[1066]: E1210 00:30:44.064113 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.458988 869958 out.go:270] Dec 10 00:30:54 old-k8s-version-280963 kubelet[1066]: E1210 00:30:54.063639 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.459000 869958 out.go:270] Dec 10 00:30:56 old-k8s-version-280963 kubelet[1066]: E1210 00:30:56.063846 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
W1210 00:31:12.459010 869958 out.go:270] Dec 10 00:31:06 old-k8s-version-280963 kubelet[1066]: E1210 00:31:06.063272 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
W1210 00:31:12.459023 869958 out.go:270] Dec 10 00:31:09 old-k8s-version-280963 kubelet[1066]: E1210 00:31:09.063618 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
I1210 00:31:12.459048 869958 out.go:358] Setting ErrFile to fd 2...
I1210 00:31:12.459062 869958 out.go:392] TERM=,COLORTERM=, which probably does not support color
I1210 00:31:22.460270 869958 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
I1210 00:31:22.467259 869958 api_server.go:279] https://192.168.85.2:8443/healthz returned 200:
ok
I1210 00:31:22.469499 869958 out.go:201]
W1210 00:31:22.470824 869958 out.go:270] X Exiting due to K8S_UNHEALTHY_CONTROL_PLANE: wait 6m0s for node: wait for healthy API server: controlPlane never updated to v1.20.0
W1210 00:31:22.470878 869958 out.go:270] * Suggestion: Control Plane could not update, try minikube delete --all --purge
W1210 00:31:22.470901 869958 out.go:270] * Related issue: https://github.com/kubernetes/minikube/issues/11417
W1210 00:31:22.470913 869958 out.go:270] *
W1210 00:31:22.472041 869958 out.go:293] ╭─────────────────────────────────────────────────────────────────────────────────────────────╮
│ │
│ * If the above advice does not help, please let us know: │
│ https://github.com/kubernetes/minikube/issues/new/choose │
│ │
│ * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue. │
│ │
╰─────────────────────────────────────────────────────────────────────────────────────────────╯
I1210 00:31:22.473975 869958 out.go:201]
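
Note on the failure above: the healthz probe at api_server.go:253 succeeds (200: ok at api_server.go:279), yet the start still exits with K8S_UNHEALTHY_CONTROL_PLANE because the control plane never reported v1.20.0 within the 6m0s wait. A minimal Go sketch of such a healthz probe — not minikube's actual implementation; the URL and expected "200: ok" are taken from the log, and InsecureSkipVerify is an assumption standing in for the real client TLS setup, which loads the cluster CA from the kubeconfig:

package main

import (
	"crypto/tls"
	"fmt"
	"io"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{
		Timeout: 5 * time.Second,
		Transport: &http.Transport{
			// Assumption: skip cert verification for the sketch only;
			// a real client would trust the cluster CA instead.
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Get("https://192.168.85.2:8443/healthz")
	if err != nil {
		fmt.Println("healthz unreachable:", err)
		return
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Printf("%d: %s\n", resp.StatusCode, body) // the log shows "200: ok"
}
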
==> container status <==
CONTAINER       IMAGE           CREATED         STATE     NAME                        ATTEMPT   POD ID          POD
bb6919a8e0b69   523cad1a4df73   2 minutes ago   Exited    dashboard-metrics-scraper   5         c9873f403fe96   dashboard-metrics-scraper-8d5bb5db8-h78fg
b9cf656a0d778   6e38f40d628db   5 minutes ago   Running   storage-provisioner         1         8c40f75d8ca10   storage-provisioner
71efdd7b3ff73   07655ddf2eebe   5 minutes ago   Running   kubernetes-dashboard        0         24a5323a71f00   kubernetes-dashboard-cd95d586-fnf8h
5b9539c213bd4   56cc512116c8f   5 minutes ago   Running   busybox                     0         1ffdf86be7d65   busybox
1436cfab0a611   50415e5d05f05   5 minutes ago   Running   kindnet-cni                 0         56dacd8cdace0   kindnet-bx7xb
d00996a438004   bfe3a36ebd252   5 minutes ago   Running   coredns                     0         85216c9f961a1   coredns-74ff55c5b-45ksb
5ef64915e71a0   6e38f40d628db   5 minutes ago   Exited    storage-provisioner         0         8c40f75d8ca10   storage-provisioner
930a4290304a3   10cc881966cfd   5 minutes ago   Running   kube-proxy                  0         d4c5cdc8681e7   kube-proxy-qb2z4
e24a785fdbd96   3138b6e3d4712   5 minutes ago   Running   kube-scheduler              0         35be6dcb0ff8e   kube-scheduler-old-k8s-version-280963
7f21b5ae0b202   b9fa1895dcaa6   5 minutes ago   Running   kube-controller-manager     0         20d45a0b80ac3   kube-controller-manager-old-k8s-version-280963
9be25993b65b8   ca9843d3b5454   5 minutes ago   Running   kube-apiserver              0         c5f8af6106911   kube-apiserver-old-k8s-version-280963
de4e779f2f1e9   0369cf4303ffd   5 minutes ago   Running   etcd                        0         d41231201c474   etcd-old-k8s-version-280963
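
This table comes from the "container status" gathering step logged at logs.go:123 above: sudo crictl ps -a, falling back to sudo docker ps -a. A hedged Go sketch of that fallback, simplified to shell out to crictl directly rather than resolving it via `which` as the logged command does:

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// Try the CRI CLI first, as the logged command does...
	out, err := exec.Command("sudo", "crictl", "ps", "-a").CombinedOutput()
	if err != nil {
		// ...then fall back to docker, mirroring "|| sudo docker ps -a".
		out, err = exec.Command("sudo", "docker", "ps", "-a").CombinedOutput()
	}
	if err != nil {
		fmt.Println("no container runtime answered:", err)
		return
	}
	fmt.Print(string(out))
}
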
==> containerd <==
Dec 10 00:27:30 old-k8s-version-280963 containerd[688]: time="2024-12-10T00:27:30.077134275Z" level=info msg="CreateContainer within sandbox \"c9873f403fe9640d8069f4b43574c6195d7c72df3d5147c848c3ef8522e4cf5e\" for name:\"dashboard-metrics-scraper\" attempt:4 returns container id \"80a0427030a2cdb7c62305d17ec00897b150478fa62162fe0f460b5145f028ca\""
Dec 10 00:27:30 old-k8s-version-280963 containerd[688]: time="2024-12-10T00:27:30.077643569Z" level=info msg="StartContainer for \"80a0427030a2cdb7c62305d17ec00897b150478fa62162fe0f460b5145f028ca\""
Dec 10 00:27:30 old-k8s-version-280963 containerd[688]: time="2024-12-10T00:27:30.146335195Z" level=info msg="StartContainer for \"80a0427030a2cdb7c62305d17ec00897b150478fa62162fe0f460b5145f028ca\" returns successfully"
Dec 10 00:27:30 old-k8s-version-280963 containerd[688]: time="2024-12-10T00:27:30.181532281Z" level=info msg="shim disconnected" id=80a0427030a2cdb7c62305d17ec00897b150478fa62162fe0f460b5145f028ca namespace=k8s.io
Dec 10 00:27:30 old-k8s-version-280963 containerd[688]: time="2024-12-10T00:27:30.181608443Z" level=warning msg="cleaning up after shim disconnected" id=80a0427030a2cdb7c62305d17ec00897b150478fa62162fe0f460b5145f028ca namespace=k8s.io
Dec 10 00:27:30 old-k8s-version-280963 containerd[688]: time="2024-12-10T00:27:30.181620431Z" level=info msg="cleaning up dead shim" namespace=k8s.io
Dec 10 00:27:30 old-k8s-version-280963 containerd[688]: time="2024-12-10T00:27:30.551893217Z" level=info msg="RemoveContainer for \"17eb23f620f4e02ab9a50e85e472ab8e63b5ec7c0d8d67c3004de860409899b3\""
Dec 10 00:27:30 old-k8s-version-280963 containerd[688]: time="2024-12-10T00:27:30.557563930Z" level=info msg="RemoveContainer for \"17eb23f620f4e02ab9a50e85e472ab8e63b5ec7c0d8d67c3004de860409899b3\" returns successfully"
Dec 10 00:28:36 old-k8s-version-280963 containerd[688]: time="2024-12-10T00:28:36.063856259Z" level=info msg="PullImage \"fake.domain/registry.k8s.io/echoserver:1.4\""
Dec 10 00:28:36 old-k8s-version-280963 containerd[688]: time="2024-12-10T00:28:36.087307580Z" level=info msg="trying next host" error="failed to do request: Head \"https://fake.domain/v2/registry.k8s.io/echoserver/manifests/1.4\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host" host=fake.domain
Dec 10 00:28:36 old-k8s-version-280963 containerd[688]: time="2024-12-10T00:28:36.088712729Z" level=error msg="PullImage \"fake.domain/registry.k8s.io/echoserver:1.4\" failed" error="failed to pull and unpack image \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to resolve reference \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to do request: Head \"https://fake.domain/v2/registry.k8s.io/echoserver/manifests/1.4\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
Dec 10 00:28:36 old-k8s-version-280963 containerd[688]: time="2024-12-10T00:28:36.088793391Z" level=info msg="stop pulling image fake.domain/registry.k8s.io/echoserver:1.4: active requests=0, bytes read=0"
Dec 10 00:28:55 old-k8s-version-280963 containerd[688]: time="2024-12-10T00:28:55.065158341Z" level=info msg="CreateContainer within sandbox \"c9873f403fe9640d8069f4b43574c6195d7c72df3d5147c848c3ef8522e4cf5e\" for container name:\"dashboard-metrics-scraper\" attempt:5"
Dec 10 00:28:55 old-k8s-version-280963 containerd[688]: time="2024-12-10T00:28:55.078186852Z" level=info msg="CreateContainer within sandbox \"c9873f403fe9640d8069f4b43574c6195d7c72df3d5147c848c3ef8522e4cf5e\" for name:\"dashboard-metrics-scraper\" attempt:5 returns container id \"bb6919a8e0b6952dc53dbfd18a32c5f406cbf4c685d039991f5c8a1dbb11e9d1\""
Dec 10 00:28:55 old-k8s-version-280963 containerd[688]: time="2024-12-10T00:28:55.078991540Z" level=info msg="StartContainer for \"bb6919a8e0b6952dc53dbfd18a32c5f406cbf4c685d039991f5c8a1dbb11e9d1\""
Dec 10 00:28:55 old-k8s-version-280963 containerd[688]: time="2024-12-10T00:28:55.172655371Z" level=info msg="StartContainer for \"bb6919a8e0b6952dc53dbfd18a32c5f406cbf4c685d039991f5c8a1dbb11e9d1\" returns successfully"
Dec 10 00:28:55 old-k8s-version-280963 containerd[688]: time="2024-12-10T00:28:55.225267252Z" level=info msg="shim disconnected" id=bb6919a8e0b6952dc53dbfd18a32c5f406cbf4c685d039991f5c8a1dbb11e9d1 namespace=k8s.io
Dec 10 00:28:55 old-k8s-version-280963 containerd[688]: time="2024-12-10T00:28:55.225357041Z" level=warning msg="cleaning up after shim disconnected" id=bb6919a8e0b6952dc53dbfd18a32c5f406cbf4c685d039991f5c8a1dbb11e9d1 namespace=k8s.io
Dec 10 00:28:55 old-k8s-version-280963 containerd[688]: time="2024-12-10T00:28:55.225368648Z" level=info msg="cleaning up dead shim" namespace=k8s.io
Dec 10 00:28:55 old-k8s-version-280963 containerd[688]: time="2024-12-10T00:28:55.727577708Z" level=info msg="RemoveContainer for \"80a0427030a2cdb7c62305d17ec00897b150478fa62162fe0f460b5145f028ca\""
Dec 10 00:28:55 old-k8s-version-280963 containerd[688]: time="2024-12-10T00:28:55.732378341Z" level=info msg="RemoveContainer for \"80a0427030a2cdb7c62305d17ec00897b150478fa62162fe0f460b5145f028ca\" returns successfully"
Dec 10 00:31:21 old-k8s-version-280963 containerd[688]: time="2024-12-10T00:31:21.067930147Z" level=info msg="PullImage \"fake.domain/registry.k8s.io/echoserver:1.4\""
Dec 10 00:31:21 old-k8s-version-280963 containerd[688]: time="2024-12-10T00:31:21.092349255Z" level=info msg="trying next host" error="failed to do request: Head \"https://fake.domain/v2/registry.k8s.io/echoserver/manifests/1.4\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host" host=fake.domain
Dec 10 00:31:21 old-k8s-version-280963 containerd[688]: time="2024-12-10T00:31:21.093804351Z" level=error msg="PullImage \"fake.domain/registry.k8s.io/echoserver:1.4\" failed" error="failed to pull and unpack image \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to resolve reference \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to do request: Head \"https://fake.domain/v2/registry.k8s.io/echoserver/manifests/1.4\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
Dec 10 00:31:21 old-k8s-version-280963 containerd[688]: time="2024-12-10T00:31:21.093868542Z" level=info msg="stop pulling image fake.domain/registry.k8s.io/echoserver:1.4: active requests=0, bytes read=0"
==> coredns [d00996a4380040decb2e6f3c9bcc65ff7f12c74a6f6817167177166144b883f0] <==
.:53
[INFO] plugin/reload: Running configuration MD5 = 093a0bf1423dd8c4eee62372bb216168
CoreDNS-1.7.0
linux/amd64, go1.14.4, f59c03d
[INFO] 127.0.0.1:33064 - 10903 "HINFO IN 842193102946151339.216109792973782087. udp 55 false 512" NXDOMAIN qr,rd,ra 55 0.010806784s
[INFO] plugin/ready: Still waiting on: "kubernetes"
.:53
[INFO] plugin/reload: Running configuration MD5 = 093a0bf1423dd8c4eee62372bb216168
CoreDNS-1.7.0
linux/amd64, go1.14.4, f59c03d
[INFO] 127.0.0.1:49030 - 26184 "HINFO IN 5024412679339346096.4343172623692224972. udp 57 false 512" NXDOMAIN qr,rd,ra 57 0.006611131s
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] plugin/ready: Still waiting on: "kubernetes"
I1210 00:26:01.758288 1 trace.go:116] Trace[1427131847]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125 (started: 2024-12-10 00:25:31.757204103 +0000 UTC m=+0.026713724) (total time: 30.000947075s):
Trace[1427131847]: [30.000947075s] [30.000947075s] END
E1210 00:26:01.758320 1 reflector.go:178] pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125: Failed to list *v1.Endpoints: Get "https://10.96.0.1:443/api/v1/endpoints?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
I1210 00:26:01.758343 1 trace.go:116] Trace[939984059]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125 (started: 2024-12-10 00:25:31.757163262 +0000 UTC m=+0.026672881) (total time: 30.001013368s):
Trace[939984059]: [30.001013368s] [30.001013368s] END
E1210 00:26:01.758348 1 reflector.go:178] pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125: Failed to list *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
I1210 00:26:01.758362 1 trace.go:116] Trace[2019727887]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125 (started: 2024-12-10 00:25:31.757210753 +0000 UTC m=+0.026720375) (total time: 30.000940131s):
Trace[2019727887]: [30.000940131s] [30.000940131s] END
E1210 00:26:01.758367 1 reflector.go:178] pkg/mod/k8s.io/client-go@v0.18.3/tools/cache/reflector.go:125: Failed to list *v1.Service: Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
==> describe nodes <==
Name: old-k8s-version-280963
Roles: control-plane,master
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=old-k8s-version-280963
kubernetes.io/os=linux
minikube.k8s.io/commit=ef4b1d364e31f576638442321d9f6b3bc3aea9a9
minikube.k8s.io/name=old-k8s-version-280963
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2024_12_10T00_22_59_0700
minikube.k8s.io/version=v1.34.0
node-role.kubernetes.io/control-plane=
node-role.kubernetes.io/master=
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: /run/containerd/containerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Tue, 10 Dec 2024 00:22:55 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: old-k8s-version-280963
AcquireTime: <unset>
RenewTime: Tue, 10 Dec 2024 00:31:20 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Tue, 10 Dec 2024 00:26:29 +0000 Tue, 10 Dec 2024 00:22:51 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Tue, 10 Dec 2024 00:26:29 +0000 Tue, 10 Dec 2024 00:22:51 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Tue, 10 Dec 2024 00:26:29 +0000 Tue, 10 Dec 2024 00:22:51 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Tue, 10 Dec 2024 00:26:29 +0000 Tue, 10 Dec 2024 00:23:14 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.85.2
Hostname: old-k8s-version-280963
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32859312Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32859312Ki
pods: 110
System Info:
Machine ID: 10eb53500e5b428e93adc8f6266666f7
System UUID: b0688326-024d-4ebe-9ce3-d8f6ce47e462
Boot ID: 7d4fb23d-f380-43ef-b743-f39d55af0439
Kernel Version: 5.15.0-1071-gcp
OS Image: Ubuntu 22.04.5 LTS
Operating System: linux
Architecture: amd64
Container Runtime Version: containerd://1.7.22
Kubelet Version: v1.20.0
Kube-Proxy Version: v1.20.0
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (12 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits AGE
--------- ---- ------------ ---------- --------------- ------------- ---
default busybox 0 (0%) 0 (0%) 0 (0%) 0 (0%) 6m40s
kube-system coredns-74ff55c5b-45ksb 100m (1%) 0 (0%) 70Mi (0%) 170Mi (0%) 8m9s
kube-system etcd-old-k8s-version-280963 100m (1%) 0 (0%) 100Mi (0%) 0 (0%) 8m19s
kube-system kindnet-bx7xb 100m (1%) 100m (1%) 50Mi (0%) 50Mi (0%) 8m10s
kube-system kube-apiserver-old-k8s-version-280963 250m (3%) 0 (0%) 0 (0%) 0 (0%) 8m19s
kube-system kube-controller-manager-old-k8s-version-280963 200m (2%) 0 (0%) 0 (0%) 0 (0%) 8m18s
kube-system kube-proxy-qb2z4 0 (0%) 0 (0%) 0 (0%) 0 (0%) 8m10s
kube-system kube-scheduler-old-k8s-version-280963 100m (1%) 0 (0%) 0 (0%) 0 (0%) 8m19s
kube-system metrics-server-9975d5f86-9wg6p 100m (1%) 0 (0%) 200Mi (0%) 0 (0%) 6m30s
kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 8m8s
kubernetes-dashboard dashboard-metrics-scraper-8d5bb5db8-h78fg 0 (0%) 0 (0%) 0 (0%) 0 (0%) 5m34s
kubernetes-dashboard kubernetes-dashboard-cd95d586-fnf8h 0 (0%) 0 (0%) 0 (0%) 0 (0%) 5m34s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 950m (11%) 100m (1%)
memory 420Mi (1%) 220Mi (0%)
ephemeral-storage 100Mi (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal NodeHasSufficientMemory 8m34s (x5 over 8m34s) kubelet Node old-k8s-version-280963 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 8m34s (x4 over 8m34s) kubelet Node old-k8s-version-280963 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 8m34s (x3 over 8m34s) kubelet Node old-k8s-version-280963 status is now: NodeHasSufficientPID
Normal Starting 8m19s kubelet Starting kubelet.
Normal NodeHasSufficientMemory 8m19s kubelet Node old-k8s-version-280963 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 8m19s kubelet Node old-k8s-version-280963 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 8m19s kubelet Node old-k8s-version-280963 status is now: NodeHasSufficientPID
Normal NodeAllocatableEnforced 8m19s kubelet Updated Node Allocatable limit across pods
Normal NodeReady 8m9s kubelet Node old-k8s-version-280963 status is now: NodeReady
Normal Starting 8m8s kube-proxy Starting kube-proxy.
Normal Starting 5m59s kubelet Starting kubelet.
Normal NodeHasSufficientMemory 5m59s (x8 over 5m59s) kubelet Node old-k8s-version-280963 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 5m59s (x8 over 5m59s) kubelet Node old-k8s-version-280963 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 5m59s (x7 over 5m59s) kubelet Node old-k8s-version-280963 status is now: NodeHasSufficientPID
Normal NodeAllocatableEnforced 5m59s kubelet Updated Node Allocatable limit across pods
Normal Starting 5m52s kube-proxy Starting kube-proxy.
==> dmesg <==
[ +0.000015] ll header: 00000000: 02 42 9b 70 0c 4a 02 42 c0 a8 55 02 08 00
[ +1.028751] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev br-b7562d0c2da9
[ +0.000000] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev br-b7562d0c2da9
[ +0.000006] ll header: 00000000: 02 42 9b 70 0c 4a 02 42 c0 a8 55 02 08 00
[ +0.000001] ll header: 00000000: 02 42 9b 70 0c 4a 02 42 c0 a8 55 02 08 00
[ +0.003986] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev br-b7562d0c2da9
[ +0.000006] ll header: 00000000: 02 42 9b 70 0c 4a 02 42 c0 a8 55 02 08 00
[ +2.011796] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev br-b7562d0c2da9
[ +0.000000] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev br-b7562d0c2da9
[ +0.000000] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev br-b7562d0c2da9
[ +0.000007] ll header: 00000000: 02 42 9b 70 0c 4a 02 42 c0 a8 55 02 08 00
[ +0.000000] ll header: 00000000: 02 42 9b 70 0c 4a 02 42 c0 a8 55 02 08 00
[ +0.000001] ll header: 00000000: 02 42 9b 70 0c 4a 02 42 c0 a8 55 02 08 00
[ +4.159610] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev br-b7562d0c2da9
[ +0.000007] ll header: 00000000: 02 42 9b 70 0c 4a 02 42 c0 a8 55 02 08 00
[ -0.000001] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev br-b7562d0c2da9
[ +0.000005] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev br-b7562d0c2da9
[ +0.000001] ll header: 00000000: 02 42 9b 70 0c 4a 02 42 c0 a8 55 02 08 00
[ +0.000004] ll header: 00000000: 02 42 9b 70 0c 4a 02 42 c0 a8 55 02 08 00
[ +8.191241] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev br-b7562d0c2da9
[ +0.000006] ll header: 00000000: 02 42 9b 70 0c 4a 02 42 c0 a8 55 02 08 00
[ +0.003988] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev br-b7562d0c2da9
[ +0.000007] ll header: 00000000: 02 42 9b 70 0c 4a 02 42 c0 a8 55 02 08 00
[ -0.000001] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev br-b7562d0c2da9
[ +0.000005] ll header: 00000000: 02 42 9b 70 0c 4a 02 42 c0 a8 55 02 08 00
==> etcd [de4e779f2f1e9dbb4a147473498f66677a264e01c0c74453fc0137f378cf8ae2] <==
2024-12-10 00:27:26.017833 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-12-10 00:27:36.017866 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-12-10 00:27:46.017860 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-12-10 00:27:56.017709 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-12-10 00:28:06.017832 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-12-10 00:28:16.017797 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-12-10 00:28:26.017798 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-12-10 00:28:36.017951 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-12-10 00:28:46.017850 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-12-10 00:28:56.017887 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-12-10 00:29:01.172004 W | etcdserver: read-only range request "key:\"/registry/pods/kube-system/metrics-server-9975d5f86-9wg6p\" " with result "range_response_count:1 size:4324" took too long (107.20434ms) to execute
2024-12-10 00:29:06.017925 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-12-10 00:29:16.017758 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-12-10 00:29:26.017867 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-12-10 00:29:36.017783 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-12-10 00:29:46.017751 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-12-10 00:29:56.017896 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-12-10 00:30:06.017858 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-12-10 00:30:16.017877 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-12-10 00:30:26.018136 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-12-10 00:30:36.017911 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-12-10 00:30:46.017941 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-12-10 00:30:56.018043 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-12-10 00:31:06.017899 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2024-12-10 00:31:16.017829 I | etcdserver/api/etcdhttp: /health OK (status code 200)
==> kernel <==
00:31:23 up 3:13, 0 users, load average: 0.73, 2.03, 2.29
Linux old-k8s-version-280963 5.15.0-1071-gcp #79~20.04.1-Ubuntu SMP Thu Oct 17 21:59:34 UTC 2024 x86_64 x86_64 x86_64 GNU/Linux
PRETTY_NAME="Ubuntu 22.04.5 LTS"
==> kindnet [1436cfab0a61117409dfbc149a9ed46cffc35222a59e469b4357eb8eeb006a1a] <==
I1210 00:29:14.663089 1 main.go:301] handling current node
I1210 00:29:24.663067 1 main.go:297] Handling node with IPs: map[192.168.85.2:{}]
I1210 00:29:24.663097 1 main.go:301] handling current node
I1210 00:29:34.656565 1 main.go:297] Handling node with IPs: map[192.168.85.2:{}]
I1210 00:29:34.656607 1 main.go:301] handling current node
I1210 00:29:44.662965 1 main.go:297] Handling node with IPs: map[192.168.85.2:{}]
I1210 00:29:44.663003 1 main.go:301] handling current node
I1210 00:29:54.662949 1 main.go:297] Handling node with IPs: map[192.168.85.2:{}]
I1210 00:29:54.662993 1 main.go:301] handling current node
I1210 00:30:04.657195 1 main.go:297] Handling node with IPs: map[192.168.85.2:{}]
I1210 00:30:04.657235 1 main.go:301] handling current node
I1210 00:30:14.661294 1 main.go:297] Handling node with IPs: map[192.168.85.2:{}]
I1210 00:30:14.661345 1 main.go:301] handling current node
I1210 00:30:24.662944 1 main.go:297] Handling node with IPs: map[192.168.85.2:{}]
I1210 00:30:24.662985 1 main.go:301] handling current node
I1210 00:30:34.655885 1 main.go:297] Handling node with IPs: map[192.168.85.2:{}]
I1210 00:30:34.655928 1 main.go:301] handling current node
I1210 00:30:44.662928 1 main.go:297] Handling node with IPs: map[192.168.85.2:{}]
I1210 00:30:44.662965 1 main.go:301] handling current node
I1210 00:30:54.658483 1 main.go:297] Handling node with IPs: map[192.168.85.2:{}]
I1210 00:30:54.658865 1 main.go:301] handling current node
I1210 00:31:04.657800 1 main.go:297] Handling node with IPs: map[192.168.85.2:{}]
I1210 00:31:04.657856 1 main.go:301] handling current node
I1210 00:31:14.659373 1 main.go:297] Handling node with IPs: map[192.168.85.2:{}]
I1210 00:31:14.659517 1 main.go:301] handling current node
==> kube-apiserver [9be25993b65b8cdca34c64615c37d67ed96191f7e935d4aa5f3f20b8a71af72d] <==
I1210 00:28:00.119565 1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379 <nil> 0 <nil>}] <nil> <nil>}
I1210 00:28:00.119573 1 clientconn.go:948] ClientConn switching balancer to "pick_first"
I1210 00:28:32.816941 1 client.go:360] parsed scheme: "passthrough"
I1210 00:28:32.816988 1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379 <nil> 0 <nil>}] <nil> <nil>}
I1210 00:28:32.816996 1 clientconn.go:948] ClientConn switching balancer to "pick_first"
W1210 00:28:33.018965 1 handler_proxy.go:102] no RequestInfo found in the context
E1210 00:28:33.019051 1 controller.go:116] loading OpenAPI spec for "v1beta1.metrics.k8s.io" failed with: failed to retrieve openAPI spec, http error: ResponseCode: 503, Body: service unavailable
, Header: map[Content-Type:[text/plain; charset=utf-8] X-Content-Type-Options:[nosniff]]
I1210 00:28:33.019068 1 controller.go:129] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
I1210 00:29:16.713392 1 client.go:360] parsed scheme: "passthrough"
I1210 00:29:16.713449 1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379 <nil> 0 <nil>}] <nil> <nil>}
I1210 00:29:16.713456 1 clientconn.go:948] ClientConn switching balancer to "pick_first"
I1210 00:29:49.955749 1 client.go:360] parsed scheme: "passthrough"
I1210 00:29:49.955799 1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379 <nil> 0 <nil>}] <nil> <nil>}
I1210 00:29:49.955807 1 clientconn.go:948] ClientConn switching balancer to "pick_first"
I1210 00:30:28.129149 1 client.go:360] parsed scheme: "passthrough"
I1210 00:30:28.129202 1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379 <nil> 0 <nil>}] <nil> <nil>}
I1210 00:30:28.129210 1 clientconn.go:948] ClientConn switching balancer to "pick_first"
W1210 00:30:30.457143 1 handler_proxy.go:102] no RequestInfo found in the context
E1210 00:30:30.457219 1 controller.go:116] loading OpenAPI spec for "v1beta1.metrics.k8s.io" failed with: failed to retrieve openAPI spec, http error: ResponseCode: 503, Body: service unavailable
, Header: map[Content-Type:[text/plain; charset=utf-8] X-Content-Type-Options:[nosniff]]
I1210 00:30:30.457228 1 controller.go:129] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
I1210 00:31:04.279735 1 client.go:360] parsed scheme: "passthrough"
I1210 00:31:04.279798 1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379 <nil> 0 <nil>}] <nil> <nil>}
I1210 00:31:04.279806 1 clientconn.go:948] ClientConn switching balancer to "pick_first"
==> kube-controller-manager [7f21b5ae0b202880d6305e4c384b15e18f96a0e7d3fdf3efa45355a2af113e82] <==
W1210 00:26:55.062773 1 garbagecollector.go:703] failed to discover some groups: map[metrics.k8s.io/v1beta1:the server is currently unable to handle the request]
E1210 00:27:21.110798 1 resource_quota_controller.go:409] unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: the server is currently unable to handle the request
I1210 00:27:26.713153 1 request.go:655] Throttling request took 1.048541144s, request: GET:https://192.168.85.2:8443/apis/storage.k8s.io/v1beta1?timeout=32s
W1210 00:27:27.565100 1 garbagecollector.go:703] failed to discover some groups: map[metrics.k8s.io/v1beta1:the server is currently unable to handle the request]
E1210 00:27:51.612634 1 resource_quota_controller.go:409] unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: the server is currently unable to handle the request
I1210 00:27:59.215523 1 request.go:655] Throttling request took 1.048363834s, request: GET:https://192.168.85.2:8443/apis/authentication.k8s.io/v1?timeout=32s
W1210 00:28:00.066913 1 garbagecollector.go:703] failed to discover some groups: map[metrics.k8s.io/v1beta1:the server is currently unable to handle the request]
E1210 00:28:22.114461 1 resource_quota_controller.go:409] unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: the server is currently unable to handle the request
I1210 00:28:31.717270 1 request.go:655] Throttling request took 1.048772487s, request: GET:https://192.168.85.2:8443/apis/authentication.k8s.io/v1beta1?timeout=32s
W1210 00:28:32.568585 1 garbagecollector.go:703] failed to discover some groups: map[metrics.k8s.io/v1beta1:the server is currently unable to handle the request]
E1210 00:28:52.616412 1 resource_quota_controller.go:409] unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: the server is currently unable to handle the request
I1210 00:29:04.219028 1 request.go:655] Throttling request took 1.048700542s, request: GET:https://192.168.85.2:8443/apis/autoscaling/v2beta1?timeout=32s
W1210 00:29:05.070245 1 garbagecollector.go:703] failed to discover some groups: map[metrics.k8s.io/v1beta1:the server is currently unable to handle the request]
E1210 00:29:23.118122 1 resource_quota_controller.go:409] unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: the server is currently unable to handle the request
I1210 00:29:36.720672 1 request.go:655] Throttling request took 1.048711103s, request: GET:https://192.168.85.2:8443/apis/batch/v1beta1?timeout=32s
W1210 00:29:37.571976 1 garbagecollector.go:703] failed to discover some groups: map[metrics.k8s.io/v1beta1:the server is currently unable to handle the request]
E1210 00:29:53.620074 1 resource_quota_controller.go:409] unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: the server is currently unable to handle the request
I1210 00:30:09.222258 1 request.go:655] Throttling request took 1.048571463s, request: GET:https://192.168.85.2:8443/apis/autoscaling/v2beta2?timeout=32s
W1210 00:30:10.073572 1 garbagecollector.go:703] failed to discover some groups: map[metrics.k8s.io/v1beta1:the server is currently unable to handle the request]
E1210 00:30:24.121839 1 resource_quota_controller.go:409] unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: the server is currently unable to handle the request
I1210 00:30:41.723968 1 request.go:655] Throttling request took 1.048718327s, request: GET:https://192.168.85.2:8443/apis/rbac.authorization.k8s.io/v1beta1?timeout=32s
W1210 00:30:42.575045 1 garbagecollector.go:703] failed to discover some groups: map[metrics.k8s.io/v1beta1:the server is currently unable to handle the request]
E1210 00:30:54.623727 1 resource_quota_controller.go:409] unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: the server is currently unable to handle the request
I1210 00:31:14.225478 1 request.go:655] Throttling request took 1.048679773s, request: GET:https://192.168.85.2:8443/apis/coordination.k8s.io/v1?timeout=32s
W1210 00:31:15.076797 1 garbagecollector.go:703] failed to discover some groups: map[metrics.k8s.io/v1beta1:the server is currently unable to handle the request]
==> kube-proxy [930a4290304a3700a3b34bb588be1a8cb0fc8fc88f3c3adb4bc89453498c1ba1] <==
I1210 00:23:15.365671 1 node.go:172] Successfully retrieved node IP: 192.168.85.2
I1210 00:23:15.365784 1 server_others.go:142] kube-proxy node IP is an IPv4 address (192.168.85.2), assume IPv4 operation
W1210 00:23:15.431756 1 server_others.go:578] Unknown proxy mode "", assuming iptables proxy
I1210 00:23:15.431882 1 server_others.go:185] Using iptables Proxier.
I1210 00:23:15.432498 1 server.go:650] Version: v1.20.0
I1210 00:23:15.433615 1 config.go:315] Starting service config controller
I1210 00:23:15.433634 1 shared_informer.go:240] Waiting for caches to sync for service config
I1210 00:23:15.433666 1 config.go:224] Starting endpoint slice config controller
I1210 00:23:15.433670 1 shared_informer.go:240] Waiting for caches to sync for endpoint slice config
I1210 00:23:15.533780 1 shared_informer.go:247] Caches are synced for endpoint slice config
I1210 00:23:15.533867 1 shared_informer.go:247] Caches are synced for service config
I1210 00:25:31.771074 1 node.go:172] Successfully retrieved node IP: 192.168.85.2
I1210 00:25:31.771142 1 server_others.go:142] kube-proxy node IP is an IPv4 address (192.168.85.2), assume IPv4 operation
W1210 00:25:31.840145 1 server_others.go:578] Unknown proxy mode "", assuming iptables proxy
I1210 00:25:31.840257 1 server_others.go:185] Using iptables Proxier.
I1210 00:25:31.840546 1 server.go:650] Version: v1.20.0
I1210 00:25:31.841041 1 config.go:315] Starting service config controller
I1210 00:25:31.841051 1 config.go:224] Starting endpoint slice config controller
I1210 00:25:31.841080 1 shared_informer.go:240] Waiting for caches to sync for endpoint slice config
I1210 00:25:31.841067 1 shared_informer.go:240] Waiting for caches to sync for service config
I1210 00:25:31.941265 1 shared_informer.go:247] Caches are synced for service config
I1210 00:25:31.941302 1 shared_informer.go:247] Caches are synced for endpoint slice config
==> kube-scheduler [e24a785fdbd96316943085ea3d97c2bbf5698967fcac01b25a6a185f04e80b07] <==
E1210 00:22:55.660566 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E1210 00:22:55.660664 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E1210 00:22:55.660749 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E1210 00:22:55.661487 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E1210 00:22:55.661532 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1beta1.PodDisruptionBudget: failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E1210 00:22:55.661592 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E1210 00:22:56.693626 1 reflector.go:138] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E1210 00:22:56.801598 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E1210 00:22:56.870447 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E1210 00:22:56.954061 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E1210 00:22:56.960399 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E1210 00:22:56.989884 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E1210 00:22:57.068922 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1beta1.PodDisruptionBudget: failed to list *v1beta1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E1210 00:22:57.087861 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
I1210 00:22:59.556341 1 shared_informer.go:247] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
I1210 00:25:25.765542 1 serving.go:331] Generated self-signed cert in-memory
W1210 00:25:29.438592 1 requestheader_controller.go:193] Unable to get configmap/extension-apiserver-authentication in kube-system. Usually fixed by 'kubectl create rolebinding -n kube-system ROLEBINDING_NAME --role=extension-apiserver-authentication-reader --serviceaccount=YOUR_NS:YOUR_SA'
W1210 00:25:29.438636 1 authentication.go:332] Error looking up in-cluster authentication configuration: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot get resource "configmaps" in API group "" in the namespace "kube-system"
W1210 00:25:29.438647 1 authentication.go:333] Continuing without authentication configuration. This may treat all requests as anonymous.
W1210 00:25:29.438657 1 authentication.go:334] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false
I1210 00:25:29.535210 1 secure_serving.go:197] Serving securely on 127.0.0.1:10259
I1210 00:25:29.535846 1 configmap_cafile_content.go:202] Starting client-ca::kube-system::extension-apiserver-authentication::client-ca-file
I1210 00:25:29.535869 1 shared_informer.go:240] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
I1210 00:25:29.535896 1 tlsconfig.go:240] Starting DynamicServingCertificateController
I1210 00:25:29.637758 1 shared_informer.go:247] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Dec 10 00:29:50 old-k8s-version-280963 kubelet[1066]: E1210 00:29:50.063838 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
Dec 10 00:30:03 old-k8s-version-280963 kubelet[1066]: I1210 00:30:03.062916 1066 scope.go:95] [topologymanager] RemoveContainer - Container ID: bb6919a8e0b6952dc53dbfd18a32c5f406cbf4c685d039991f5c8a1dbb11e9d1
Dec 10 00:30:03 old-k8s-version-280963 kubelet[1066]: E1210 00:30:03.063224 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
Dec 10 00:30:05 old-k8s-version-280963 kubelet[1066]: E1210 00:30:05.063807 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
Dec 10 00:30:17 old-k8s-version-280963 kubelet[1066]: E1210 00:30:17.063774 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
Dec 10 00:30:18 old-k8s-version-280963 kubelet[1066]: I1210 00:30:18.063061 1066 scope.go:95] [topologymanager] RemoveContainer - Container ID: bb6919a8e0b6952dc53dbfd18a32c5f406cbf4c685d039991f5c8a1dbb11e9d1
Dec 10 00:30:18 old-k8s-version-280963 kubelet[1066]: E1210 00:30:18.063380 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
Dec 10 00:30:29 old-k8s-version-280963 kubelet[1066]: I1210 00:30:29.062967 1066 scope.go:95] [topologymanager] RemoveContainer - Container ID: bb6919a8e0b6952dc53dbfd18a32c5f406cbf4c685d039991f5c8a1dbb11e9d1
Dec 10 00:30:29 old-k8s-version-280963 kubelet[1066]: E1210 00:30:29.063392 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
Dec 10 00:30:32 old-k8s-version-280963 kubelet[1066]: E1210 00:30:32.063891 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
Dec 10 00:30:41 old-k8s-version-280963 kubelet[1066]: I1210 00:30:41.062887 1066 scope.go:95] [topologymanager] RemoveContainer - Container ID: bb6919a8e0b6952dc53dbfd18a32c5f406cbf4c685d039991f5c8a1dbb11e9d1
Dec 10 00:30:41 old-k8s-version-280963 kubelet[1066]: E1210 00:30:41.063206 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
Dec 10 00:30:44 old-k8s-version-280963 kubelet[1066]: E1210 00:30:44.064113 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
Dec 10 00:30:54 old-k8s-version-280963 kubelet[1066]: I1210 00:30:54.063223 1066 scope.go:95] [topologymanager] RemoveContainer - Container ID: bb6919a8e0b6952dc53dbfd18a32c5f406cbf4c685d039991f5c8a1dbb11e9d1
Dec 10 00:30:54 old-k8s-version-280963 kubelet[1066]: E1210 00:30:54.063639 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
Dec 10 00:30:56 old-k8s-version-280963 kubelet[1066]: E1210 00:30:56.063846 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
Dec 10 00:31:06 old-k8s-version-280963 kubelet[1066]: I1210 00:31:06.062932 1066 scope.go:95] [topologymanager] RemoveContainer - Container ID: bb6919a8e0b6952dc53dbfd18a32c5f406cbf4c685d039991f5c8a1dbb11e9d1
Dec 10 00:31:06 old-k8s-version-280963 kubelet[1066]: E1210 00:31:06.063272 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
Dec 10 00:31:09 old-k8s-version-280963 kubelet[1066]: E1210 00:31:09.063618 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ImagePullBackOff: "Back-off pulling image \"fake.domain/registry.k8s.io/echoserver:1.4\""
Dec 10 00:31:21 old-k8s-version-280963 kubelet[1066]: I1210 00:31:21.063636 1066 scope.go:95] [topologymanager] RemoveContainer - Container ID: bb6919a8e0b6952dc53dbfd18a32c5f406cbf4c685d039991f5c8a1dbb11e9d1
Dec 10 00:31:21 old-k8s-version-280963 kubelet[1066]: E1210 00:31:21.064204 1066 pod_workers.go:191] Error syncing pod 276e856b-6a65-4d6b-af30-164aa8e39d64 ("dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"), skipping: failed to "StartContainer" for "dashboard-metrics-scraper" with CrashLoopBackOff: "back-off 2m40s restarting failed container=dashboard-metrics-scraper pod=dashboard-metrics-scraper-8d5bb5db8-h78fg_kubernetes-dashboard(276e856b-6a65-4d6b-af30-164aa8e39d64)"
Dec 10 00:31:21 old-k8s-version-280963 kubelet[1066]: E1210 00:31:21.094113 1066 remote_image.go:113] PullImage "fake.domain/registry.k8s.io/echoserver:1.4" from image service failed: rpc error: code = Unknown desc = failed to pull and unpack image "fake.domain/registry.k8s.io/echoserver:1.4": failed to resolve reference "fake.domain/registry.k8s.io/echoserver:1.4": failed to do request: Head "https://fake.domain/v2/registry.k8s.io/echoserver/manifests/1.4": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host
Dec 10 00:31:21 old-k8s-version-280963 kubelet[1066]: E1210 00:31:21.094203 1066 kuberuntime_image.go:51] Pull image "fake.domain/registry.k8s.io/echoserver:1.4" failed: rpc error: code = Unknown desc = failed to pull and unpack image "fake.domain/registry.k8s.io/echoserver:1.4": failed to resolve reference "fake.domain/registry.k8s.io/echoserver:1.4": failed to do request: Head "https://fake.domain/v2/registry.k8s.io/echoserver/manifests/1.4": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host
Dec 10 00:31:21 old-k8s-version-280963 kubelet[1066]: E1210 00:31:21.094471 1066 kuberuntime_manager.go:829] container &Container{Name:metrics-server,Image:fake.domain/registry.k8s.io/echoserver:1.4,Command:[],Args:[--cert-dir=/tmp --secure-port=4443 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --kubelet-use-node-status-port --metric-resolution=60s --kubelet-insecure-tls],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:4443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{100 -3} {<nil>} 100m DecimalSI},memory: {{209715200 0} {<nil>} BinarySI},},},VolumeMounts:[]VolumeMount{VolumeMount{Name:tmp-dir,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:metrics-server-token-8dbfh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:&Probe{Handler:Handler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/livez,Port:{1 0 https},Host:,Scheme:HTTPS,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,},InitialDelaySeconds:0,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,},ReadinessProbe:&Probe{Handler:Handler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{1 0 https},Host:,Scheme:HTTPS,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,},InitialDelaySeconds:0,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,} start failed in pod metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69): ErrImagePull: rpc error: code = Unknown desc = failed to pull and unpack image "fake.domain/registry.k8s.io/echoserver:1.4": failed to resolve reference "fake.domain/registry.k8s.io/echoserver:1.4": failed to do request: Head "https://fake.domain/v2/registry.k8s.io/echoserver/manifests/1.4": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host
Dec 10 00:31:21 old-k8s-version-280963 kubelet[1066]: E1210 00:31:21.094538 1066 pod_workers.go:191] Error syncing pod 094fa345-5ab4-498e-8f36-9c97dd546a69 ("metrics-server-9975d5f86-9wg6p_kube-system(094fa345-5ab4-498e-8f36-9c97dd546a69)"), skipping: failed to "StartContainer" for "metrics-server" with ErrImagePull: "rpc error: code = Unknown desc = failed to pull and unpack image \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to resolve reference \"fake.domain/registry.k8s.io/echoserver:1.4\": failed to do request: Head \"https://fake.domain/v2/registry.k8s.io/echoserver/manifests/1.4\": dial tcp: lookup fake.domain on 192.168.85.1:53: no such host"
==> kubernetes-dashboard [71efdd7b3ff7301fc234121aa9b522c561e17518296c9ac0f096808fd717194e] <==
2024/12/10 00:25:54 Starting overwatch
2024/12/10 00:25:54 Using namespace: kubernetes-dashboard
2024/12/10 00:25:54 Using in-cluster config to connect to apiserver
2024/12/10 00:25:54 Using secret token for csrf signing
2024/12/10 00:25:54 Initializing csrf token from kubernetes-dashboard-csrf secret
2024/12/10 00:25:54 Empty token. Generating and storing in a secret kubernetes-dashboard-csrf
2024/12/10 00:25:54 Successful initial request to the apiserver, version: v1.20.0
2024/12/10 00:25:54 Generating JWE encryption key
2024/12/10 00:25:54 New synchronizer has been registered: kubernetes-dashboard-key-holder-kubernetes-dashboard. Starting
2024/12/10 00:25:54 Starting secret synchronizer for kubernetes-dashboard-key-holder in namespace kubernetes-dashboard
2024/12/10 00:25:54 Initializing JWE encryption key from synchronized object
2024/12/10 00:25:54 Creating in-cluster Sidecar client
2024/12/10 00:25:54 Serving insecurely on HTTP port: 9090
2024/12/10 00:25:54 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
2024/12/10 00:26:24 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
2024/12/10 00:26:54 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
2024/12/10 00:27:24 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
2024/12/10 00:27:54 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
2024/12/10 00:28:24 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
2024/12/10 00:28:54 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
2024/12/10 00:29:24 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
2024/12/10 00:29:54 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
2024/12/10 00:30:24 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
2024/12/10 00:30:54 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
==> storage-provisioner [5ef64915e71a0a82e907f051bd349d35990910b7707e2189239897f76b8fcf24] <==
I1210 00:23:16.069372 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1210 00:23:16.079884 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1210 00:23:16.081397 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I1210 00:23:16.093778 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I1210 00:23:16.093935 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"de264d30-1f19-40ae-92c8-748638df9b78", APIVersion:"v1", ResourceVersion:"466", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' old-k8s-version-280963_86df2f53-e0e5-48a9-9f77-0924d9396378 became leader
I1210 00:23:16.094038 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_old-k8s-version-280963_86df2f53-e0e5-48a9-9f77-0924d9396378!
I1210 00:23:16.194994 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_old-k8s-version-280963_86df2f53-e0e5-48a9-9f77-0924d9396378!
I1210 00:25:31.653234 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
F1210 00:26:01.667586 1 main.go:39] error getting server version: Get "https://10.96.0.1:443/version?timeout=32s": dial tcp 10.96.0.1:443: i/o timeout
==> storage-provisioner [b9cf656a0d778fa858636c57f7ed856932e9c797614d0b2a0bb2b7b183d0444e] <==
I1210 00:26:02.456489 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1210 00:26:02.493301 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1210 00:26:02.495367 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I1210 00:26:19.911277 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I1210 00:26:19.912061 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_old-k8s-version-280963_96f0aa2a-3dc2-4166-9ffb-ab63cc136c72!
I1210 00:26:19.912059 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"de264d30-1f19-40ae-92c8-748638df9b78", APIVersion:"v1", ResourceVersion:"819", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' old-k8s-version-280963_96f0aa2a-3dc2-4166-9ffb-ab63cc136c72 became leader
I1210 00:26:20.013665 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_old-k8s-version-280963_96f0aa2a-3dc2-4166-9ffb-ab63cc136c72!
-- /stdout --
helpers_test.go:254: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p old-k8s-version-280963 -n old-k8s-version-280963
helpers_test.go:261: (dbg) Run: kubectl --context old-k8s-version-280963 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:272: non-running pods: metrics-server-9975d5f86-9wg6p
helpers_test.go:274: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/SecondStart]: describe non-running pods <======
helpers_test.go:277: (dbg) Run: kubectl --context old-k8s-version-280963 describe pod metrics-server-9975d5f86-9wg6p
helpers_test.go:277: (dbg) Non-zero exit: kubectl --context old-k8s-version-280963 describe pod metrics-server-9975d5f86-9wg6p: exit status 1 (64.377289ms)
** stderr **
Error from server (NotFound): pods "metrics-server-9975d5f86-9wg6p" not found
** /stderr **
helpers_test.go:279: kubectl --context old-k8s-version-280963 describe pod metrics-server-9975d5f86-9wg6p: exit status 1
--- FAIL: TestStartStop/group/old-k8s-version/serial/SecondStart (379.68s)