=== RUN TestStartStop/group/old-k8s-version/serial/DeployApp
start_stop_delete_test.go:194: (dbg) Run: kubectl --context old-k8s-version-295154 create -f testdata/busybox.yaml
start_stop_delete_test.go:194: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:352: "busybox" [54baf2f4-8de5-4f66-92ac-f5315174d940] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
helpers_test.go:352: "busybox" [54baf2f4-8de5-4f66-92ac-f5315174d940] Running
start_stop_delete_test.go:194: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: integration-test=busybox healthy within 9.003343341s
start_stop_delete_test.go:194: (dbg) Run: kubectl --context old-k8s-version-295154 exec busybox -- /bin/sh -c "ulimit -n"
start_stop_delete_test.go:194: 'ulimit -n' returned 1024, expected 1048576
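The failure above reduces to one assertion: once the busybox pod is Running, the test execs `ulimit -n` inside it and expects the open-file soft limit minikube configures (1048576), but the container reports the stock 1024. A minimal standalone sketch of that check, reusing the kubectl context name from this run (the real assertion lives in start_stop_delete_test.go and goes through the test harness, so the code below is illustrative only):

    // Sketch of the ulimit assertion; not the verbatim test code.
    package main

    import (
    	"fmt"
    	"os/exec"
    	"strings"
    )

    func main() {
    	out, err := exec.Command("kubectl", "--context", "old-k8s-version-295154",
    		"exec", "busybox", "--", "/bin/sh", "-c", "ulimit -n").CombinedOutput()
    	if err != nil {
    		panic(err)
    	}
    	got := strings.TrimSpace(string(out))
    	if got != "1048576" {
    		fmt.Printf("'ulimit -n' returned %s, expected 1048576\n", got)
    	}
    }

Note the empty "Ulimits": [] under HostConfig in the docker inspect output below, which is consistent with the kic container inheriting the daemon's default nofile limit rather than carrying an explicit override.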
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: docker inspect <======
helpers_test.go:239: (dbg) Run: docker inspect old-k8s-version-295154
helpers_test.go:243: (dbg) docker inspect old-k8s-version-295154:
-- stdout --
[
{
"Id": "1d2dc93defe08823e969abc1083166e5b987c49003d867c47f6dab538c73042e",
"Created": "2025-11-29T09:01:32.670265754Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 494787,
"ExitCode": 0,
"Error": "",
"StartedAt": "2025-11-29T09:01:32.709136408Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:133ca4ac39008d0056ad45d8cb70521d6b70d6e1b8bbff4678fd4b354efbdf70",
"ResolvConfPath": "/var/lib/docker/containers/1d2dc93defe08823e969abc1083166e5b987c49003d867c47f6dab538c73042e/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/1d2dc93defe08823e969abc1083166e5b987c49003d867c47f6dab538c73042e/hostname",
"HostsPath": "/var/lib/docker/containers/1d2dc93defe08823e969abc1083166e5b987c49003d867c47f6dab538c73042e/hosts",
"LogPath": "/var/lib/docker/containers/1d2dc93defe08823e969abc1083166e5b987c49003d867c47f6dab538c73042e/1d2dc93defe08823e969abc1083166e5b987c49003d867c47f6dab538c73042e-json.log",
"Name": "/old-k8s-version-295154",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"old-k8s-version-295154:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {
"max-size": "100m"
}
},
"NetworkMode": "old-k8s-version-295154",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "private",
"Dns": null,
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 3221225472,
"NanoCpus": 0,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 6442450944,
"MemorySwappiness": null,
"OomKillDisable": null,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"ID": "1d2dc93defe08823e969abc1083166e5b987c49003d867c47f6dab538c73042e",
"LowerDir": "/var/lib/docker/overlay2/10e010eea53c4090a92173793351457113c92b95e4addfb0007c310be02782d4-init/diff:/var/lib/docker/overlay2/eb180691bce18b8d981b2d61ed0962851c615364ed77c18ff66d559424569005/diff",
"MergedDir": "/var/lib/docker/overlay2/10e010eea53c4090a92173793351457113c92b95e4addfb0007c310be02782d4/merged",
"UpperDir": "/var/lib/docker/overlay2/10e010eea53c4090a92173793351457113c92b95e4addfb0007c310be02782d4/diff",
"WorkDir": "/var/lib/docker/overlay2/10e010eea53c4090a92173793351457113c92b95e4addfb0007c310be02782d4/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "volume",
"Name": "old-k8s-version-295154",
"Source": "/var/lib/docker/volumes/old-k8s-version-295154/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
},
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
}
],
"Config": {
"Hostname": "old-k8s-version-295154",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "old-k8s-version-295154",
"name.minikube.sigs.k8s.io": "old-k8s-version-295154",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"SandboxID": "d61dde634f57a1405987eb1bcb1468d94550e880fe30f55b1f686d12c8c280ee",
"SandboxKey": "/var/run/docker/netns/d61dde634f57",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33058"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33059"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33062"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33060"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33061"
}
]
},
"Networks": {
"old-k8s-version-295154": {
"IPAMConfig": {
"IPv4Address": "192.168.76.2",
"IPv6Address": ""
},
"Links": null,
"Aliases": null,
"DriverOpts": null,
"GwPriority": 0,
"NetworkID": "aea341d97cf5d4f6668e24ade3efa38cebbca9060f995994226a6ded161b076c",
"EndpointID": "7f306b5e076751e147ce07bdf687dd5284be41e6bffcdf4542e80d7a90deb9e2",
"Gateway": "192.168.76.1",
"IPAddress": "192.168.76.2",
"MacAddress": "e6:d5:92:ca:f6:04",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"old-k8s-version-295154",
"1d2dc93defe0"
]
}
}
}
}
]
-- /stdout --
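One detail worth noting in the inspect output above: HostConfig.PortBindings shows empty HostPort values because the container is published with flags of the form --publish=127.0.0.1::8443 (visible in the docker run line later in this log), so Docker assigns ephemeral host ports at start time; the assigned ports only show up under NetworkSettings.Ports (e.g. 8443/tcp -> 127.0.0.1:33061 in this run). A hedged sketch of recovering the forwarded API server port with docker's Go templating, assuming a local daemon and the container name from this run:

    // Look up the ephemeral host port bound to 8443/tcp; a sketch,
    // not minikube's own port-lookup code.
    package main

    import (
    	"fmt"
    	"os/exec"
    	"strings"
    )

    func main() {
    	out, err := exec.Command("docker", "inspect",
    		"--format", `{{(index (index .NetworkSettings.Ports "8443/tcp") 0).HostPort}}`,
    		"old-k8s-version-295154").Output()
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println("apiserver forwarded to 127.0.0.1:" + strings.TrimSpace(string(out))) // 33061 in this run
    }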
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p old-k8s-version-295154 -n old-k8s-version-295154
helpers_test.go:252: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-amd64 -p old-k8s-version-295154 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-amd64 -p old-k8s-version-295154 logs -n 25: (1.145289555s)
helpers_test.go:260: TestStartStop/group/old-k8s-version/serial/DeployApp logs:
-- stdout --
==> Audit <==
┌─────────┬────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬──────────────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼──────────────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ ssh │ -p cilium-770004 sudo systemctl status containerd --all --full --no-pager │ cilium-770004 │ jenkins │ v1.37.0 │ 29 Nov 25 09:00 UTC │ │
│ ssh │ -p cilium-770004 sudo systemctl cat containerd --no-pager │ cilium-770004 │ jenkins │ v1.37.0 │ 29 Nov 25 09:00 UTC │ │
│ ssh │ -p cilium-770004 sudo cat /lib/systemd/system/containerd.service │ cilium-770004 │ jenkins │ v1.37.0 │ 29 Nov 25 09:00 UTC │ │
│ ssh │ -p cilium-770004 sudo cat /etc/containerd/config.toml │ cilium-770004 │ jenkins │ v1.37.0 │ 29 Nov 25 09:00 UTC │ │
│ ssh │ -p cilium-770004 sudo containerd config dump │ cilium-770004 │ jenkins │ v1.37.0 │ 29 Nov 25 09:00 UTC │ │
│ ssh │ -p cilium-770004 sudo systemctl status crio --all --full --no-pager │ cilium-770004 │ jenkins │ v1.37.0 │ 29 Nov 25 09:00 UTC │ │
│ ssh │ -p cilium-770004 sudo systemctl cat crio --no-pager │ cilium-770004 │ jenkins │ v1.37.0 │ 29 Nov 25 09:00 UTC │ │
│ ssh │ -p cilium-770004 sudo find /etc/crio -type f -exec sh -c 'echo {}; cat {}' \; │ cilium-770004 │ jenkins │ v1.37.0 │ 29 Nov 25 09:00 UTC │ │
│ ssh │ -p cilium-770004 sudo crio config │ cilium-770004 │ jenkins │ v1.37.0 │ 29 Nov 25 09:00 UTC │ │
│ delete │ -p cilium-770004 │ cilium-770004 │ jenkins │ v1.37.0 │ 29 Nov 25 09:00 UTC │ 29 Nov 25 09:00 UTC │
│ start │ -p force-systemd-env-693869 --memory=3072 --alsologtostderr -v=5 --driver=docker --container-runtime=containerd │ force-systemd-env-693869 │ jenkins │ v1.37.0 │ 29 Nov 25 09:00 UTC │ 29 Nov 25 09:01 UTC │
│ start │ -p pause-563162 --alsologtostderr -v=1 --driver=docker --container-runtime=containerd │ pause-563162 │ jenkins │ v1.37.0 │ 29 Nov 25 09:00 UTC │ 29 Nov 25 09:01 UTC │
│ ssh │ force-systemd-env-693869 ssh cat /etc/containerd/config.toml │ force-systemd-env-693869 │ jenkins │ v1.37.0 │ 29 Nov 25 09:01 UTC │ 29 Nov 25 09:01 UTC │
│ delete │ -p force-systemd-env-693869 │ force-systemd-env-693869 │ jenkins │ v1.37.0 │ 29 Nov 25 09:01 UTC │ 29 Nov 25 09:01 UTC │
│ start │ -p cert-options-536258 --memory=3072 --apiserver-ips=127.0.0.1 --apiserver-ips=192.168.15.15 --apiserver-names=localhost --apiserver-names=www.google.com --apiserver-port=8555 --driver=docker --container-runtime=containerd │ cert-options-536258 │ jenkins │ v1.37.0 │ 29 Nov 25 09:01 UTC │ 29 Nov 25 09:01 UTC │
│ pause │ -p pause-563162 --alsologtostderr -v=5 │ pause-563162 │ jenkins │ v1.37.0 │ 29 Nov 25 09:01 UTC │ 29 Nov 25 09:01 UTC │
│ unpause │ -p pause-563162 --alsologtostderr -v=5 │ pause-563162 │ jenkins │ v1.37.0 │ 29 Nov 25 09:01 UTC │ 29 Nov 25 09:01 UTC │
│ pause │ -p pause-563162 --alsologtostderr -v=5 │ pause-563162 │ jenkins │ v1.37.0 │ 29 Nov 25 09:01 UTC │ 29 Nov 25 09:01 UTC │
│ delete │ -p pause-563162 --alsologtostderr -v=5 │ pause-563162 │ jenkins │ v1.37.0 │ 29 Nov 25 09:01 UTC │ 29 Nov 25 09:01 UTC │
│ ssh │ cert-options-536258 ssh openssl x509 -text -noout -in /var/lib/minikube/certs/apiserver.crt │ cert-options-536258 │ jenkins │ v1.37.0 │ 29 Nov 25 09:01 UTC │ 29 Nov 25 09:01 UTC │
│ ssh │ -p cert-options-536258 -- sudo cat /etc/kubernetes/admin.conf │ cert-options-536258 │ jenkins │ v1.37.0 │ 29 Nov 25 09:01 UTC │ 29 Nov 25 09:01 UTC │
│ delete │ -p cert-options-536258 │ cert-options-536258 │ jenkins │ v1.37.0 │ 29 Nov 25 09:01 UTC │ 29 Nov 25 09:01 UTC │
│ delete │ -p pause-563162 │ pause-563162 │ jenkins │ v1.37.0 │ 29 Nov 25 09:01 UTC │ 29 Nov 25 09:01 UTC │
│ start │ -p old-k8s-version-295154 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.28.0 │ old-k8s-version-295154 │ jenkins │ v1.37.0 │ 29 Nov 25 09:01 UTC │ 29 Nov 25 09:02 UTC │
│ start │ -p no-preload-924441 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.34.1 │ no-preload-924441 │ jenkins │ v1.37.0 │ 29 Nov 25 09:01 UTC │ 29 Nov 25 09:02 UTC │
└─────────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/11/29 09:01:29
Running on machine: ubuntu-20-agent-10
Binary: Built with gc go1.25.3 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1129 09:01:26.371812 460401 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
I1129 09:01:26.372231 460401 api_server.go:269] stopped: https://192.168.85.2:8443/healthz: Get "https://192.168.85.2:8443/healthz": dial tcp 192.168.85.2:8443: connect: connection refused
I1129 09:01:26.372304 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1129 09:01:26.372374 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1129 09:01:26.406988 460401 cri.go:89] found id: "5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095"
I1129 09:01:26.407016 460401 cri.go:89] found id: "40c6f3e103ae72dbb12c815df4659a1277b1a92060d18c5eb8f7b2d5365f14ac"
I1129 09:01:26.407022 460401 cri.go:89] found id: "1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:01:26.407027 460401 cri.go:89] found id: ""
I1129 09:01:26.407038 460401 logs.go:282] 3 containers: [5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095 40c6f3e103ae72dbb12c815df4659a1277b1a92060d18c5eb8f7b2d5365f14ac 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101]
I1129 09:01:26.407111 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:26.413707 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:26.419492 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:26.424920 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1129 09:01:26.424999 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1129 09:01:26.456369 460401 cri.go:89] found id: "f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:01:26.456395 460401 cri.go:89] found id: ""
I1129 09:01:26.456406 460401 logs.go:282] 1 containers: [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625]
I1129 09:01:26.456466 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:26.462064 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1129 09:01:26.462133 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1129 09:01:26.492837 460401 cri.go:89] found id: ""
I1129 09:01:26.492868 460401 logs.go:282] 0 containers: []
W1129 09:01:26.492879 460401 logs.go:284] No container was found matching "coredns"
I1129 09:01:26.492887 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1129 09:01:26.492955 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1129 09:01:26.521715 460401 cri.go:89] found id: "092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:01:26.521747 460401 cri.go:89] found id: "1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:01:26.521754 460401 cri.go:89] found id: ""
I1129 09:01:26.521763 460401 logs.go:282] 2 containers: [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea]
I1129 09:01:26.521821 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:26.526872 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:26.531295 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1129 09:01:26.531353 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1129 09:01:26.558218 460401 cri.go:89] found id: ""
I1129 09:01:26.558248 460401 logs.go:282] 0 containers: []
W1129 09:01:26.558257 460401 logs.go:284] No container was found matching "kube-proxy"
I1129 09:01:26.558264 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1129 09:01:26.558313 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1129 09:01:26.587221 460401 cri.go:89] found id: "2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:01:26.587246 460401 cri.go:89] found id: "f78d0d97ffa9f2d0cbf8a0cf305a7f0c4323a505bb9b3fa272405c6b22ab9f00"
I1129 09:01:26.587253 460401 cri.go:89] found id: "976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:01:26.587258 460401 cri.go:89] found id: ""
I1129 09:01:26.587268 460401 logs.go:282] 3 containers: [2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a f78d0d97ffa9f2d0cbf8a0cf305a7f0c4323a505bb9b3fa272405c6b22ab9f00 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d]
I1129 09:01:26.587328 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:26.591954 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:26.596055 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:26.600163 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1129 09:01:26.600219 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1129 09:01:26.628586 460401 cri.go:89] found id: ""
I1129 09:01:26.628613 460401 logs.go:282] 0 containers: []
W1129 09:01:26.628624 460401 logs.go:284] No container was found matching "kindnet"
I1129 09:01:26.628633 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1129 09:01:26.628690 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1129 09:01:26.657553 460401 cri.go:89] found id: ""
I1129 09:01:26.657581 460401 logs.go:282] 0 containers: []
W1129 09:01:26.657591 460401 logs.go:284] No container was found matching "storage-provisioner"
I1129 09:01:26.657603 460401 logs.go:123] Gathering logs for describe nodes ...
I1129 09:01:26.657622 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1129 09:01:26.721559 460401 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1129 09:01:26.721584 460401 logs.go:123] Gathering logs for kube-controller-manager [2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a] ...
I1129 09:01:26.721601 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:01:26.756136 460401 logs.go:123] Gathering logs for kube-controller-manager [f78d0d97ffa9f2d0cbf8a0cf305a7f0c4323a505bb9b3fa272405c6b22ab9f00] ...
I1129 09:01:26.756165 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f78d0d97ffa9f2d0cbf8a0cf305a7f0c4323a505bb9b3fa272405c6b22ab9f00"
I1129 09:01:26.787789 460401 logs.go:123] Gathering logs for containerd ...
I1129 09:01:26.787827 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1129 09:01:26.838908 460401 logs.go:123] Gathering logs for container status ...
I1129 09:01:26.838943 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1129 09:01:26.875689 460401 logs.go:123] Gathering logs for kubelet ...
I1129 09:01:26.875723 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1129 09:01:26.946907 460401 logs.go:123] Gathering logs for kube-apiserver [5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095] ...
I1129 09:01:26.946941 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095"
I1129 09:01:26.982883 460401 logs.go:123] Gathering logs for kube-apiserver [40c6f3e103ae72dbb12c815df4659a1277b1a92060d18c5eb8f7b2d5365f14ac] ...
I1129 09:01:26.982919 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 40c6f3e103ae72dbb12c815df4659a1277b1a92060d18c5eb8f7b2d5365f14ac"
W1129 09:01:27.012923 460401 logs.go:130] failed kube-apiserver [40c6f3e103ae72dbb12c815df4659a1277b1a92060d18c5eb8f7b2d5365f14ac]: command: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 40c6f3e103ae72dbb12c815df4659a1277b1a92060d18c5eb8f7b2d5365f14ac" /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 40c6f3e103ae72dbb12c815df4659a1277b1a92060d18c5eb8f7b2d5365f14ac": Process exited with status 1
stdout:
stderr:
E1129 09:01:27.010611 2688 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = an error occurred when try to find container \"40c6f3e103ae72dbb12c815df4659a1277b1a92060d18c5eb8f7b2d5365f14ac\": not found" containerID="40c6f3e103ae72dbb12c815df4659a1277b1a92060d18c5eb8f7b2d5365f14ac"
time="2025-11-29T09:01:27Z" level=fatal msg="rpc error: code = NotFound desc = an error occurred when try to find container \"40c6f3e103ae72dbb12c815df4659a1277b1a92060d18c5eb8f7b2d5365f14ac\": not found"
output:
** stderr **
E1129 09:01:27.010611 2688 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = an error occurred when try to find container \"40c6f3e103ae72dbb12c815df4659a1277b1a92060d18c5eb8f7b2d5365f14ac\": not found" containerID="40c6f3e103ae72dbb12c815df4659a1277b1a92060d18c5eb8f7b2d5365f14ac"
time="2025-11-29T09:01:27Z" level=fatal msg="rpc error: code = NotFound desc = an error occurred when try to find container \"40c6f3e103ae72dbb12c815df4659a1277b1a92060d18c5eb8f7b2d5365f14ac\": not found"
** /stderr **
I1129 09:01:27.012941 460401 logs.go:123] Gathering logs for kube-apiserver [1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101] ...
I1129 09:01:27.012953 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:01:27.051493 460401 logs.go:123] Gathering logs for etcd [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625] ...
I1129 09:01:27.051526 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:01:27.089722 460401 logs.go:123] Gathering logs for kube-scheduler [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21] ...
I1129 09:01:27.089755 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:01:27.138471 460401 logs.go:123] Gathering logs for kube-scheduler [1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea] ...
I1129 09:01:27.138504 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:01:27.172932 460401 logs.go:123] Gathering logs for kube-controller-manager [976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d] ...
I1129 09:01:27.172962 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:01:27.207844 460401 logs.go:123] Gathering logs for dmesg ...
I1129 09:01:27.207878 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
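The block above is minikube's log gatherer at work: for each control-plane component it runs crictl ps -a --quiet --name=<component> over SSH, splits the returned IDs, then tails each container with crictl logs --tail 400. Note how one apiserver ID collected at 09:01:26.40 (40c6f3e1...) no longer exists by 09:01:27.01, which is presumably why the crictl logs call above fails with NotFound: containers can be pruned between the listing and the log fetch. A rough local sketch of the same pattern (component names and flags taken from the log; minikube's real implementation in logs.go adds the SSH plumbing and richer error handling):

    package main

    import (
    	"fmt"
    	"os/exec"
    	"strings"
    )

    func main() {
    	for _, name := range []string{"kube-apiserver", "etcd", "kube-scheduler", "kube-controller-manager"} {
    		// List all containers (running or exited) for this component.
    		out, err := exec.Command("sudo", "crictl", "ps", "-a", "--quiet", "--name="+name).Output()
    		if err != nil {
    			fmt.Printf("listing %s failed: %v\n", name, err)
    			continue
    		}
    		for _, id := range strings.Fields(string(out)) {
    			// The container may disappear between listing and this call,
    			// producing the NotFound error seen above; tolerate it.
    			logs, _ := exec.Command("sudo", "crictl", "logs", "--tail", "400", id).CombinedOutput()
    			fmt.Printf("==> %s [%s] <==\n%s\n", name, id, logs)
    		}
    	}
    }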
I1129 09:01:29.500031 494126 out.go:360] Setting OutFile to fd 1 ...
I1129 09:01:29.500142 494126 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1129 09:01:29.500153 494126 out.go:374] Setting ErrFile to fd 2...
I1129 09:01:29.500159 494126 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1129 09:01:29.500372 494126 root.go:338] Updating PATH: /home/jenkins/minikube-integration/22000-255825/.minikube/bin
I1129 09:01:29.500882 494126 out.go:368] Setting JSON to false
I1129 09:01:29.501996 494126 start.go:133] hostinfo: {"hostname":"ubuntu-20-agent-10","uptime":6233,"bootTime":1764400656,"procs":294,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1044-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I1129 09:01:29.502070 494126 start.go:143] virtualization: kvm guest
I1129 09:01:29.506976 494126 out.go:179] * [no-preload-924441] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
I1129 09:01:29.508162 494126 out.go:179] - MINIKUBE_LOCATION=22000
I1129 09:01:29.508182 494126 notify.go:221] Checking for updates...
I1129 09:01:29.510318 494126 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1129 09:01:29.511334 494126 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/22000-255825/kubeconfig
I1129 09:01:29.516252 494126 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/22000-255825/.minikube
I1129 09:01:29.517321 494126 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64
I1129 09:01:29.518374 494126 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1129 09:01:29.519877 494126 config.go:182] Loaded profile config "cert-expiration-368536": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1129 09:01:29.519989 494126 config.go:182] Loaded profile config "kubernetes-upgrade-806701": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1129 09:01:29.520095 494126 config.go:182] Loaded profile config "old-k8s-version-295154": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1129 09:01:29.520225 494126 driver.go:422] Setting default libvirt URI to qemu:///system
I1129 09:01:29.546023 494126 docker.go:124] docker version: linux-29.1.1:Docker Engine - Community
I1129 09:01:29.546141 494126 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1129 09:01:29.607775 494126 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:3 ContainersRunning:3 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:63 OomKillDisable:false NGoroutines:81 SystemTime:2025-11-29 09:01:29.596891851 +0000 UTC LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-1044-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652068352 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-10 Labels:[] ExperimentalBuild:false ServerVersion:29.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:1c4457e00facac03ce1d75f7b6777a7a851e5c41 Expected:} RuncCommit:{ID:v1.3.4-0-gd6d73eb8 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.30.1] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.40.3] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v1.0.2] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1129 09:01:29.607908 494126 docker.go:319] overlay module found
I1129 09:01:29.610288 494126 out.go:179] * Using the docker driver based on user configuration
I1129 09:01:29.611200 494126 start.go:309] selected driver: docker
I1129 09:01:29.611220 494126 start.go:927] validating driver "docker" against <nil>
I1129 09:01:29.611231 494126 start.go:938] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1129 09:01:29.611850 494126 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1129 09:01:29.673266 494126 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:3 ContainersRunning:3 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:63 OomKillDisable:false NGoroutines:81 SystemTime:2025-11-29 09:01:29.662655452 +0000 UTC LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-1044-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652068352 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-10 Labels:[] ExperimentalBuild:false ServerVersion:29.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:1c4457e00facac03ce1d75f7b6777a7a851e5c41 Expected:} RuncCommit:{ID:v1.3.4-0-gd6d73eb8 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.30.1] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.40.3] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v1.0.2] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1129 09:01:29.673484 494126 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I1129 09:01:29.673822 494126 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1129 09:01:29.675454 494126 out.go:179] * Using Docker driver with root privileges
I1129 09:01:29.679127 494126 cni.go:84] Creating CNI manager for ""
I1129 09:01:29.679243 494126 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1129 09:01:29.679264 494126 start_flags.go:336] Found "CNI" CNI - setting NetworkPlugin=cni
I1129 09:01:29.679351 494126 start.go:353] cluster config:
{Name:no-preload-924441 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-924441 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1129 09:01:29.680591 494126 out.go:179] * Starting "no-preload-924441" primary control-plane node in "no-preload-924441" cluster
I1129 09:01:29.681517 494126 cache.go:134] Beginning downloading kic base image for docker with containerd
I1129 09:01:29.682533 494126 out.go:179] * Pulling base image v0.0.48-1763789673-21948 ...
I1129 09:01:29.683845 494126 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
I1129 09:01:29.683975 494126 profile.go:143] Saving config to /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/config.json ...
I1129 09:01:29.683971 494126 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f in local docker daemon
I1129 09:01:29.684042 494126 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/config.json: {Name:mk4df9140f26fdbfe5b2addb71b44607d26b26a3 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:01:29.684181 494126 cache.go:107] acquiring lock: {Name:mka90f7eac55a6e5d6d9651fc108f327509b562f Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1129 09:01:29.684233 494126 cache.go:107] acquiring lock: {Name:mk2c250a4202b546a18f0cc7664314439a4ec834 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1129 09:01:29.684259 494126 cache.go:107] acquiring lock: {Name:mk976aaa4e01b0c9e83cc6925b8c3c72804bfa25 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1129 09:01:29.684288 494126 cache.go:115] /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 exists
I1129 09:01:29.684299 494126 cache.go:96] cache image "gcr.io/k8s-minikube/storage-provisioner:v5" -> "/home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5" took 144.373µs
I1129 09:01:29.684315 494126 cache.go:80] save to tar file gcr.io/k8s-minikube/storage-provisioner:v5 -> /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 succeeded
I1129 09:01:29.684321 494126 cache.go:115] /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1 exists
I1129 09:01:29.684322 494126 cache.go:115] /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1 exists
I1129 09:01:29.684332 494126 cache.go:96] cache image "registry.k8s.io/kube-apiserver:v1.34.1" -> "/home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1" took 80.37µs
I1129 09:01:29.684333 494126 cache.go:96] cache image "registry.k8s.io/kube-proxy:v1.34.1" -> "/home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1" took 119.913µs
I1129 09:01:29.684341 494126 cache.go:80] save to tar file registry.k8s.io/kube-apiserver:v1.34.1 -> /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1 succeeded
I1129 09:01:29.684344 494126 cache.go:80] save to tar file registry.k8s.io/kube-proxy:v1.34.1 -> /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1 succeeded
I1129 09:01:29.684332 494126 cache.go:107] acquiring lock: {Name:mkff44f5b6b961ddaa9acc3e74cf0480b0d2f776 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1129 09:01:29.684358 494126 cache.go:107] acquiring lock: {Name:mk6080f4393a19fb5c4d6f436dce1a2bb1688f86 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1129 09:01:29.684378 494126 cache.go:115] /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1 exists
I1129 09:01:29.684387 494126 cache.go:96] cache image "registry.k8s.io/coredns/coredns:v1.12.1" -> "/home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1" took 58.113µs
I1129 09:01:29.684395 494126 cache.go:80] save to tar file registry.k8s.io/coredns/coredns:v1.12.1 -> /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1 succeeded
I1129 09:01:29.684399 494126 cache.go:115] /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 exists
I1129 09:01:29.684282 494126 cache.go:107] acquiring lock: {Name:mkb8e7a67c98a0b8caa208116d415323f5ca7ccc Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1129 09:01:29.684410 494126 cache.go:107] acquiring lock: {Name:mk47ee24ca074cb6cc1a641d737215686b099dc0 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1129 09:01:29.684472 494126 cache.go:115] /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1 exists
I1129 09:01:29.684482 494126 cache.go:96] cache image "registry.k8s.io/kube-controller-manager:v1.34.1" -> "/home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1" took 217.393µs
I1129 09:01:29.684492 494126 cache.go:80] save to tar file registry.k8s.io/kube-controller-manager:v1.34.1 -> /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1 succeeded
I1129 09:01:29.684416 494126 cache.go:96] cache image "registry.k8s.io/pause:3.10.1" -> "/home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1" took 61.464µs
I1129 09:01:29.684504 494126 cache.go:80] save to tar file registry.k8s.io/pause:3.10.1 -> /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 succeeded
I1129 09:01:29.684517 494126 cache.go:115] /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1 exists
I1129 09:01:29.684533 494126 cache.go:96] cache image "registry.k8s.io/kube-scheduler:v1.34.1" -> "/home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1" took 171.692µs
I1129 09:01:29.684552 494126 cache.go:80] save to tar file registry.k8s.io/kube-scheduler:v1.34.1 -> /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1 succeeded
I1129 09:01:29.684643 494126 cache.go:107] acquiring lock: {Name:mk912246de843459c104f342794e23ecb1fc7a75 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1129 09:01:29.684790 494126 cache.go:115] /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0 exists
I1129 09:01:29.684806 494126 cache.go:96] cache image "registry.k8s.io/etcd:3.6.4-0" -> "/home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0" took 226.111µs
I1129 09:01:29.684824 494126 cache.go:80] save to tar file registry.k8s.io/etcd:3.6.4-0 -> /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0 succeeded
I1129 09:01:29.684840 494126 cache.go:87] Successfully saved all images to host disk.
I1129 09:01:29.706829 494126 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f in local docker daemon, skipping pull
I1129 09:01:29.706854 494126 cache.go:158] gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f exists in daemon, skipping load
I1129 09:01:29.706878 494126 cache.go:243] Successfully downloaded all kic artifacts
I1129 09:01:29.706918 494126 start.go:360] acquireMachinesLock for no-preload-924441: {Name:mkf9f3b6b30f178cf9b9d50a2dabce8e2c5d48f0 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1129 09:01:29.707056 494126 start.go:364] duration metric: took 99.455µs to acquireMachinesLock for "no-preload-924441"
I1129 09:01:29.707090 494126 start.go:93] Provisioning new machine with config: &{Name:no-preload-924441 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-924441 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1129 09:01:29.707206 494126 start.go:125] createHost starting for "" (driver="docker")
I1129 09:01:28.461537 493486 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I1129 09:01:28.461867 493486 start.go:159] libmachine.API.Create for "old-k8s-version-295154" (driver="docker")
I1129 09:01:28.461917 493486 client.go:173] LocalClient.Create starting
I1129 09:01:28.462009 493486 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/22000-255825/.minikube/certs/ca.pem
I1129 09:01:28.462065 493486 main.go:143] libmachine: Decoding PEM data...
I1129 09:01:28.462089 493486 main.go:143] libmachine: Parsing certificate...
I1129 09:01:28.462160 493486 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/22000-255825/.minikube/certs/cert.pem
I1129 09:01:28.462186 493486 main.go:143] libmachine: Decoding PEM data...
I1129 09:01:28.462205 493486 main.go:143] libmachine: Parsing certificate...
I1129 09:01:28.462679 493486 cli_runner.go:164] Run: docker network inspect old-k8s-version-295154 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1129 09:01:28.481658 493486 cli_runner.go:211] docker network inspect old-k8s-version-295154 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1129 09:01:28.481745 493486 network_create.go:284] running [docker network inspect old-k8s-version-295154] to gather additional debugging logs...
I1129 09:01:28.481770 493486 cli_runner.go:164] Run: docker network inspect old-k8s-version-295154
W1129 09:01:28.500619 493486 cli_runner.go:211] docker network inspect old-k8s-version-295154 returned with exit code 1
I1129 09:01:28.500661 493486 network_create.go:287] error running [docker network inspect old-k8s-version-295154]: docker network inspect old-k8s-version-295154: exit status 1
stdout:
[]
stderr:
Error response from daemon: network old-k8s-version-295154 not found
I1129 09:01:28.500677 493486 network_create.go:289] output of [docker network inspect old-k8s-version-295154]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network old-k8s-version-295154 not found
** /stderr **
I1129 09:01:28.500849 493486 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1129 09:01:28.518426 493486 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-f69c672bf913 IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:26:40:f4:ed:4f:ab} reservation:<nil>}
I1129 09:01:28.519384 493486 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-96d20aff5877 IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:c2:01:e2:a3:b8:33} reservation:<nil>}
I1129 09:01:28.520407 493486 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-f7906c56f869 IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:06:29:75:e3:e0:7f} reservation:<nil>}
I1129 09:01:28.521974 493486 network.go:206] using free private subnet 192.168.76.0/24: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001f90700}
I1129 09:01:28.522028 493486 network_create.go:124] attempt to create docker network old-k8s-version-295154 192.168.76.0/24 with gateway 192.168.76.1 and MTU of 1500 ...
I1129 09:01:28.522109 493486 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.76.0/24 --gateway=192.168.76.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=old-k8s-version-295154 old-k8s-version-295154
I1129 09:01:28.575478 493486 network_create.go:108] docker network old-k8s-version-295154 192.168.76.0/24 created
I1129 09:01:28.575522 493486 kic.go:121] calculated static IP "192.168.76.2" for the "old-k8s-version-295154" container
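The "skipping subnet ... that is taken" lines above show minikube's subnet picker: it walks candidate private /24 blocks (192.168.49.0, .58.0, .67.0, .76.0 in this run, stepping by 9 as the sequence suggests) and takes the first one with no existing bridge interface. A simplified sketch of that scan under those assumptions (minikube's network.go performs richer checks, including Docker-side network state):

    package main

    import (
    	"fmt"
    	"net"
    )

    // taken reports whether any local interface already sits in subnet.
    func taken(subnet *net.IPNet) bool {
    	addrs, err := net.InterfaceAddrs()
    	if err != nil {
    		return true // be conservative on error
    	}
    	for _, a := range addrs {
    		if ipn, ok := a.(*net.IPNet); ok && subnet.Contains(ipn.IP) {
    			return true
    		}
    	}
    	return false
    }

    func main() {
    	// Candidate /24 blocks stepping by 9: .49, .58, .67, .76, ...
    	for third := 49; third < 255; third += 9 {
    		_, subnet, _ := net.ParseCIDR(fmt.Sprintf("192.168.%d.0/24", third))
    		if taken(subnet) {
    			fmt.Println("skipping subnet", subnet, "that is taken")
    			continue
    		}
    		fmt.Println("using free private subnet", subnet)
    		break
    	}
    }

With 192.168.76.0/24 free, the gateway becomes .1 and the node's static IP .2, which matches the 192.168.76.2 address seen in the docker inspect output earlier.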
I1129 09:01:28.575603 493486 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1129 09:01:28.593666 493486 cli_runner.go:164] Run: docker volume create old-k8s-version-295154 --label name.minikube.sigs.k8s.io=old-k8s-version-295154 --label created_by.minikube.sigs.k8s.io=true
I1129 09:01:28.612389 493486 oci.go:103] Successfully created a docker volume old-k8s-version-295154
I1129 09:01:28.612501 493486 cli_runner.go:164] Run: docker run --rm --name old-k8s-version-295154-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=old-k8s-version-295154 --entrypoint /usr/bin/test -v old-k8s-version-295154:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -d /var/lib
I1129 09:01:29.238109 493486 oci.go:107] Successfully prepared a docker volume old-k8s-version-295154
I1129 09:01:29.238162 493486 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1129 09:01:29.238176 493486 kic.go:194] Starting extracting preloaded images to volume ...
I1129 09:01:29.238241 493486 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/22000-255825/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v old-k8s-version-295154:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -I lz4 -xf /preloaded.tar -C /extractDir
I1129 09:01:32.586626 493486 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/22000-255825/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v old-k8s-version-295154:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -I lz4 -xf /preloaded.tar -C /extractDir: (3.348341473s)
I1129 09:01:32.586660 493486 kic.go:203] duration metric: took 3.348481997s to extract preloaded images to volume ...
W1129 09:01:32.586761 493486 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
W1129 09:01:32.586805 493486 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.
I1129 09:01:32.586861 493486 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1129 09:01:32.650922 493486 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname old-k8s-version-295154 --name old-k8s-version-295154 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=old-k8s-version-295154 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=old-k8s-version-295154 --network old-k8s-version-295154 --ip 192.168.76.2 --volume old-k8s-version-295154:/var --security-opt apparmor=unconfined --memory=3072mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f
I1129 09:01:32.982372 493486 cli_runner.go:164] Run: docker container inspect old-k8s-version-295154 --format={{.State.Running}}
I1129 09:01:33.001073 493486 cli_runner.go:164] Run: docker container inspect old-k8s-version-295154 --format={{.State.Status}}
I1129 09:01:33.021021 493486 cli_runner.go:164] Run: docker exec old-k8s-version-295154 stat /var/lib/dpkg/alternatives/iptables
I1129 09:01:33.078706 493486 oci.go:144] the created container "old-k8s-version-295154" has a running status.
I1129 09:01:33.078890 493486 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/22000-255825/.minikube/machines/old-k8s-version-295154/id_rsa...
I1129 09:01:33.213970 493486 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/22000-255825/.minikube/machines/old-k8s-version-295154/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1129 09:01:33.251103 493486 cli_runner.go:164] Run: docker container inspect old-k8s-version-295154 --format={{.State.Status}}
I1129 09:01:29.709142 494126 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I1129 09:01:29.709367 494126 start.go:159] libmachine.API.Create for "no-preload-924441" (driver="docker")
I1129 09:01:29.709398 494126 client.go:173] LocalClient.Create starting
I1129 09:01:29.709475 494126 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/22000-255825/.minikube/certs/ca.pem
I1129 09:01:29.709526 494126 main.go:143] libmachine: Decoding PEM data...
I1129 09:01:29.709553 494126 main.go:143] libmachine: Parsing certificate...
I1129 09:01:29.709629 494126 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/22000-255825/.minikube/certs/cert.pem
I1129 09:01:29.709661 494126 main.go:143] libmachine: Decoding PEM data...
I1129 09:01:29.709679 494126 main.go:143] libmachine: Parsing certificate...
I1129 09:01:29.710082 494126 cli_runner.go:164] Run: docker network inspect no-preload-924441 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1129 09:01:29.727862 494126 cli_runner.go:211] docker network inspect no-preload-924441 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1129 09:01:29.727982 494126 network_create.go:284] running [docker network inspect no-preload-924441] to gather additional debugging logs...
I1129 09:01:29.728011 494126 cli_runner.go:164] Run: docker network inspect no-preload-924441
W1129 09:01:29.747053 494126 cli_runner.go:211] docker network inspect no-preload-924441 returned with exit code 1
I1129 09:01:29.747092 494126 network_create.go:287] error running [docker network inspect no-preload-924441]: docker network inspect no-preload-924441: exit status 1
stdout:
[]
stderr:
Error response from daemon: network no-preload-924441 not found
I1129 09:01:29.747129 494126 network_create.go:289] output of [docker network inspect no-preload-924441]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network no-preload-924441 not found
** /stderr **
I1129 09:01:29.747297 494126 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1129 09:01:29.769138 494126 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-f69c672bf913 IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:26:40:f4:ed:4f:ab} reservation:<nil>}
I1129 09:01:29.769961 494126 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-96d20aff5877 IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:c2:01:e2:a3:b8:33} reservation:<nil>}
I1129 09:01:29.770795 494126 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-f7906c56f869 IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:06:29:75:e3:e0:7f} reservation:<nil>}
I1129 09:01:29.771440 494126 network.go:211] skipping subnet 192.168.76.0/24 that is taken: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName:br-aea341d97cf5 IfaceIPv4:192.168.76.1 IfaceMTU:1500 IfaceMAC:ea:fb:22:ff:e0:65} reservation:<nil>}
I1129 09:01:29.771972 494126 network.go:211] skipping subnet 192.168.85.0/24 that is taken: &{IP:192.168.85.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.85.0/24 Gateway:192.168.85.1 ClientMin:192.168.85.2 ClientMax:192.168.85.254 Broadcast:192.168.85.255 IsPrivate:true Interface:{IfaceName:br-5ec7c7346e1b IfaceIPv4:192.168.85.1 IfaceMTU:1500 IfaceMAC:f6:a5:df:dd:c8:cf} reservation:<nil>}
I1129 09:01:29.772536 494126 network.go:211] skipping subnet 192.168.94.0/24 that is taken: &{IP:192.168.94.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.94.0/24 Gateway:192.168.94.1 ClientMin:192.168.94.2 ClientMax:192.168.94.254 Broadcast:192.168.94.255 IsPrivate:true Interface:{IfaceName:br-ede9a8c5c6b0 IfaceIPv4:192.168.94.1 IfaceMTU:1500 IfaceMAC:3e:6e:06:75:02:7a} reservation:<nil>}
I1129 09:01:29.773382 494126 network.go:206] using free private subnet 192.168.103.0/24: &{IP:192.168.103.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.103.0/24 Gateway:192.168.103.1 ClientMin:192.168.103.2 ClientMax:192.168.103.254 Broadcast:192.168.103.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc00201aa40}
I1129 09:01:29.773412 494126 network_create.go:124] attempt to create docker network no-preload-924441 192.168.103.0/24 with gateway 192.168.103.1 and MTU of 1500 ...
I1129 09:01:29.773492 494126 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.103.0/24 --gateway=192.168.103.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=no-preload-924441 no-preload-924441
I1129 09:01:29.826699 494126 network_create.go:108] docker network no-preload-924441 192.168.103.0/24 created
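
[annotation] The six "skipping subnet" lines above show minikube's free-subnet search: candidate 192.168.x.0/24 blocks are probed in steps of 9 (49, 58, 67, ...) until one is not already bound to a local bridge interface, and that block becomes the new docker network. A minimal Go sketch of the same idea, not minikube's actual network.go (which also tracks reservations, gateways, and MTU):

package main

import (
	"fmt"
	"net"
)

// taken reports whether any local interface address already falls inside subnet.
func taken(subnet *net.IPNet) bool {
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return true // conservative: treat a lookup failure as "in use"
	}
	for _, a := range addrs {
		if ipn, ok := a.(*net.IPNet); ok && subnet.Contains(ipn.IP) {
			return true
		}
	}
	return false
}

func main() {
	for third := 49; third <= 254; third += 9 { // the 9-step walk visible in the log
		cidr := fmt.Sprintf("192.168.%d.0/24", third)
		_, subnet, err := net.ParseCIDR(cidr)
		if err != nil {
			continue
		}
		if !taken(subnet) {
			fmt.Println("using free private subnet", cidr)
			return
		}
		fmt.Println("skipping subnet", cidr, "that is taken")
	}
}

With four other clusters already up, the first free block is 192.168.103.0/24, matching the network created above.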
I1129 09:01:29.826822 494126 kic.go:121] calculated static IP "192.168.103.2" for the "no-preload-924441" container
I1129 09:01:29.826907 494126 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1129 09:01:29.848520 494126 cli_runner.go:164] Run: docker volume create no-preload-924441 --label name.minikube.sigs.k8s.io=no-preload-924441 --label created_by.minikube.sigs.k8s.io=true
I1129 09:01:29.870388 494126 oci.go:103] Successfully created a docker volume no-preload-924441
I1129 09:01:29.870496 494126 cli_runner.go:164] Run: docker run --rm --name no-preload-924441-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=no-preload-924441 --entrypoint /usr/bin/test -v no-preload-924441:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -d /var/lib
I1129 09:01:32.848045 494126 cli_runner.go:217] Completed: docker run --rm --name no-preload-924441-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=no-preload-924441 --entrypoint /usr/bin/test -v no-preload-924441:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -d /var/lib: (2.977502795s)
I1129 09:01:32.848077 494126 oci.go:107] Successfully prepared a docker volume no-preload-924441
I1129 09:01:32.848131 494126 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
W1129 09:01:32.848227 494126 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
W1129 09:01:32.848271 494126 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.
I1129 09:01:32.848312 494126 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1129 09:01:32.909124 494126 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname no-preload-924441 --name no-preload-924441 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=no-preload-924441 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=no-preload-924441 --network no-preload-924441 --ip 192.168.103.2 --volume no-preload-924441:/var --security-opt apparmor=unconfined --memory=3072mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f
I1129 09:01:33.229639 494126 cli_runner.go:164] Run: docker container inspect no-preload-924441 --format={{.State.Running}}
I1129 09:01:33.257967 494126 cli_runner.go:164] Run: docker container inspect no-preload-924441 --format={{.State.Status}}
I1129 09:01:33.283525 494126 cli_runner.go:164] Run: docker exec no-preload-924441 stat /var/lib/dpkg/alternatives/iptables
I1129 09:01:33.358911 494126 oci.go:144] the created container "no-preload-924441" has a running status.
I1129 09:01:33.358964 494126 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/22000-255825/.minikube/machines/no-preload-924441/id_rsa...
I1129 09:01:33.456248 494126 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/22000-255825/.minikube/machines/no-preload-924441/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1129 09:01:33.491041 494126 cli_runner.go:164] Run: docker container inspect no-preload-924441 --format={{.State.Status}}
I1129 09:01:33.515555 494126 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1129 09:01:33.515581 494126 kic_runner.go:114] Args: [docker exec --privileged no-preload-924441 chown docker:docker /home/docker/.ssh/authorized_keys]
I1129 09:01:33.567971 494126 cli_runner.go:164] Run: docker container inspect no-preload-924441 --format={{.State.Status}}
I1129 09:01:33.599907 494126 machine.go:94] provisionDockerMachine start ...
I1129 09:01:33.599999 494126 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-924441
I1129 09:01:33.634873 494126 main.go:143] libmachine: Using SSH client type: native
I1129 09:01:33.635521 494126 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33063 <nil> <nil>}
I1129 09:01:33.635590 494126 main.go:143] libmachine: About to run SSH command:
hostname
I1129 09:01:33.636667 494126 main.go:143] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:34766->127.0.0.1:33063: read: connection reset by peer
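
[annotation] The "connection reset by peer" above is the usual transient state right after docker run: the container is running but sshd inside it has not started listening yet, so the first handshake fails and libmachine retries. A hedged sketch of such a wait loop, assuming a bare TCP probe is an acceptable readiness signal (the real provisioner retries the full SSH handshake):

package main

import (
	"fmt"
	"net"
	"time"
)

// waitForSSH polls addr until a TCP connection succeeds or timeout elapses.
func waitForSSH(addr string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		conn, err := net.DialTimeout("tcp", addr, 2*time.Second)
		if err == nil {
			conn.Close()
			return nil // the port accepts connections; hand off to the SSH client
		}
		time.Sleep(500 * time.Millisecond) // back off and retry
	}
	return fmt.Errorf("ssh on %s not ready within %s", addr, timeout)
}

func main() {
	// 33063 is the host port Docker mapped to the container's 22/tcp above
	if err := waitForSSH("127.0.0.1:33063", 30*time.Second); err != nil {
		fmt.Println(err)
	}
}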
I1129 09:01:29.724136 460401 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
I1129 09:01:29.724608 460401 api_server.go:269] stopped: https://192.168.85.2:8443/healthz: Get "https://192.168.85.2:8443/healthz": dial tcp 192.168.85.2:8443: connect: connection refused
I1129 09:01:29.724657 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1129 09:01:29.724702 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1129 09:01:29.763194 460401 cri.go:89] found id: "5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095"
I1129 09:01:29.763266 460401 cri.go:89] found id: "1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:01:29.763286 460401 cri.go:89] found id: ""
I1129 09:01:29.763304 460401 logs.go:282] 2 containers: [5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101]
I1129 09:01:29.763372 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:29.769877 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:29.774814 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1129 09:01:29.774887 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1129 09:01:29.810078 460401 cri.go:89] found id: "f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:01:29.810105 460401 cri.go:89] found id: ""
I1129 09:01:29.810116 460401 logs.go:282] 1 containers: [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625]
I1129 09:01:29.810167 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:29.815272 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1129 09:01:29.815349 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1129 09:01:29.851653 460401 cri.go:89] found id: ""
I1129 09:01:29.851680 460401 logs.go:282] 0 containers: []
W1129 09:01:29.851691 460401 logs.go:284] No container was found matching "coredns"
I1129 09:01:29.851700 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1129 09:01:29.851773 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1129 09:01:29.883424 460401 cri.go:89] found id: "092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:01:29.883449 460401 cri.go:89] found id: "1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:01:29.883456 460401 cri.go:89] found id: ""
I1129 09:01:29.883466 460401 logs.go:282] 2 containers: [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea]
I1129 09:01:29.883537 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:29.889105 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:29.894072 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1129 09:01:29.894150 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1129 09:01:29.924971 460401 cri.go:89] found id: ""
I1129 09:01:29.925006 460401 logs.go:282] 0 containers: []
W1129 09:01:29.925019 460401 logs.go:284] No container was found matching "kube-proxy"
I1129 09:01:29.925027 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1129 09:01:29.925129 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1129 09:01:29.954168 460401 cri.go:89] found id: "2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:01:29.954194 460401 cri.go:89] found id: "f78d0d97ffa9f2d0cbf8a0cf305a7f0c4323a505bb9b3fa272405c6b22ab9f00"
I1129 09:01:29.954199 460401 cri.go:89] found id: "976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:01:29.954203 460401 cri.go:89] found id: ""
I1129 09:01:29.954214 460401 logs.go:282] 3 containers: [2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a f78d0d97ffa9f2d0cbf8a0cf305a7f0c4323a505bb9b3fa272405c6b22ab9f00 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d]
I1129 09:01:29.954278 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:29.959542 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:29.964240 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:29.968754 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1129 09:01:29.968820 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1129 09:01:29.999663 460401 cri.go:89] found id: ""
I1129 09:01:29.999685 460401 logs.go:282] 0 containers: []
W1129 09:01:29.999694 460401 logs.go:284] No container was found matching "kindnet"
I1129 09:01:29.999700 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1129 09:01:29.999780 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1129 09:01:30.029803 460401 cri.go:89] found id: ""
I1129 09:01:30.029833 460401 logs.go:282] 0 containers: []
W1129 09:01:30.029845 460401 logs.go:284] No container was found matching "storage-provisioner"
I1129 09:01:30.029859 460401 logs.go:123] Gathering logs for kube-apiserver [5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095] ...
I1129 09:01:30.029877 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095"
I1129 09:01:30.069873 460401 logs.go:123] Gathering logs for etcd [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625] ...
I1129 09:01:30.069904 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:01:30.108923 460401 logs.go:123] Gathering logs for kube-scheduler [1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea] ...
I1129 09:01:30.108958 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:01:30.146649 460401 logs.go:123] Gathering logs for containerd ...
I1129 09:01:30.146682 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1129 09:01:30.190480 460401 logs.go:123] Gathering logs for container status ...
I1129 09:01:30.190514 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1129 09:01:30.225134 460401 logs.go:123] Gathering logs for kubelet ...
I1129 09:01:30.225167 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1129 09:01:30.299416 460401 logs.go:123] Gathering logs for dmesg ...
I1129 09:01:30.299461 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1129 09:01:30.314711 460401 logs.go:123] Gathering logs for describe nodes ...
I1129 09:01:30.314766 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1129 09:01:30.384833 460401 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1129 09:01:30.384856 460401 logs.go:123] Gathering logs for kube-apiserver [1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101] ...
I1129 09:01:30.384879 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:01:30.420690 460401 logs.go:123] Gathering logs for kube-scheduler [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21] ...
I1129 09:01:30.420720 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:01:30.476182 460401 logs.go:123] Gathering logs for kube-controller-manager [2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a] ...
I1129 09:01:30.476221 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:01:30.507666 460401 logs.go:123] Gathering logs for kube-controller-manager [f78d0d97ffa9f2d0cbf8a0cf305a7f0c4323a505bb9b3fa272405c6b22ab9f00] ...
I1129 09:01:30.507698 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f78d0d97ffa9f2d0cbf8a0cf305a7f0c4323a505bb9b3fa272405c6b22ab9f00"
I1129 09:01:30.536613 460401 logs.go:123] Gathering logs for kube-controller-manager [976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d] ...
I1129 09:01:30.536640 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:01:33.076844 460401 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
I1129 09:01:33.077304 460401 api_server.go:269] stopped: https://192.168.85.2:8443/healthz: Get "https://192.168.85.2:8443/healthz": dial tcp 192.168.85.2:8443: connect: connection refused
I1129 09:01:33.077371 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1129 09:01:33.077426 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1129 09:01:33.111899 460401 cri.go:89] found id: "5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095"
I1129 09:01:33.111922 460401 cri.go:89] found id: "1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:01:33.111928 460401 cri.go:89] found id: ""
I1129 09:01:33.111938 460401 logs.go:282] 2 containers: [5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101]
I1129 09:01:33.111995 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:33.117191 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:33.122615 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1129 09:01:33.122688 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1129 09:01:33.163794 460401 cri.go:89] found id: "f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:01:33.163822 460401 cri.go:89] found id: ""
I1129 09:01:33.163834 460401 logs.go:282] 1 containers: [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625]
I1129 09:01:33.163897 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:33.170244 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1129 09:01:33.170334 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1129 09:01:33.203629 460401 cri.go:89] found id: ""
I1129 09:01:33.203662 460401 logs.go:282] 0 containers: []
W1129 09:01:33.203675 460401 logs.go:284] No container was found matching "coredns"
I1129 09:01:33.203683 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1129 09:01:33.203759 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1129 09:01:33.248112 460401 cri.go:89] found id: "092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:01:33.248142 460401 cri.go:89] found id: "1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:01:33.248148 460401 cri.go:89] found id: ""
I1129 09:01:33.248159 460401 logs.go:282] 2 containers: [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea]
I1129 09:01:33.248226 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:33.255192 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:33.262339 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1129 09:01:33.262419 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1129 09:01:33.308727 460401 cri.go:89] found id: ""
I1129 09:01:33.308855 460401 logs.go:282] 0 containers: []
W1129 09:01:33.308869 460401 logs.go:284] No container was found matching "kube-proxy"
I1129 09:01:33.308878 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1129 09:01:33.309309 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1129 09:01:33.361181 460401 cri.go:89] found id: "2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:01:33.361234 460401 cri.go:89] found id: "f78d0d97ffa9f2d0cbf8a0cf305a7f0c4323a505bb9b3fa272405c6b22ab9f00"
I1129 09:01:33.361241 460401 cri.go:89] found id: "976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:01:33.361245 460401 cri.go:89] found id: ""
I1129 09:01:33.361255 460401 logs.go:282] 3 containers: [2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a f78d0d97ffa9f2d0cbf8a0cf305a7f0c4323a505bb9b3fa272405c6b22ab9f00 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d]
I1129 09:01:33.361343 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:33.368091 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:33.374495 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:33.380899 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1129 09:01:33.380965 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1129 09:01:33.430643 460401 cri.go:89] found id: ""
I1129 09:01:33.430670 460401 logs.go:282] 0 containers: []
W1129 09:01:33.430681 460401 logs.go:284] No container was found matching "kindnet"
I1129 09:01:33.430689 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1129 09:01:33.430771 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1129 09:01:33.467019 460401 cri.go:89] found id: ""
I1129 09:01:33.467047 460401 logs.go:282] 0 containers: []
W1129 09:01:33.467058 460401 logs.go:284] No container was found matching "storage-provisioner"
I1129 09:01:33.467072 460401 logs.go:123] Gathering logs for containerd ...
I1129 09:01:33.467091 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1129 09:01:33.529538 460401 logs.go:123] Gathering logs for etcd [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625] ...
I1129 09:01:33.529588 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:01:33.591866 460401 logs.go:123] Gathering logs for kube-scheduler [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21] ...
I1129 09:01:33.591912 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:01:33.664144 460401 logs.go:123] Gathering logs for kube-controller-manager [2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a] ...
I1129 09:01:33.664179 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:01:33.701152 460401 logs.go:123] Gathering logs for kube-controller-manager [f78d0d97ffa9f2d0cbf8a0cf305a7f0c4323a505bb9b3fa272405c6b22ab9f00] ...
I1129 09:01:33.701195 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f78d0d97ffa9f2d0cbf8a0cf305a7f0c4323a505bb9b3fa272405c6b22ab9f00"
I1129 09:01:33.735624 460401 logs.go:123] Gathering logs for kube-controller-manager [976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d] ...
I1129 09:01:33.735669 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:01:33.774144 460401 logs.go:123] Gathering logs for container status ...
I1129 09:01:33.774175 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1129 09:01:33.808426 460401 logs.go:123] Gathering logs for kubelet ...
I1129 09:01:33.808461 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1129 09:01:33.898471 460401 logs.go:123] Gathering logs for dmesg ...
I1129 09:01:33.898509 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1129 09:01:33.914358 460401 logs.go:123] Gathering logs for describe nodes ...
I1129 09:01:33.914394 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1129 09:01:33.978927 460401 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1129 09:01:33.978954 460401 logs.go:123] Gathering logs for kube-apiserver [5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095] ...
I1129 09:01:33.978975 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095"
I1129 09:01:34.016239 460401 logs.go:123] Gathering logs for kube-apiserver [1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101] ...
I1129 09:01:34.016268 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:01:34.055208 460401 logs.go:123] Gathering logs for kube-scheduler [1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea] ...
I1129 09:01:34.055239 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
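
[annotation] The block above repeats a poll-and-diagnose cycle: probe the apiserver's /healthz, and while it refuses connections, enumerate CRI containers and dump their logs before trying again. A minimal Go sketch of that loop, not minikube's api_server.go; certificate verification is skipped here for brevity, whereas minikube authenticates with the cluster's own certs:

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

// healthz performs one probe of the apiserver health endpoint.
func healthz(url string) error {
	client := &http.Client{
		Timeout:   2 * time.Second,
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}
	resp, err := client.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("healthz returned %s", resp.Status)
	}
	return nil
}

func main() {
	url := "https://192.168.85.2:8443/healthz"
	for attempt := 0; attempt < 5; attempt++ {
		if err := healthz(url); err != nil {
			fmt.Println("stopped:", err) // this is where the log-gathering pass runs
			time.Sleep(3 * time.Second)
			continue
		}
		fmt.Println("apiserver is healthy")
		return
	}
}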
I1129 09:01:33.275806 493486 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1129 09:01:33.275832 493486 kic_runner.go:114] Args: [docker exec --privileged old-k8s-version-295154 chown docker:docker /home/docker/.ssh/authorized_keys]
I1129 09:01:33.349350 493486 cli_runner.go:164] Run: docker container inspect old-k8s-version-295154 --format={{.State.Status}}
I1129 09:01:33.378383 493486 machine.go:94] provisionDockerMachine start ...
I1129 09:01:33.378475 493486 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-295154
I1129 09:01:33.410015 493486 main.go:143] libmachine: Using SSH client type: native
I1129 09:01:33.410367 493486 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33058 <nil> <nil>}
I1129 09:01:33.410384 493486 main.go:143] libmachine: About to run SSH command:
hostname
I1129 09:01:33.577990 493486 main.go:143] libmachine: SSH cmd err, output: <nil>: old-k8s-version-295154
I1129 09:01:33.578018 493486 ubuntu.go:182] provisioning hostname "old-k8s-version-295154"
I1129 09:01:33.578086 493486 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-295154
I1129 09:01:33.609401 493486 main.go:143] libmachine: Using SSH client type: native
I1129 09:01:33.609890 493486 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33058 <nil> <nil>}
I1129 09:01:33.609953 493486 main.go:143] libmachine: About to run SSH command:
sudo hostname old-k8s-version-295154 && echo "old-k8s-version-295154" | sudo tee /etc/hostname
I1129 09:01:33.789112 493486 main.go:143] libmachine: SSH cmd err, output: <nil>: old-k8s-version-295154
I1129 09:01:33.789205 493486 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-295154
I1129 09:01:33.813423 493486 main.go:143] libmachine: Using SSH client type: native
I1129 09:01:33.813741 493486 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33058 <nil> <nil>}
I1129 09:01:33.813774 493486 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sold-k8s-version-295154' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 old-k8s-version-295154/g' /etc/hosts;
else
echo '127.0.1.1 old-k8s-version-295154' | sudo tee -a /etc/hosts;
fi
fi
I1129 09:01:33.966671 493486 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1129 09:01:33.966701 493486 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/22000-255825/.minikube CaCertPath:/home/jenkins/minikube-integration/22000-255825/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/22000-255825/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/22000-255825/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/22000-255825/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/22000-255825/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/22000-255825/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/22000-255825/.minikube}
I1129 09:01:33.966720 493486 ubuntu.go:190] setting up certificates
I1129 09:01:33.966746 493486 provision.go:84] configureAuth start
I1129 09:01:33.966809 493486 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-295154
I1129 09:01:33.987509 493486 provision.go:143] copyHostCerts
I1129 09:01:33.987591 493486 exec_runner.go:144] found /home/jenkins/minikube-integration/22000-255825/.minikube/ca.pem, removing ...
I1129 09:01:33.987609 493486 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22000-255825/.minikube/ca.pem
I1129 09:01:33.987703 493486 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22000-255825/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/22000-255825/.minikube/ca.pem (1078 bytes)
I1129 09:01:33.987854 493486 exec_runner.go:144] found /home/jenkins/minikube-integration/22000-255825/.minikube/cert.pem, removing ...
I1129 09:01:33.987873 493486 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22000-255825/.minikube/cert.pem
I1129 09:01:33.987926 493486 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22000-255825/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/22000-255825/.minikube/cert.pem (1123 bytes)
I1129 09:01:33.988030 493486 exec_runner.go:144] found /home/jenkins/minikube-integration/22000-255825/.minikube/key.pem, removing ...
I1129 09:01:33.988043 493486 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22000-255825/.minikube/key.pem
I1129 09:01:33.988093 493486 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22000-255825/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/22000-255825/.minikube/key.pem (1679 bytes)
I1129 09:01:33.988197 493486 provision.go:117] generating server cert: /home/jenkins/minikube-integration/22000-255825/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/22000-255825/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/22000-255825/.minikube/certs/ca-key.pem org=jenkins.old-k8s-version-295154 san=[127.0.0.1 192.168.76.2 localhost minikube old-k8s-version-295154]
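
[annotation] The "generating server cert" line above amounts to issuing an x509 server certificate signed by the profile's CA, with the node's addresses and hostnames as Subject Alternative Names. A hedged Go sketch of that step; field choices are illustrative rather than minikube's exact provision.go code:

package provision

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"math/big"
	"net"
	"time"
)

// makeServerCert returns a DER-encoded server certificate signed by the given
// CA, plus the freshly generated private key.
func makeServerCert(ca *x509.Certificate, caKey *rsa.PrivateKey) ([]byte, *rsa.PrivateKey, error) {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return nil, nil, err
	}
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(2),
		Subject:      pkix.Name{Organization: []string{"jenkins.old-k8s-version-295154"}},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(26280 * time.Hour), // matches CertExpiration:26280h0m0s in the profile
		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		// the SAN set from the log line above
		IPAddresses: []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("192.168.76.2")},
		DNSNames:    []string{"localhost", "minikube", "old-k8s-version-295154"},
	}
	der, err := x509.CreateCertificate(rand.Reader, tmpl, ca, &key.PublicKey, caKey)
	return der, key, err
}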
I1129 09:01:34.173289 493486 provision.go:177] copyRemoteCerts
I1129 09:01:34.173365 493486 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1129 09:01:34.173409 493486 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-295154
I1129 09:01:34.192053 493486 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33058 SSHKeyPath:/home/jenkins/minikube-integration/22000-255825/.minikube/machines/old-k8s-version-295154/id_rsa Username:docker}
I1129 09:01:34.294293 493486 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I1129 09:01:34.313898 493486 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/machines/server.pem --> /etc/docker/server.pem (1233 bytes)
I1129 09:01:34.331337 493486 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1129 09:01:34.348272 493486 provision.go:87] duration metric: took 381.510752ms to configureAuth
I1129 09:01:34.348301 493486 ubuntu.go:206] setting minikube options for container-runtime
I1129 09:01:34.348457 493486 config.go:182] Loaded profile config "old-k8s-version-295154": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1129 09:01:34.348472 493486 machine.go:97] duration metric: took 970.068662ms to provisionDockerMachine
I1129 09:01:34.348481 493486 client.go:176] duration metric: took 5.886553133s to LocalClient.Create
I1129 09:01:34.348502 493486 start.go:167] duration metric: took 5.88663904s to libmachine.API.Create "old-k8s-version-295154"
I1129 09:01:34.348512 493486 start.go:293] postStartSetup for "old-k8s-version-295154" (driver="docker")
I1129 09:01:34.348520 493486 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1129 09:01:34.348570 493486 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1129 09:01:34.348614 493486 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-295154
I1129 09:01:34.366501 493486 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33058 SSHKeyPath:/home/jenkins/minikube-integration/22000-255825/.minikube/machines/old-k8s-version-295154/id_rsa Username:docker}
I1129 09:01:34.469910 493486 ssh_runner.go:195] Run: cat /etc/os-release
I1129 09:01:34.473823 493486 main.go:143] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1129 09:01:34.473855 493486 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1129 09:01:34.473868 493486 filesync.go:126] Scanning /home/jenkins/minikube-integration/22000-255825/.minikube/addons for local assets ...
I1129 09:01:34.473922 493486 filesync.go:126] Scanning /home/jenkins/minikube-integration/22000-255825/.minikube/files for local assets ...
I1129 09:01:34.474038 493486 filesync.go:149] local asset: /home/jenkins/minikube-integration/22000-255825/.minikube/files/etc/ssl/certs/2594832.pem -> 2594832.pem in /etc/ssl/certs
I1129 09:01:34.474177 493486 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1129 09:01:34.481912 493486 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/files/etc/ssl/certs/2594832.pem --> /etc/ssl/certs/2594832.pem (1708 bytes)
I1129 09:01:34.502433 493486 start.go:296] duration metric: took 153.905912ms for postStartSetup
I1129 09:01:34.502813 493486 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-295154
I1129 09:01:34.520071 493486 profile.go:143] Saving config to /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/config.json ...
I1129 09:01:34.520308 493486 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1129 09:01:34.520347 493486 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-295154
I1129 09:01:34.539111 493486 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33058 SSHKeyPath:/home/jenkins/minikube-integration/22000-255825/.minikube/machines/old-k8s-version-295154/id_rsa Username:docker}
I1129 09:01:34.640199 493486 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1129 09:01:34.644901 493486 start.go:128] duration metric: took 6.185289215s to createHost
I1129 09:01:34.644928 493486 start.go:83] releasing machines lock for "old-k8s-version-295154", held for 6.185484113s
I1129 09:01:34.644991 493486 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-295154
I1129 09:01:34.662525 493486 ssh_runner.go:195] Run: cat /version.json
I1129 09:01:34.662583 493486 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-295154
I1129 09:01:34.662584 493486 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1129 09:01:34.662648 493486 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-295154
I1129 09:01:34.679837 493486 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33058 SSHKeyPath:/home/jenkins/minikube-integration/22000-255825/.minikube/machines/old-k8s-version-295154/id_rsa Username:docker}
I1129 09:01:34.681115 493486 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33058 SSHKeyPath:/home/jenkins/minikube-integration/22000-255825/.minikube/machines/old-k8s-version-295154/id_rsa Username:docker}
I1129 09:01:34.833568 493486 ssh_runner.go:195] Run: systemctl --version
I1129 09:01:34.840355 493486 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1129 09:01:34.844844 493486 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1129 09:01:34.844907 493486 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1129 09:01:34.869137 493486 cni.go:262] disabled [/etc/cni/net.d/10-crio-bridge.conflist.disabled, /etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
I1129 09:01:34.869161 493486 start.go:496] detecting cgroup driver to use...
I1129 09:01:34.869194 493486 detect.go:190] detected "systemd" cgroup driver on host os
I1129 09:01:34.869251 493486 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1129 09:01:34.883461 493486 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1129 09:01:34.895885 493486 docker.go:218] disabling cri-docker service (if available) ...
I1129 09:01:34.895942 493486 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1129 09:01:34.912002 493486 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1129 09:01:34.929350 493486 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1129 09:01:35.015369 493486 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1129 09:01:35.101537 493486 docker.go:234] disabling docker service ...
I1129 09:01:35.101597 493486 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1129 09:01:35.120759 493486 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1129 09:01:35.133226 493486 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1129 09:01:35.217122 493486 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1129 09:01:35.301702 493486 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1129 09:01:35.314440 493486 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1129 09:01:35.328312 493486 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.9"|' /etc/containerd/config.toml"
I1129 09:01:35.338331 493486 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1129 09:01:35.346975 493486 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I1129 09:01:35.347033 493486 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I1129 09:01:35.355511 493486 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1129 09:01:35.363986 493486 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1129 09:01:35.372342 493486 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1129 09:01:35.380589 493486 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1129 09:01:35.388205 493486 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1129 09:01:35.396344 493486 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1129 09:01:35.404459 493486 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
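
[annotation] The run of sed commands above rewrites /etc/containerd/config.toml in place: pinning the pause image, enabling SystemdCgroup to match the detected "systemd" host driver, normalizing the runc runtime type, and re-enabling unprivileged ports. As an illustration (not minikube's implementation), the SystemdCgroup edit done with a line-oriented regex in Go:

package main

import (
	"fmt"
	"regexp"
)

// same effect as: sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g'
var systemdCgroup = regexp.MustCompile(`(?m)^( *)SystemdCgroup = .*$`)

func main() {
	conf := `[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
  SystemdCgroup = false
`
	fixed := systemdCgroup.ReplaceAllString(conf, "${1}SystemdCgroup = true")
	fmt.Print(fixed)
}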
I1129 09:01:35.412783 493486 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1129 09:01:35.420177 493486 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1129 09:01:35.427378 493486 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1129 09:01:35.508150 493486 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1129 09:01:35.605801 493486 start.go:543] Will wait 60s for socket path /run/containerd/containerd.sock
I1129 09:01:35.605868 493486 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I1129 09:01:35.610095 493486 start.go:564] Will wait 60s for crictl version
I1129 09:01:35.610140 493486 ssh_runner.go:195] Run: which crictl
I1129 09:01:35.613826 493486 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1129 09:01:35.640869 493486 start.go:580] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: v2.1.5
RuntimeApiVersion: v1
I1129 09:01:35.640947 493486 ssh_runner.go:195] Run: containerd --version
I1129 09:01:35.662573 493486 ssh_runner.go:195] Run: containerd --version
I1129 09:01:35.686990 493486 out.go:179] * Preparing Kubernetes v1.28.0 on containerd 2.1.5 ...
I1129 09:01:35.688126 493486 cli_runner.go:164] Run: docker network inspect old-k8s-version-295154 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1129 09:01:35.705269 493486 ssh_runner.go:195] Run: grep 192.168.76.1 host.minikube.internal$ /etc/hosts
I1129 09:01:35.709565 493486 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.76.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1129 09:01:35.720029 493486 kubeadm.go:884] updating cluster {Name:old-k8s-version-295154 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-295154 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1129 09:01:35.720146 493486 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1129 09:01:35.720192 493486 ssh_runner.go:195] Run: sudo crictl images --output json
I1129 09:01:35.745337 493486 containerd.go:627] all images are preloaded for containerd runtime.
I1129 09:01:35.745359 493486 containerd.go:534] Images already preloaded, skipping extraction
I1129 09:01:35.745433 493486 ssh_runner.go:195] Run: sudo crictl images --output json
I1129 09:01:35.768552 493486 containerd.go:627] all images are preloaded for containerd runtime.
I1129 09:01:35.768573 493486 cache_images.go:86] Images are preloaded, skipping loading
I1129 09:01:35.768582 493486 kubeadm.go:935] updating node { 192.168.76.2 8443 v1.28.0 containerd true true} ...
I1129 09:01:35.768708 493486 kubeadm.go:947] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.28.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=old-k8s-version-295154 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.76.2
[Install]
config:
{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-295154 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1129 09:01:35.768800 493486 ssh_runner.go:195] Run: sudo crictl info
I1129 09:01:35.793684 493486 cni.go:84] Creating CNI manager for ""
I1129 09:01:35.793704 493486 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1129 09:01:35.793722 493486 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1129 09:01:35.793760 493486 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.76.2 APIServerPort:8443 KubernetesVersion:v1.28.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:old-k8s-version-295154 NodeName:old-k8s-version-295154 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.76.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.76.2 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1129 09:01:35.793881 493486 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.76.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///run/containerd/containerd.sock
name: "old-k8s-version-295154"
kubeletExtraArgs:
node-ip: 192.168.76.2
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.76.2"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.28.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
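
[annotation] The YAML above is rendered from the kubeadm options struct logged at kubeadm.go:190. A sketch, assuming a text/template approach, of how such a config can be produced; the template is trimmed to two fields, while the real file carries the full InitConfiguration, ClusterConfiguration, KubeletConfiguration, and KubeProxyConfiguration:

package main

import (
	"os"
	"text/template"
)

const tmpl = `apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: {{.AdvertiseAddress}}
  bindPort: {{.APIServerPort}}
`

type params struct {
	AdvertiseAddress string
	APIServerPort    int
}

func main() {
	t := template.Must(template.New("kubeadm").Parse(tmpl))
	// values taken from the kubeadm options logged above
	if err := t.Execute(os.Stdout, params{AdvertiseAddress: "192.168.76.2", APIServerPort: 8443}); err != nil {
		panic(err)
	}
}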
I1129 09:01:35.793941 493486 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.28.0
I1129 09:01:35.801702 493486 binaries.go:51] Found k8s binaries, skipping transfer
I1129 09:01:35.801779 493486 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1129 09:01:35.809370 493486 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (326 bytes)
I1129 09:01:35.821645 493486 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1129 09:01:35.837123 493486 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2175 bytes)
I1129 09:01:35.849282 493486 ssh_runner.go:195] Run: grep 192.168.76.2 control-plane.minikube.internal$ /etc/hosts
I1129 09:01:35.852777 493486 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.76.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1129 09:01:35.862291 493486 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1129 09:01:35.945522 493486 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1129 09:01:35.967020 493486 certs.go:69] Setting up /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154 for IP: 192.168.76.2
I1129 09:01:35.967046 493486 certs.go:195] generating shared ca certs ...
I1129 09:01:35.967066 493486 certs.go:227] acquiring lock for ca certs: {Name:mk5e6bcae0a6944966b241f3c6197a472703c991 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:01:35.967208 493486 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/22000-255825/.minikube/ca.key
I1129 09:01:35.967259 493486 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/22000-255825/.minikube/proxy-client-ca.key
I1129 09:01:35.967269 493486 certs.go:257] generating profile certs ...
I1129 09:01:35.967334 493486 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/client.key
I1129 09:01:35.967347 493486 crypto.go:68] Generating cert /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/client.crt with IP's: []
I1129 09:01:36.097254 493486 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/client.crt ...
I1129 09:01:36.097290 493486 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/client.crt: {Name:mk21cfae97f1407d02cd99fe2a74be759b699397 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:01:36.097496 493486 crypto.go:164] Writing key to /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/client.key ...
I1129 09:01:36.097514 493486 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/client.key: {Name:mk0736bb845004e9c4d4a2d8602930ec0568eec2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:01:36.097631 493486 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/apiserver.key.a040bf72
I1129 09:01:36.097693 493486 crypto.go:68] Generating cert /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/apiserver.crt.a040bf72 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.76.2]
I1129 09:01:36.144552 493486 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/apiserver.crt.a040bf72 ...
I1129 09:01:36.144579 493486 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/apiserver.crt.a040bf72: {Name:mk3fedcec97acb487835213600ee8b696c362f94 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:01:36.144774 493486 crypto.go:164] Writing key to /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/apiserver.key.a040bf72 ...
I1129 09:01:36.144793 493486 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/apiserver.key.a040bf72: {Name:mk9dc52d2daf1391895a4ee3c561f559be0e2755 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:01:36.144904 493486 certs.go:382] copying /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/apiserver.crt.a040bf72 -> /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/apiserver.crt
I1129 09:01:36.145012 493486 certs.go:386] copying /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/apiserver.key.a040bf72 -> /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/apiserver.key
I1129 09:01:36.145117 493486 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/proxy-client.key
I1129 09:01:36.145138 493486 crypto.go:68] Generating cert /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/proxy-client.crt with IP's: []
I1129 09:01:36.307914 493486 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/proxy-client.crt ...
I1129 09:01:36.307946 493486 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/proxy-client.crt: {Name:mk698ad1b9e2e29d385fd97b123d5b48273c6d5b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:01:36.308144 493486 crypto.go:164] Writing key to /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/proxy-client.key ...
I1129 09:01:36.308172 493486 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/proxy-client.key: {Name:mkcfd3db96260b6b8677060f32dcbd4dd8f838bc Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:01:36.308432 493486 certs.go:484] found cert: /home/jenkins/minikube-integration/22000-255825/.minikube/certs/259483.pem (1338 bytes)
W1129 09:01:36.308490 493486 certs.go:480] ignoring /home/jenkins/minikube-integration/22000-255825/.minikube/certs/259483_empty.pem, impossibly tiny 0 bytes
I1129 09:01:36.308506 493486 certs.go:484] found cert: /home/jenkins/minikube-integration/22000-255825/.minikube/certs/ca-key.pem (1675 bytes)
I1129 09:01:36.308543 493486 certs.go:484] found cert: /home/jenkins/minikube-integration/22000-255825/.minikube/certs/ca.pem (1078 bytes)
I1129 09:01:36.308590 493486 certs.go:484] found cert: /home/jenkins/minikube-integration/22000-255825/.minikube/certs/cert.pem (1123 bytes)
I1129 09:01:36.308633 493486 certs.go:484] found cert: /home/jenkins/minikube-integration/22000-255825/.minikube/certs/key.pem (1679 bytes)
I1129 09:01:36.308689 493486 certs.go:484] found cert: /home/jenkins/minikube-integration/22000-255825/.minikube/files/etc/ssl/certs/2594832.pem (1708 bytes)
I1129 09:01:36.309360 493486 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1129 09:01:36.328372 493486 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1129 09:01:36.345872 493486 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1129 09:01:36.363285 493486 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I1129 09:01:36.380427 493486 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1432 bytes)
I1129 09:01:36.397563 493486 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I1129 09:01:36.414929 493486 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1129 09:01:36.432334 493486 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I1129 09:01:36.449233 493486 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/files/etc/ssl/certs/2594832.pem --> /usr/share/ca-certificates/2594832.pem (1708 bytes)
I1129 09:01:36.469085 493486 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1129 09:01:36.485869 493486 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/certs/259483.pem --> /usr/share/ca-certificates/259483.pem (1338 bytes)
I1129 09:01:36.502784 493486 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1129 09:01:36.515208 493486 ssh_runner.go:195] Run: openssl version
I1129 09:01:36.521390 493486 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1129 09:01:36.529514 493486 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1129 09:01:36.533021 493486 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 29 08:29 /usr/share/ca-certificates/minikubeCA.pem
I1129 09:01:36.533062 493486 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1129 09:01:36.567579 493486 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1129 09:01:36.576162 493486 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/259483.pem && ln -fs /usr/share/ca-certificates/259483.pem /etc/ssl/certs/259483.pem"
I1129 09:01:36.584343 493486 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/259483.pem
I1129 09:01:36.588122 493486 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 29 08:35 /usr/share/ca-certificates/259483.pem
I1129 09:01:36.588176 493486 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/259483.pem
I1129 09:01:36.626659 493486 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/259483.pem /etc/ssl/certs/51391683.0"
I1129 09:01:36.635780 493486 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/2594832.pem && ln -fs /usr/share/ca-certificates/2594832.pem /etc/ssl/certs/2594832.pem"
I1129 09:01:36.644862 493486 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/2594832.pem
I1129 09:01:36.648851 493486 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 29 08:35 /usr/share/ca-certificates/2594832.pem
I1129 09:01:36.648906 493486 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/2594832.pem
I1129 09:01:36.691340 493486 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/2594832.pem /etc/ssl/certs/3ec20f2e.0"
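The three openssl/ln sequences above follow OpenSSL's hashed-directory convention: a CA in /etc/ssl/certs is located through a symlink named <subject-hash>.0, where the hash comes from "openssl x509 -hash -noout". A minimal Go sketch of that step, assuming openssl is on PATH (linkCA is a hypothetical helper, not minikube code):

package main

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
)

// linkCA computes the OpenSSL subject hash of a PEM certificate and
// creates the <certsDir>/<hash>.0 symlink that TLS libraries expect.
// Shells out to openssl like the log does; error handling trimmed.
func linkCA(pemPath, certsDir string) error {
	out, err := exec.Command("openssl", "x509", "-hash", "-noout", "-in", pemPath).Output()
	if err != nil {
		return err
	}
	link := filepath.Join(certsDir, strings.TrimSpace(string(out))+".0")
	os.Remove(link) // replace any stale link
	return os.Symlink(pemPath, link)
}

func main() {
	if err := linkCA("/usr/share/ca-certificates/minikubeCA.pem", "/etc/ssl/certs"); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}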
I1129 09:01:36.701173 493486 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1129 09:01:36.705050 493486 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1129 09:01:36.705110 493486 kubeadm.go:401] StartCluster: {Name:old-k8s-version-295154 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-295154 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1129 09:01:36.705201 493486 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I1129 09:01:36.705272 493486 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I1129 09:01:36.734535 493486 cri.go:89] found id: ""
I1129 09:01:36.734592 493486 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1129 09:01:36.743400 493486 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1129 09:01:36.751273 493486 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I1129 09:01:36.751332 493486 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1129 09:01:36.760386 493486 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1129 09:01:36.760404 493486 kubeadm.go:158] found existing configuration files:
I1129 09:01:36.760450 493486 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1129 09:01:36.768796 493486 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1129 09:01:36.768854 493486 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1129 09:01:36.776326 493486 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1129 09:01:36.784663 493486 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1129 09:01:36.784720 493486 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1129 09:01:36.793650 493486 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1129 09:01:36.801817 493486 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1129 09:01:36.801887 493486 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1129 09:01:36.811081 493486 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1129 09:01:36.819075 493486 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1129 09:01:36.819130 493486 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1129 09:01:36.827369 493486 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.28.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1129 09:01:36.885752 493486 kubeadm.go:319] [init] Using Kubernetes version: v1.28.0
I1129 09:01:36.885824 493486 kubeadm.go:319] [preflight] Running pre-flight checks
I1129 09:01:36.932588 493486 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I1129 09:01:36.932993 493486 kubeadm.go:319] KERNEL_VERSION: 6.8.0-1044-gcp
I1129 09:01:36.933139 493486 kubeadm.go:319] OS: Linux
I1129 09:01:36.933232 493486 kubeadm.go:319] CGROUPS_CPU: enabled
I1129 09:01:36.933332 493486 kubeadm.go:319] CGROUPS_CPUSET: enabled
I1129 09:01:36.933468 493486 kubeadm.go:319] CGROUPS_DEVICES: enabled
I1129 09:01:36.933539 493486 kubeadm.go:319] CGROUPS_FREEZER: enabled
I1129 09:01:36.933597 493486 kubeadm.go:319] CGROUPS_MEMORY: enabled
I1129 09:01:36.933656 493486 kubeadm.go:319] CGROUPS_PIDS: enabled
I1129 09:01:36.933717 493486 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I1129 09:01:36.933794 493486 kubeadm.go:319] CGROUPS_IO: enabled
I1129 09:01:37.018039 493486 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1129 09:01:37.018169 493486 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1129 09:01:37.018319 493486 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I1129 09:01:37.171075 493486 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1129 09:01:37.173428 493486 out.go:252] - Generating certificates and keys ...
I1129 09:01:37.173535 493486 kubeadm.go:319] [certs] Using existing ca certificate authority
I1129 09:01:37.173613 493486 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1129 09:01:37.301964 493486 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I1129 09:01:37.410711 493486 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I1129 09:01:37.550821 493486 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1129 09:01:37.787553 493486 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1129 09:01:37.889172 493486 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1129 09:01:37.889414 493486 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [localhost old-k8s-version-295154] and IPs [192.168.76.2 127.0.0.1 ::1]
I1129 09:01:38.063017 493486 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1129 09:01:38.063214 493486 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [localhost old-k8s-version-295154] and IPs [192.168.76.2 127.0.0.1 ::1]
I1129 09:01:38.202234 493486 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1129 09:01:38.262563 493486 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
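The kubeadm init invocation at 09:01:36.827369 passes --config plus a long comma-joined --ignore-preflight-errors list, since checks such as SystemVerification cannot pass inside the docker driver (see kubeadm.go:215 above). A small Go sketch of assembling such a command line (the flag names are kubeadm's; the assembly code is illustrative, not minikube's, and the ignore list is abbreviated):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Abbreviated ignore list; the log's full list also covers the
	// DirAvailable/FileAvailable manifest checks and more.
	ignored := []string{
		"DirAvailable--etc-kubernetes-manifests",
		"Port-10250",
		"Swap",
		"NumCPU",
		"Mem",
		"SystemVerification",
	}
	cmd := fmt.Sprintf("kubeadm init --config %s --ignore-preflight-errors=%s",
		"/var/tmp/minikube/kubeadm.yaml", strings.Join(ignored, ","))
	fmt.Println(cmd)
}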
I1129 09:01:36.787780 494126 main.go:143] libmachine: SSH cmd err, output: <nil>: no-preload-924441
I1129 09:01:36.787807 494126 ubuntu.go:182] provisioning hostname "no-preload-924441"
I1129 09:01:36.787868 494126 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-924441
I1129 09:01:36.808836 494126 main.go:143] libmachine: Using SSH client type: native
I1129 09:01:36.809153 494126 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33063 <nil> <nil>}
I1129 09:01:36.809173 494126 main.go:143] libmachine: About to run SSH command:
sudo hostname no-preload-924441 && echo "no-preload-924441" | sudo tee /etc/hostname
I1129 09:01:36.973090 494126 main.go:143] libmachine: SSH cmd err, output: <nil>: no-preload-924441
I1129 09:01:36.973172 494126 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-924441
I1129 09:01:36.993095 494126 main.go:143] libmachine: Using SSH client type: native
I1129 09:01:36.993348 494126 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33063 <nil> <nil>}
I1129 09:01:36.993366 494126 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sno-preload-924441' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 no-preload-924441/g' /etc/hosts;
else
echo '127.0.1.1 no-preload-924441' | sudo tee -a /etc/hosts;
fi
fi
I1129 09:01:37.147252 494126 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1129 09:01:37.147286 494126 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/22000-255825/.minikube CaCertPath:/home/jenkins/minikube-integration/22000-255825/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/22000-255825/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/22000-255825/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/22000-255825/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/22000-255825/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/22000-255825/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/22000-255825/.minikube}
I1129 09:01:37.147336 494126 ubuntu.go:190] setting up certificates
I1129 09:01:37.147350 494126 provision.go:84] configureAuth start
I1129 09:01:37.147407 494126 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" no-preload-924441
I1129 09:01:37.167771 494126 provision.go:143] copyHostCerts
I1129 09:01:37.167841 494126 exec_runner.go:144] found /home/jenkins/minikube-integration/22000-255825/.minikube/ca.pem, removing ...
I1129 09:01:37.167856 494126 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22000-255825/.minikube/ca.pem
I1129 09:01:37.167941 494126 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22000-255825/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/22000-255825/.minikube/ca.pem (1078 bytes)
I1129 09:01:37.168073 494126 exec_runner.go:144] found /home/jenkins/minikube-integration/22000-255825/.minikube/cert.pem, removing ...
I1129 09:01:37.168087 494126 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22000-255825/.minikube/cert.pem
I1129 09:01:37.168135 494126 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22000-255825/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/22000-255825/.minikube/cert.pem (1123 bytes)
I1129 09:01:37.168246 494126 exec_runner.go:144] found /home/jenkins/minikube-integration/22000-255825/.minikube/key.pem, removing ...
I1129 09:01:37.168259 494126 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22000-255825/.minikube/key.pem
I1129 09:01:37.168304 494126 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22000-255825/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/22000-255825/.minikube/key.pem (1679 bytes)
I1129 09:01:37.168383 494126 provision.go:117] generating server cert: /home/jenkins/minikube-integration/22000-255825/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/22000-255825/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/22000-255825/.minikube/certs/ca-key.pem org=jenkins.no-preload-924441 san=[127.0.0.1 192.168.103.2 localhost minikube no-preload-924441]
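The server cert above is issued with a SAN list covering loopback, the container IP, and the machine names. For orientation, a self-contained Go sketch that creates a certificate with equivalent SANs using crypto/x509; it self-signs for brevity, whereas minikube signs with the ca-key.pem named in the log:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"fmt"
	"math/big"
	"net"
	"time"
)

func main() {
	key, _ := rsa.GenerateKey(rand.Reader, 2048)
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{Organization: []string{"jenkins.no-preload-924441"}},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(26280 * time.Hour), // matches CertExpiration in the log
		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		// SANs mirroring the san=[...] list in the provision.go line above.
		DNSNames:    []string{"localhost", "minikube", "no-preload-924441"},
		IPAddresses: []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("192.168.103.2")},
	}
	der, _ := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	fmt.Print(string(pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: der})))
}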
I1129 09:01:37.302569 494126 provision.go:177] copyRemoteCerts
I1129 09:01:37.302625 494126 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1129 09:01:37.302676 494126 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-924441
I1129 09:01:37.320965 494126 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/22000-255825/.minikube/machines/no-preload-924441/id_rsa Username:docker}
I1129 09:01:37.425520 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I1129 09:01:37.446589 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/machines/server.pem --> /etc/docker/server.pem (1220 bytes)
I1129 09:01:37.463963 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I1129 09:01:37.480486 494126 provision.go:87] duration metric: took 333.119398ms to configureAuth
I1129 09:01:37.480511 494126 ubuntu.go:206] setting minikube options for container-runtime
I1129 09:01:37.480667 494126 config.go:182] Loaded profile config "no-preload-924441": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1129 09:01:37.480680 494126 machine.go:97] duration metric: took 3.880753165s to provisionDockerMachine
I1129 09:01:37.480691 494126 client.go:176] duration metric: took 7.771282469s to LocalClient.Create
I1129 09:01:37.480714 494126 start.go:167] duration metric: took 7.771346771s to libmachine.API.Create "no-preload-924441"
I1129 09:01:37.480726 494126 start.go:293] postStartSetup for "no-preload-924441" (driver="docker")
I1129 09:01:37.480750 494126 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1129 09:01:37.480814 494126 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1129 09:01:37.480883 494126 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-924441
I1129 09:01:37.498996 494126 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/22000-255825/.minikube/machines/no-preload-924441/id_rsa Username:docker}
I1129 09:01:37.602864 494126 ssh_runner.go:195] Run: cat /etc/os-release
I1129 09:01:37.606394 494126 main.go:143] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1129 09:01:37.606428 494126 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1129 09:01:37.606439 494126 filesync.go:126] Scanning /home/jenkins/minikube-integration/22000-255825/.minikube/addons for local assets ...
I1129 09:01:37.606502 494126 filesync.go:126] Scanning /home/jenkins/minikube-integration/22000-255825/.minikube/files for local assets ...
I1129 09:01:37.606593 494126 filesync.go:149] local asset: /home/jenkins/minikube-integration/22000-255825/.minikube/files/etc/ssl/certs/2594832.pem -> 2594832.pem in /etc/ssl/certs
I1129 09:01:37.606724 494126 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1129 09:01:37.614670 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/files/etc/ssl/certs/2594832.pem --> /etc/ssl/certs/2594832.pem (1708 bytes)
I1129 09:01:37.635134 494126 start.go:296] duration metric: took 154.380805ms for postStartSetup
I1129 09:01:37.635554 494126 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" no-preload-924441
I1129 09:01:37.655528 494126 profile.go:143] Saving config to /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/config.json ...
I1129 09:01:37.655850 494126 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1129 09:01:37.655900 494126 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-924441
I1129 09:01:37.677317 494126 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/22000-255825/.minikube/machines/no-preload-924441/id_rsa Username:docker}
I1129 09:01:37.781275 494126 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1129 09:01:37.786042 494126 start.go:128] duration metric: took 8.07881841s to createHost
I1129 09:01:37.786069 494126 start.go:83] releasing machines lock for "no-preload-924441", held for 8.078998368s
I1129 09:01:37.786141 494126 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" no-preload-924441
I1129 09:01:37.805459 494126 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1129 09:01:37.805494 494126 ssh_runner.go:195] Run: cat /version.json
I1129 09:01:37.805552 494126 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-924441
I1129 09:01:37.805561 494126 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-924441
I1129 09:01:37.824515 494126 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/22000-255825/.minikube/machines/no-preload-924441/id_rsa Username:docker}
I1129 09:01:37.825042 494126 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/22000-255825/.minikube/machines/no-preload-924441/id_rsa Username:docker}
I1129 09:01:37.978797 494126 ssh_runner.go:195] Run: systemctl --version
I1129 09:01:37.985561 494126 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1129 09:01:37.990121 494126 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1129 09:01:37.990198 494126 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1129 09:01:38.014806 494126 cni.go:262] disabled [/etc/cni/net.d/10-crio-bridge.conflist.disabled, /etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
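Conflicting bridge/podman CNI configs are disabled by renaming them with a .mk_disabled suffix, as the find/-exec mv command above shows. A rough Go equivalent of that rename pass (sketch only, not minikube's implementation):

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	for _, pat := range []string{"/etc/cni/net.d/*bridge*", "/etc/cni/net.d/*podman*"} {
		matches, _ := filepath.Glob(pat)
		for _, m := range matches {
			if strings.HasSuffix(m, ".mk_disabled") {
				continue // already disabled
			}
			if err := os.Rename(m, m+".mk_disabled"); err != nil {
				fmt.Fprintln(os.Stderr, err)
			}
		}
	}
}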
I1129 09:01:38.014833 494126 start.go:496] detecting cgroup driver to use...
I1129 09:01:38.014872 494126 detect.go:190] detected "systemd" cgroup driver on host os
I1129 09:01:38.014922 494126 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1129 09:01:38.028890 494126 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1129 09:01:38.040635 494126 docker.go:218] disabling cri-docker service (if available) ...
I1129 09:01:38.040704 494126 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1129 09:01:38.059274 494126 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1129 09:01:38.079903 494126 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1129 09:01:38.160895 494126 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1129 09:01:38.248638 494126 docker.go:234] disabling docker service ...
I1129 09:01:38.248693 494126 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1129 09:01:38.270699 494126 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1129 09:01:38.283241 494126 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1129 09:01:38.364018 494126 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1129 09:01:38.451578 494126 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1129 09:01:38.464900 494126 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1129 09:01:38.478711 494126 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I1129 09:01:38.488688 494126 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1129 09:01:38.497188 494126 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I1129 09:01:38.497235 494126 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I1129 09:01:38.506143 494126 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1129 09:01:38.514500 494126 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1129 09:01:38.522578 494126 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1129 09:01:38.530605 494126 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1129 09:01:38.538074 494126 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1129 09:01:38.546395 494126 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1129 09:01:38.554633 494126 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
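The sed edits above rewrite /etc/containerd/config.toml in place; the key one for the "systemd" cgroup driver flips SystemdCgroup to true while preserving indentation through a capture group. The same rewrite expressed in Go, applied to a string here purely for illustration:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	conf := "[plugins.\"io.containerd.grpc.v1.cri\".containerd.runtimes.runc.options]\n  SystemdCgroup = false"
	// Same semantics as the sed expression in the log: keep the leading
	// whitespace, replace the value on the SystemdCgroup line.
	re := regexp.MustCompile(`(?m)^(\s*)SystemdCgroup = .*$`)
	fmt.Println(re.ReplaceAllString(conf, "${1}SystemdCgroup = true"))
}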
I1129 09:01:38.564192 494126 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1129 09:01:38.571328 494126 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1129 09:01:38.578488 494126 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1129 09:01:38.657072 494126 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1129 09:01:38.731899 494126 start.go:543] Will wait 60s for socket path /run/containerd/containerd.sock
I1129 09:01:38.731970 494126 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I1129 09:01:38.736165 494126 start.go:564] Will wait 60s for crictl version
I1129 09:01:38.736223 494126 ssh_runner.go:195] Run: which crictl
I1129 09:01:38.739821 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1129 09:01:38.765727 494126 start.go:580] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: v2.1.5
RuntimeApiVersion: v1
I1129 09:01:38.765799 494126 ssh_runner.go:195] Run: containerd --version
I1129 09:01:38.788554 494126 ssh_runner.go:195] Run: containerd --version
I1129 09:01:38.813801 494126 out.go:179] * Preparing Kubernetes v1.34.1 on containerd 2.1.5 ...
I1129 09:01:38.554215 493486 kubeadm.go:319] [certs] Generating "sa" key and public key
I1129 09:01:38.554337 493486 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1129 09:01:38.871587 493486 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1129 09:01:39.076048 493486 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1129 09:01:39.365556 493486 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1129 09:01:39.428949 493486 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1129 09:01:39.429579 493486 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1129 09:01:39.438444 493486 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1129 09:01:38.814940 494126 cli_runner.go:164] Run: docker network inspect no-preload-924441 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1129 09:01:38.832444 494126 ssh_runner.go:195] Run: grep 192.168.103.1 host.minikube.internal$ /etc/hosts
I1129 09:01:38.836556 494126 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.103.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1129 09:01:38.846826 494126 kubeadm.go:884] updating cluster {Name:no-preload-924441 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-924441 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.103.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1129 09:01:38.846940 494126 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
I1129 09:01:38.846988 494126 ssh_runner.go:195] Run: sudo crictl images --output json
I1129 09:01:38.875513 494126 containerd.go:623] couldn't find preloaded image for "registry.k8s.io/kube-apiserver:v1.34.1". assuming images are not preloaded.
I1129 09:01:38.875537 494126 cache_images.go:90] LoadCachedImages start: [registry.k8s.io/kube-apiserver:v1.34.1 registry.k8s.io/kube-controller-manager:v1.34.1 registry.k8s.io/kube-scheduler:v1.34.1 registry.k8s.io/kube-proxy:v1.34.1 registry.k8s.io/pause:3.10.1 registry.k8s.io/etcd:3.6.4-0 registry.k8s.io/coredns/coredns:v1.12.1 gcr.io/k8s-minikube/storage-provisioner:v5]
I1129 09:01:38.875606 494126 image.go:138] retrieving image: gcr.io/k8s-minikube/storage-provisioner:v5
I1129 09:01:38.875606 494126 image.go:138] retrieving image: registry.k8s.io/kube-apiserver:v1.34.1
I1129 09:01:38.875633 494126 image.go:138] retrieving image: registry.k8s.io/kube-proxy:v1.34.1
I1129 09:01:38.875642 494126 image.go:138] retrieving image: registry.k8s.io/kube-scheduler:v1.34.1
I1129 09:01:38.875663 494126 image.go:138] retrieving image: registry.k8s.io/etcd:3.6.4-0
I1129 09:01:38.875672 494126 image.go:138] retrieving image: registry.k8s.io/kube-controller-manager:v1.34.1
I1129 09:01:38.875613 494126 image.go:138] retrieving image: registry.k8s.io/pause:3.10.1
I1129 09:01:38.875710 494126 image.go:138] retrieving image: registry.k8s.io/coredns/coredns:v1.12.1
I1129 09:01:38.877065 494126 image.go:181] daemon lookup for registry.k8s.io/coredns/coredns:v1.12.1: Error response from daemon: No such image: registry.k8s.io/coredns/coredns:v1.12.1
I1129 09:01:38.877082 494126 image.go:181] daemon lookup for registry.k8s.io/kube-proxy:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-proxy:v1.34.1
I1129 09:01:38.877098 494126 image.go:181] daemon lookup for registry.k8s.io/kube-apiserver:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-apiserver:v1.34.1
I1129 09:01:38.877104 494126 image.go:181] daemon lookup for registry.k8s.io/etcd:3.6.4-0: Error response from daemon: No such image: registry.k8s.io/etcd:3.6.4-0
I1129 09:01:38.877132 494126 image.go:181] daemon lookup for registry.k8s.io/kube-scheduler:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-scheduler:v1.34.1
I1129 09:01:38.877185 494126 image.go:181] daemon lookup for registry.k8s.io/kube-controller-manager:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-controller-manager:v1.34.1
I1129 09:01:38.877233 494126 image.go:181] daemon lookup for gcr.io/k8s-minikube/storage-provisioner:v5: Error response from daemon: No such image: gcr.io/k8s-minikube/storage-provisioner:v5
I1129 09:01:38.877189 494126 image.go:181] daemon lookup for registry.k8s.io/pause:3.10.1: Error response from daemon: No such image: registry.k8s.io/pause:3.10.1
I1129 09:01:39.045541 494126 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-scheduler:v1.34.1" and sha "7dd6aaa1717ab7eaae4578503e4c4d9965fcf5a249e8155fe16379ee9b6cb813"
I1129 09:01:39.045605 494126 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-scheduler:v1.34.1
I1129 09:01:39.049466 494126 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-controller-manager:v1.34.1" and sha "c80c8dbafe7dd71fc21527912a6dd20ccd1b71f3e561a5c28337388d0619538f"
I1129 09:01:39.049525 494126 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-controller-manager:v1.34.1
I1129 09:01:39.055696 494126 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-apiserver:v1.34.1" and sha "c3994bc6961024917ec0aeee02e62828108c21a52d87648e30f3080d9cbadc97"
I1129 09:01:39.055787 494126 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-apiserver:v1.34.1
I1129 09:01:39.065913 494126 containerd.go:267] Checking existence of image with name "registry.k8s.io/etcd:3.6.4-0" and sha "5f1f5298c888daa46c4409ff4cefe5ca9d16e479419f94cdb5f5d5563dac0115"
I1129 09:01:39.065987 494126 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/etcd:3.6.4-0
I1129 09:01:39.071326 494126 cache_images.go:118] "registry.k8s.io/kube-scheduler:v1.34.1" needs transfer: "registry.k8s.io/kube-scheduler:v1.34.1" does not exist at hash "7dd6aaa1717ab7eaae4578503e4c4d9965fcf5a249e8155fe16379ee9b6cb813" in container runtime
I1129 09:01:39.071386 494126 cri.go:218] Removing image: registry.k8s.io/kube-scheduler:v1.34.1
I1129 09:01:39.071433 494126 ssh_runner.go:195] Run: which crictl
I1129 09:01:39.072494 494126 containerd.go:267] Checking existence of image with name "registry.k8s.io/coredns/coredns:v1.12.1" and sha "52546a367cc9e0d924aa3b190596a9167fa6e53245023b5b5baf0f07e5443969"
I1129 09:01:39.072560 494126 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/coredns/coredns:v1.12.1
I1129 09:01:39.074055 494126 cache_images.go:118] "registry.k8s.io/kube-controller-manager:v1.34.1" needs transfer: "registry.k8s.io/kube-controller-manager:v1.34.1" does not exist at hash "c80c8dbafe7dd71fc21527912a6dd20ccd1b71f3e561a5c28337388d0619538f" in container runtime
I1129 09:01:39.074103 494126 cri.go:218] Removing image: registry.k8s.io/kube-controller-manager:v1.34.1
I1129 09:01:39.074155 494126 ssh_runner.go:195] Run: which crictl
I1129 09:01:39.079805 494126 cache_images.go:118] "registry.k8s.io/kube-apiserver:v1.34.1" needs transfer: "registry.k8s.io/kube-apiserver:v1.34.1" does not exist at hash "c3994bc6961024917ec0aeee02e62828108c21a52d87648e30f3080d9cbadc97" in container runtime
I1129 09:01:39.079853 494126 cri.go:218] Removing image: registry.k8s.io/kube-apiserver:v1.34.1
I1129 09:01:39.079906 494126 ssh_runner.go:195] Run: which crictl
I1129 09:01:39.090225 494126 cache_images.go:118] "registry.k8s.io/etcd:3.6.4-0" needs transfer: "registry.k8s.io/etcd:3.6.4-0" does not exist at hash "5f1f5298c888daa46c4409ff4cefe5ca9d16e479419f94cdb5f5d5563dac0115" in container runtime
I1129 09:01:39.090271 494126 cri.go:218] Removing image: registry.k8s.io/etcd:3.6.4-0
I1129 09:01:39.090279 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.34.1
I1129 09:01:39.090318 494126 ssh_runner.go:195] Run: which crictl
I1129 09:01:39.094954 494126 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-proxy:v1.34.1" and sha "fc25172553d79197ecd840ec8dba1fba68330079355e974b04c1a441e6a4a0b7"
I1129 09:01:39.095016 494126 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-proxy:v1.34.1
I1129 09:01:39.096356 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.34.1
I1129 09:01:39.096365 494126 cache_images.go:118] "registry.k8s.io/coredns/coredns:v1.12.1" needs transfer: "registry.k8s.io/coredns/coredns:v1.12.1" does not exist at hash "52546a367cc9e0d924aa3b190596a9167fa6e53245023b5b5baf0f07e5443969" in container runtime
I1129 09:01:39.096402 494126 cri.go:218] Removing image: registry.k8s.io/coredns/coredns:v1.12.1
I1129 09:01:39.096438 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.34.1
I1129 09:01:39.096440 494126 ssh_runner.go:195] Run: which crictl
I1129 09:01:39.108053 494126 containerd.go:267] Checking existence of image with name "registry.k8s.io/pause:3.10.1" and sha "cd073f4c5f6a8e9dc6f3125ba00cf60819cae95c1ec84a1f146ee4a9cf9e803f"
I1129 09:01:39.108111 494126 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/pause:3.10.1
I1129 09:01:39.125198 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.34.1
I1129 09:01:39.125300 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.6.4-0
I1129 09:01:39.125361 494126 cache_images.go:118] "registry.k8s.io/kube-proxy:v1.34.1" needs transfer: "registry.k8s.io/kube-proxy:v1.34.1" does not exist at hash "fc25172553d79197ecd840ec8dba1fba68330079355e974b04c1a441e6a4a0b7" in container runtime
I1129 09:01:39.125408 494126 cri.go:218] Removing image: registry.k8s.io/kube-proxy:v1.34.1
I1129 09:01:39.125455 494126 ssh_runner.go:195] Run: which crictl
I1129 09:01:39.128374 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.34.1
I1129 09:01:39.132565 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.34.1
I1129 09:01:39.132640 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.12.1
I1129 09:01:39.138113 494126 cache_images.go:118] "registry.k8s.io/pause:3.10.1" needs transfer: "registry.k8s.io/pause:3.10.1" does not exist at hash "cd073f4c5f6a8e9dc6f3125ba00cf60819cae95c1ec84a1f146ee4a9cf9e803f" in container runtime
I1129 09:01:39.138163 494126 cri.go:218] Removing image: registry.k8s.io/pause:3.10.1
I1129 09:01:39.138200 494126 ssh_runner.go:195] Run: which crictl
I1129 09:01:39.167013 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.34.1
I1129 09:01:39.167128 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.34.1
I1129 09:01:39.167330 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.34.1
I1129 09:01:39.167330 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.6.4-0
I1129 09:01:39.167996 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.34.1
I1129 09:01:39.173113 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.12.1
I1129 09:01:39.173171 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1129 09:01:39.214078 494126 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1
I1129 09:01:39.214193 494126 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-scheduler_v1.34.1
I1129 09:01:39.214389 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.34.1
I1129 09:01:39.214576 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.6.4-0
I1129 09:01:39.220552 494126 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1
I1129 09:01:39.220649 494126 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-apiserver_v1.34.1
I1129 09:01:39.220857 494126 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-scheduler_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-scheduler_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-scheduler_v1.34.1': No such file or directory
I1129 09:01:39.220895 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1 --> /var/lib/minikube/images/kube-scheduler_v1.34.1 (17396736 bytes)
I1129 09:01:39.222433 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.12.1
I1129 09:01:39.222493 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1129 09:01:39.222587 494126 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1
I1129 09:01:39.222669 494126 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-controller-manager_v1.34.1
I1129 09:01:39.275608 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.34.1
I1129 09:01:39.275622 494126 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0
I1129 09:01:39.275679 494126 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-apiserver_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-apiserver_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-apiserver_v1.34.1': No such file or directory
I1129 09:01:39.275707 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1 --> /var/lib/minikube/images/kube-apiserver_v1.34.1 (27073024 bytes)
I1129 09:01:39.275716 494126 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/etcd_3.6.4-0
I1129 09:01:39.287672 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1129 09:01:39.287708 494126 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1
I1129 09:01:39.287708 494126 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-controller-manager_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-controller-manager_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-controller-manager_v1.34.1': No such file or directory
I1129 09:01:39.287808 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1 --> /var/lib/minikube/images/kube-controller-manager_v1.34.1 (22831104 bytes)
I1129 09:01:39.287825 494126 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/coredns_v1.12.1
I1129 09:01:39.339051 494126 ssh_runner.go:352] existence check for /var/lib/minikube/images/etcd_3.6.4-0: stat -c "%s %y" /var/lib/minikube/images/etcd_3.6.4-0: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/etcd_3.6.4-0': No such file or directory
I1129 09:01:39.339082 494126 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1
I1129 09:01:39.339092 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0 --> /var/lib/minikube/images/etcd_3.6.4-0 (74320896 bytes)
I1129 09:01:39.339110 494126 ssh_runner.go:352] existence check for /var/lib/minikube/images/coredns_v1.12.1: stat -c "%s %y" /var/lib/minikube/images/coredns_v1.12.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/coredns_v1.12.1': No such file or directory
I1129 09:01:39.339137 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1 --> /var/lib/minikube/images/coredns_v1.12.1 (22394368 bytes)
I1129 09:01:39.339173 494126 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-proxy_v1.34.1
I1129 09:01:39.339202 494126 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1
I1129 09:01:39.339317 494126 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/pause_3.10.1
I1129 09:01:39.424948 494126 ssh_runner.go:352] existence check for /var/lib/minikube/images/pause_3.10.1: stat -c "%s %y" /var/lib/minikube/images/pause_3.10.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/pause_3.10.1': No such file or directory
I1129 09:01:39.424997 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 --> /var/lib/minikube/images/pause_3.10.1 (321024 bytes)
I1129 09:01:39.425030 494126 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-proxy_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-proxy_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-proxy_v1.34.1': No such file or directory
I1129 09:01:39.425058 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1 --> /var/lib/minikube/images/kube-proxy_v1.34.1 (25966080 bytes)
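[annotation] The burst above from pid 494126 repeats one pattern per image: probe the remote path with stat, and only on a miss scp the cached tarball over. A minimal sketch of that check-then-copy step; ensureImage, the plain ssh/scp invocations, and the "docker@minikube" target are illustrative stand-ins for minikube's richer ssh_runner, not its real API:

    package main

    import (
    	"fmt"
    	"os/exec"
    )

    // ensureImage probes the remote path and copies the cached file over
    // only when the probe fails -- the per-image pattern in the log above.
    func ensureImage(host, cachePath, remotePath string) error {
    	// `stat -c "%s %y" <path>` exits non-zero when the file is absent.
    	probe := exec.Command("ssh", host, fmt.Sprintf(`stat -c "%%s %%y" %s`, remotePath))
    	if probe.Run() == nil {
    		return nil // already present remotely; skip the transfer
    	}
    	// Miss: transfer the cached image tarball, as the scp lines above do.
    	return exec.Command("scp", cachePath, host+":"+remotePath).Run()
    }

    func main() {
    	err := ensureImage("docker@minikube",
    		"/cache/pause_3.10.1", "/var/lib/minikube/images/pause_3.10.1")
    	fmt.Println(err)
    }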
I1129 09:01:36.592807 460401 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
I1129 09:01:36.593240 460401 api_server.go:269] stopped: https://192.168.85.2:8443/healthz: Get "https://192.168.85.2:8443/healthz": dial tcp 192.168.85.2:8443: connect: connection refused
I1129 09:01:36.593304 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1129 09:01:36.593360 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1129 09:01:36.620981 460401 cri.go:89] found id: "5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095"
I1129 09:01:36.621002 460401 cri.go:89] found id: "1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:01:36.621008 460401 cri.go:89] found id: ""
I1129 09:01:36.621018 460401 logs.go:282] 2 containers: [5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101]
I1129 09:01:36.621079 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:36.627593 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:36.632350 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1129 09:01:36.632420 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1129 09:01:36.660070 460401 cri.go:89] found id: "f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:01:36.660091 460401 cri.go:89] found id: ""
I1129 09:01:36.660100 460401 logs.go:282] 1 containers: [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625]
I1129 09:01:36.660156 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:36.664644 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1129 09:01:36.664720 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1129 09:01:36.696935 460401 cri.go:89] found id: ""
I1129 09:01:36.696967 460401 logs.go:282] 0 containers: []
W1129 09:01:36.696977 460401 logs.go:284] No container was found matching "coredns"
I1129 09:01:36.696985 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1129 09:01:36.697045 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1129 09:01:36.726832 460401 cri.go:89] found id: "092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:01:36.726857 460401 cri.go:89] found id: "1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:01:36.726864 460401 cri.go:89] found id: ""
I1129 09:01:36.726874 460401 logs.go:282] 2 containers: [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea]
I1129 09:01:36.726928 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:36.732693 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:36.737783 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1129 09:01:36.737848 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1129 09:01:36.765201 460401 cri.go:89] found id: ""
I1129 09:01:36.765229 460401 logs.go:282] 0 containers: []
W1129 09:01:36.765238 460401 logs.go:284] No container was found matching "kube-proxy"
I1129 09:01:36.765245 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1129 09:01:36.765300 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1129 09:01:36.795203 460401 cri.go:89] found id: "2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:01:36.795231 460401 cri.go:89] found id: "f78d0d97ffa9f2d0cbf8a0cf305a7f0c4323a505bb9b3fa272405c6b22ab9f00"
I1129 09:01:36.795237 460401 cri.go:89] found id: "976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:01:36.795242 460401 cri.go:89] found id: ""
I1129 09:01:36.795251 460401 logs.go:282] 3 containers: [2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a f78d0d97ffa9f2d0cbf8a0cf305a7f0c4323a505bb9b3fa272405c6b22ab9f00 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d]
I1129 09:01:36.795316 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:36.801008 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:36.806325 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:36.811017 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1129 09:01:36.811088 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1129 09:01:36.840359 460401 cri.go:89] found id: ""
I1129 09:01:36.840386 460401 logs.go:282] 0 containers: []
W1129 09:01:36.840397 460401 logs.go:284] No container was found matching "kindnet"
I1129 09:01:36.840406 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1129 09:01:36.840469 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1129 09:01:36.874045 460401 cri.go:89] found id: ""
I1129 09:01:36.874068 460401 logs.go:282] 0 containers: []
W1129 09:01:36.874075 460401 logs.go:284] No container was found matching "storage-provisioner"
I1129 09:01:36.874085 460401 logs.go:123] Gathering logs for describe nodes ...
I1129 09:01:36.874099 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1129 09:01:36.950404 460401 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1129 09:01:36.950426 460401 logs.go:123] Gathering logs for kube-apiserver [1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101] ...
I1129 09:01:36.950442 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:01:36.994232 460401 logs.go:123] Gathering logs for kube-scheduler [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21] ...
I1129 09:01:36.994264 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:01:37.049507 460401 logs.go:123] Gathering logs for kube-scheduler [1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea] ...
I1129 09:01:37.049546 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:01:37.087133 460401 logs.go:123] Gathering logs for kube-controller-manager [2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a] ...
I1129 09:01:37.087165 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:01:37.117577 460401 logs.go:123] Gathering logs for kube-controller-manager [976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d] ...
I1129 09:01:37.117602 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:01:37.154176 460401 logs.go:123] Gathering logs for kube-apiserver [5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095] ...
I1129 09:01:37.154210 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095"
I1129 09:01:37.197090 460401 logs.go:123] Gathering logs for etcd [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625] ...
I1129 09:01:37.197121 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:01:37.240775 460401 logs.go:123] Gathering logs for kube-controller-manager [f78d0d97ffa9f2d0cbf8a0cf305a7f0c4323a505bb9b3fa272405c6b22ab9f00] ...
I1129 09:01:37.240811 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f78d0d97ffa9f2d0cbf8a0cf305a7f0c4323a505bb9b3fa272405c6b22ab9f00"
I1129 09:01:37.269234 460401 logs.go:123] Gathering logs for containerd ...
I1129 09:01:37.269260 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1129 09:01:37.312948 460401 logs.go:123] Gathering logs for container status ...
I1129 09:01:37.312979 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1129 09:01:37.348500 460401 logs.go:123] Gathering logs for kubelet ...
I1129 09:01:37.348527 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1129 09:01:37.435755 460401 logs.go:123] Gathering logs for dmesg ...
I1129 09:01:37.435786 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
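[annotation] Pid 460401 is stuck in a probe-and-diagnose loop: GET /healthz on the apiserver, and on "connection refused" fall back to harvesting component logs before retrying. A sketch of just the probe half, assuming a bootstrap-style client; the skip-verify TLS config is an assumption for the sketch (before the cluster CA is trusted), not something the log shows:

    package main

    import (
    	"crypto/tls"
    	"fmt"
    	"net/http"
    	"time"
    )

    // waitForAPIServer polls /healthz until it answers 200, treating any
    // transport error (e.g. connection refused) as "not up yet".
    func waitForAPIServer(url string, timeout time.Duration) error {
    	client := &http.Client{
    		Timeout:   2 * time.Second,
    		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
    	}
    	deadline := time.Now().Add(timeout)
    	for time.Now().Before(deadline) {
    		resp, err := client.Get(url)
    		if err == nil {
    			resp.Body.Close()
    			if resp.StatusCode == http.StatusOK {
    				return nil // apiserver answered healthz
    			}
    		}
    		time.Sleep(2 * time.Second) // refused or unhealthy: back off, retry
    	}
    	return fmt.Errorf("apiserver never became healthy at %s", url)
    }

    func main() {
    	fmt.Println(waitForAPIServer("https://192.168.85.2:8443/healthz", 30*time.Second))
    }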
I1129 09:01:39.440026 493486 out.go:252] - Booting up control plane ...
I1129 09:01:39.440161 493486 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1129 09:01:39.440285 493486 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1129 09:01:39.440970 493486 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1129 09:01:39.459308 493486 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1129 09:01:39.460971 493486 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1129 09:01:39.461057 493486 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1129 09:01:39.610284 493486 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
I1129 09:01:39.952440 460401 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
I1129 09:01:39.952996 460401 api_server.go:269] stopped: https://192.168.85.2:8443/healthz: Get "https://192.168.85.2:8443/healthz": dial tcp 192.168.85.2:8443: connect: connection refused
I1129 09:01:39.953076 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1129 09:01:39.953145 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1129 09:01:39.990073 460401 cri.go:89] found id: "5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095"
I1129 09:01:39.990100 460401 cri.go:89] found id: "1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:01:39.990107 460401 cri.go:89] found id: ""
I1129 09:01:39.990117 460401 logs.go:282] 2 containers: [5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101]
I1129 09:01:39.990183 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:39.996871 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:40.002374 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1129 09:01:40.002458 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1129 09:01:40.036502 460401 cri.go:89] found id: "f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:01:40.036525 460401 cri.go:89] found id: ""
I1129 09:01:40.036542 460401 logs.go:282] 1 containers: [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625]
I1129 09:01:40.036600 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:40.044171 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1129 09:01:40.044261 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1129 09:01:40.084048 460401 cri.go:89] found id: ""
I1129 09:01:40.084165 460401 logs.go:282] 0 containers: []
W1129 09:01:40.084184 460401 logs.go:284] No container was found matching "coredns"
I1129 09:01:40.084195 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1129 09:01:40.084329 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1129 09:01:40.116869 460401 cri.go:89] found id: "092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:01:40.116899 460401 cri.go:89] found id: "1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:01:40.116905 460401 cri.go:89] found id: ""
I1129 09:01:40.116916 460401 logs.go:282] 2 containers: [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea]
I1129 09:01:40.116982 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:40.123222 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:40.128079 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1129 09:01:40.128146 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1129 09:01:40.159071 460401 cri.go:89] found id: ""
I1129 09:01:40.159101 460401 logs.go:282] 0 containers: []
W1129 09:01:40.159112 460401 logs.go:284] No container was found matching "kube-proxy"
I1129 09:01:40.159120 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1129 09:01:40.159178 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1129 09:01:40.191945 460401 cri.go:89] found id: "2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:01:40.191973 460401 cri.go:89] found id: "976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:01:40.191979 460401 cri.go:89] found id: ""
I1129 09:01:40.191990 460401 logs.go:282] 2 containers: [2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d]
I1129 09:01:40.192055 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:40.197191 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:40.202276 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1129 09:01:40.202350 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1129 09:01:40.236481 460401 cri.go:89] found id: ""
I1129 09:01:40.236510 460401 logs.go:282] 0 containers: []
W1129 09:01:40.236521 460401 logs.go:284] No container was found matching "kindnet"
I1129 09:01:40.236528 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1129 09:01:40.236597 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1129 09:01:40.266476 460401 cri.go:89] found id: ""
I1129 09:01:40.266505 460401 logs.go:282] 0 containers: []
W1129 09:01:40.266516 460401 logs.go:284] No container was found matching "storage-provisioner"
I1129 09:01:40.266529 460401 logs.go:123] Gathering logs for etcd [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625] ...
I1129 09:01:40.266547 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:01:40.310670 460401 logs.go:123] Gathering logs for kube-scheduler [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21] ...
I1129 09:01:40.310713 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:01:40.362446 460401 logs.go:123] Gathering logs for kube-scheduler [1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea] ...
I1129 09:01:40.362487 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:01:40.399108 460401 logs.go:123] Gathering logs for kube-controller-manager [976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d] ...
I1129 09:01:40.399138 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:01:40.435770 460401 logs.go:123] Gathering logs for containerd ...
I1129 09:01:40.435799 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1129 09:01:40.485497 460401 logs.go:123] Gathering logs for dmesg ...
I1129 09:01:40.485541 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1129 09:01:40.502944 460401 logs.go:123] Gathering logs for describe nodes ...
I1129 09:01:40.502977 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1129 09:01:40.592582 460401 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1129 09:01:40.592610 460401 logs.go:123] Gathering logs for kube-controller-manager [2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a] ...
I1129 09:01:40.592626 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:01:40.634792 460401 logs.go:123] Gathering logs for container status ...
I1129 09:01:40.634828 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1129 09:01:40.678348 460401 logs.go:123] Gathering logs for kubelet ...
I1129 09:01:40.678382 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1129 09:01:40.797799 460401 logs.go:123] Gathering logs for kube-apiserver [5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095] ...
I1129 09:01:40.797849 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095"
I1129 09:01:40.854148 460401 logs.go:123] Gathering logs for kube-apiserver [1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101] ...
I1129 09:01:40.854196 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:01:43.404360 460401 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
I1129 09:01:43.404858 460401 api_server.go:269] stopped: https://192.168.85.2:8443/healthz: Get "https://192.168.85.2:8443/healthz": dial tcp 192.168.85.2:8443: connect: connection refused
I1129 09:01:43.404925 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1129 09:01:43.404996 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1129 09:01:43.435800 460401 cri.go:89] found id: "5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095"
I1129 09:01:43.435836 460401 cri.go:89] found id: "1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:01:43.435843 460401 cri.go:89] found id: ""
I1129 09:01:43.435854 460401 logs.go:282] 2 containers: [5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101]
I1129 09:01:43.435923 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:43.441287 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:43.445761 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1129 09:01:43.445837 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1129 09:01:43.474830 460401 cri.go:89] found id: "f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:01:43.474859 460401 cri.go:89] found id: ""
I1129 09:01:43.474870 460401 logs.go:282] 1 containers: [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625]
I1129 09:01:43.474932 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:43.481397 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1129 09:01:43.481483 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1129 09:01:43.513967 460401 cri.go:89] found id: ""
I1129 09:01:43.513995 460401 logs.go:282] 0 containers: []
W1129 09:01:43.514006 460401 logs.go:284] No container was found matching "coredns"
I1129 09:01:43.514014 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1129 09:01:43.514074 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1129 09:01:43.550388 460401 cri.go:89] found id: "092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:01:43.550416 460401 cri.go:89] found id: "1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:01:43.550421 460401 cri.go:89] found id: ""
I1129 09:01:43.550431 460401 logs.go:282] 2 containers: [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea]
I1129 09:01:43.550505 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:43.557316 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:43.563173 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1129 09:01:43.563248 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1129 09:01:43.599482 460401 cri.go:89] found id: ""
I1129 09:01:43.599524 460401 logs.go:282] 0 containers: []
W1129 09:01:43.599535 460401 logs.go:284] No container was found matching "kube-proxy"
I1129 09:01:43.599545 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1129 09:01:43.599611 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1129 09:01:43.637030 460401 cri.go:89] found id: "2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:01:43.637053 460401 cri.go:89] found id: "976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:01:43.637059 460401 cri.go:89] found id: ""
I1129 09:01:43.637069 460401 logs.go:282] 2 containers: [2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d]
I1129 09:01:43.637130 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:43.643786 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:43.650011 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1129 09:01:43.650089 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1129 09:01:43.687244 460401 cri.go:89] found id: ""
I1129 09:01:43.687273 460401 logs.go:282] 0 containers: []
W1129 09:01:43.687295 460401 logs.go:284] No container was found matching "kindnet"
I1129 09:01:43.687303 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1129 09:01:43.687372 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1129 09:01:43.726453 460401 cri.go:89] found id: ""
I1129 09:01:43.726490 460401 logs.go:282] 0 containers: []
W1129 09:01:43.726501 460401 logs.go:284] No container was found matching "storage-provisioner"
I1129 09:01:43.726515 460401 logs.go:123] Gathering logs for kube-scheduler [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21] ...
I1129 09:01:43.726533 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:01:43.795442 460401 logs.go:123] Gathering logs for kube-controller-manager [2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a] ...
I1129 09:01:43.795490 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:01:43.841417 460401 logs.go:123] Gathering logs for kube-controller-manager [976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d] ...
I1129 09:01:43.841457 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:01:43.888511 460401 logs.go:123] Gathering logs for container status ...
I1129 09:01:43.888554 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1129 09:01:43.930753 460401 logs.go:123] Gathering logs for kubelet ...
I1129 09:01:43.930789 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1129 09:01:44.043358 460401 logs.go:123] Gathering logs for dmesg ...
I1129 09:01:44.043410 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1129 09:01:44.065065 460401 logs.go:123] Gathering logs for kube-scheduler [1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea] ...
I1129 09:01:44.065107 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:01:44.112915 460401 logs.go:123] Gathering logs for containerd ...
I1129 09:01:44.112958 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1129 09:01:44.174077 460401 logs.go:123] Gathering logs for describe nodes ...
I1129 09:01:44.174120 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1129 09:01:44.247887 460401 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1129 09:01:44.247909 460401 logs.go:123] Gathering logs for kube-apiserver [5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095] ...
I1129 09:01:44.247927 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095"
I1129 09:01:44.290842 460401 logs.go:123] Gathering logs for kube-apiserver [1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101] ...
I1129 09:01:44.290882 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:01:44.335297 460401 logs.go:123] Gathering logs for etcd [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625] ...
I1129 09:01:44.335330 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:01:39.522040 494126 containerd.go:285] Loading image: /var/lib/minikube/images/pause_3.10.1
I1129 09:01:39.522116 494126 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/pause_3.10.1
I1129 09:01:39.664265 494126 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 from cache
I1129 09:01:39.664314 494126 containerd.go:285] Loading image: /var/lib/minikube/images/kube-scheduler_v1.34.1
I1129 09:01:39.664386 494126 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-scheduler_v1.34.1
I1129 09:01:40.291377 494126 containerd.go:267] Checking existence of image with name "gcr.io/k8s-minikube/storage-provisioner:v5" and sha "6e38f40d628db3002f5617342c8872c935de530d867d0f709a2fbda1a302a562"
I1129 09:01:40.291450 494126 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==gcr.io/k8s-minikube/storage-provisioner:v5
I1129 09:01:40.811289 494126 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-scheduler_v1.34.1: (1.146868238s)
I1129 09:01:40.811331 494126 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1 from cache
I1129 09:01:40.811358 494126 containerd.go:285] Loading image: /var/lib/minikube/images/kube-controller-manager_v1.34.1
I1129 09:01:40.811407 494126 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-controller-manager_v1.34.1
I1129 09:01:40.811531 494126 cache_images.go:118] "gcr.io/k8s-minikube/storage-provisioner:v5" needs transfer: "gcr.io/k8s-minikube/storage-provisioner:v5" does not exist at hash "6e38f40d628db3002f5617342c8872c935de530d867d0f709a2fbda1a302a562" in container runtime
I1129 09:01:40.811570 494126 cri.go:218] Removing image: gcr.io/k8s-minikube/storage-provisioner:v5
I1129 09:01:40.811610 494126 ssh_runner.go:195] Run: which crictl
I1129 09:01:41.858427 494126 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-controller-manager_v1.34.1: (1.046983131s)
I1129 09:01:41.858463 494126 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1 from cache
I1129 09:01:41.858488 494126 containerd.go:285] Loading image: /var/lib/minikube/images/coredns_v1.12.1
I1129 09:01:41.858484 494126 ssh_runner.go:235] Completed: which crictl: (1.046843529s)
I1129 09:01:41.858549 494126 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/coredns_v1.12.1
I1129 09:01:41.858557 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1129 09:01:43.352594 494126 ssh_runner.go:235] Completed: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5: (1.494004994s)
I1129 09:01:43.352634 494126 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/coredns_v1.12.1: (1.49406142s)
I1129 09:01:43.352657 494126 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1 from cache
I1129 09:01:43.352684 494126 containerd.go:285] Loading image: /var/lib/minikube/images/kube-apiserver_v1.34.1
I1129 09:01:43.352721 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1129 09:01:43.352741 494126 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-apiserver_v1.34.1
I1129 09:01:44.495181 494126 ssh_runner.go:235] Completed: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5: (1.142420788s)
I1129 09:01:44.495251 494126 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-apiserver_v1.34.1: (1.142485031s)
I1129 09:01:44.495274 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1129 09:01:44.495280 494126 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1 from cache
I1129 09:01:44.495307 494126 containerd.go:285] Loading image: /var/lib/minikube/images/kube-proxy_v1.34.1
I1129 09:01:44.495357 494126 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-proxy_v1.34.1
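[annotation] The loader above serializes imports into containerd's k8s.io namespace ("Loading image: X" followed by `sudo ctr -n=k8s.io images import X`), logging elapsed time for slow commands (the "(1.146868238s)" suffixes). A sketch of that loop; importImages is a hypothetical wrapper around the same ctr invocation the log shows:

    package main

    import (
    	"fmt"
    	"os/exec"
    	"time"
    )

    // importImages runs one "ctr images import" at a time into the k8s.io
    // namespace, reporting how long each import took.
    func importImages(paths []string) error {
    	for _, p := range paths {
    		start := time.Now()
    		cmd := exec.Command("sudo", "ctr", "-n=k8s.io", "images", "import", p)
    		if out, err := cmd.CombinedOutput(); err != nil {
    			return fmt.Errorf("import %s: %v: %s", p, err, out)
    		}
    		fmt.Printf("imported %s in %s\n", p, time.Since(start))
    	}
    	return nil
    }

    func main() {
    	_ = importImages([]string{"/var/lib/minikube/images/pause_3.10.1"})
    }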
I1129 09:01:44.611298 493486 kubeadm.go:319] [apiclient] All control plane components are healthy after 5.002099 seconds
I1129 09:01:44.611461 493486 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1129 09:01:44.626505 493486 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1129 09:01:45.150669 493486 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1129 09:01:45.150981 493486 kubeadm.go:319] [mark-control-plane] Marking the node old-k8s-version-295154 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1129 09:01:45.666153 493486 kubeadm.go:319] [bootstrap-token] Using token: fc3siq.brm7sjv6bjwb7j34
I1129 09:01:45.667757 493486 out.go:252] - Configuring RBAC rules ...
I1129 09:01:45.667991 493486 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1129 09:01:45.673404 493486 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1129 09:01:45.685336 493486 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1129 09:01:45.691974 493486 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1129 09:01:45.695311 493486 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1129 09:01:45.698699 493486 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1129 09:01:45.712796 493486 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1129 09:01:45.913473 493486 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1129 09:01:46.081267 493486 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1129 09:01:46.081993 493486 kubeadm.go:319]
I1129 09:01:46.082087 493486 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1129 09:01:46.082095 493486 kubeadm.go:319]
I1129 09:01:46.082160 493486 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1129 09:01:46.082179 493486 kubeadm.go:319]
I1129 09:01:46.082199 493486 kubeadm.go:319] mkdir -p $HOME/.kube
I1129 09:01:46.082251 493486 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1129 09:01:46.082302 493486 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1129 09:01:46.082308 493486 kubeadm.go:319]
I1129 09:01:46.082372 493486 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1129 09:01:46.082377 493486 kubeadm.go:319]
I1129 09:01:46.082434 493486 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1129 09:01:46.082445 493486 kubeadm.go:319]
I1129 09:01:46.082520 493486 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1129 09:01:46.082627 493486 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1129 09:01:46.082750 493486 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1129 09:01:46.082756 493486 kubeadm.go:319]
I1129 09:01:46.082891 493486 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1129 09:01:46.083019 493486 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1129 09:01:46.083030 493486 kubeadm.go:319]
I1129 09:01:46.083149 493486 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token fc3siq.brm7sjv6bjwb7j34 \
I1129 09:01:46.083319 493486 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:cfb13a4080e942b53ddf5e01885fcdd270ac918e177076400130991e2b6b7778 \
I1129 09:01:46.083366 493486 kubeadm.go:319] --control-plane
I1129 09:01:46.083383 493486 kubeadm.go:319]
I1129 09:01:46.083539 493486 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1129 09:01:46.083561 493486 kubeadm.go:319]
I1129 09:01:46.083696 493486 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token fc3siq.brm7sjv6bjwb7j34 \
I1129 09:01:46.083889 493486 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:cfb13a4080e942b53ddf5e01885fcdd270ac918e177076400130991e2b6b7778
I1129 09:01:46.087692 493486 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1044-gcp\n", err: exit status 1
I1129 09:01:46.087874 493486 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1129 09:01:46.087925 493486 cni.go:84] Creating CNI manager for ""
I1129 09:01:46.087942 493486 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1129 09:01:46.089437 493486 out.go:179] * Configuring CNI (Container Networking Interface) ...
I1129 09:01:46.093295 493486 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I1129 09:01:46.100033 493486 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.28.0/kubectl ...
I1129 09:01:46.100061 493486 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I1129 09:01:46.118046 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I1129 09:01:47.108562 493486 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1129 09:01:47.108767 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:47.108838 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes old-k8s-version-295154 minikube.k8s.io/updated_at=2025_11_29T09_01_47_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=d0eb20ec824c82ab3f24099c8b785e0a2a5789af minikube.k8s.io/name=old-k8s-version-295154 minikube.k8s.io/primary=true
I1129 09:01:47.209163 493486 ops.go:34] apiserver oom_adj: -16
I1129 09:01:47.209168 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:47.709726 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:48.209857 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
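[annotation] Pid 493486 above polls `kubectl get sa default` at roughly 500ms intervals: the default ServiceAccount only appears once the controller-manager's serviceaccount controller is running, so its existence doubles as a readiness probe for the freshly initialized control plane. A minimal version of that loop, with waitForDefaultSA as a hypothetical helper name:

    package main

    import (
    	"fmt"
    	"os/exec"
    	"time"
    )

    // waitForDefaultSA retries "kubectl get sa default" until it succeeds,
    // i.e. until the serviceaccount controller has populated the namespace.
    func waitForDefaultSA(kubectl, kubeconfig string, timeout time.Duration) error {
    	deadline := time.Now().Add(timeout)
    	for time.Now().Before(deadline) {
    		cmd := exec.Command("sudo", kubectl, "get", "sa", "default",
    			"--kubeconfig="+kubeconfig)
    		if cmd.Run() == nil {
    			return nil // default SA exists; controllers are up
    		}
    		time.Sleep(500 * time.Millisecond)
    	}
    	return fmt.Errorf("default serviceaccount never appeared")
    }

    func main() {
    	fmt.Println(waitForDefaultSA("/var/lib/minikube/binaries/v1.28.0/kubectl",
    		"/var/lib/minikube/kubeconfig", time.Minute))
    }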
I1129 09:01:44.521775 494126 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5
I1129 09:01:44.521916 494126 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v5
I1129 09:01:45.636811 494126 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-proxy_v1.34.1: (1.141419574s)
I1129 09:01:45.636849 494126 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1 from cache
I1129 09:01:45.636857 494126 ssh_runner.go:235] Completed: stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v5: (1.114924181s)
I1129 09:01:45.636879 494126 ssh_runner.go:352] existence check for /var/lib/minikube/images/storage-provisioner_v5: stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v5: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/storage-provisioner_v5': No such file or directory
I1129 09:01:45.636882 494126 containerd.go:285] Loading image: /var/lib/minikube/images/etcd_3.6.4-0
I1129 09:01:45.636902 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 --> /var/lib/minikube/images/storage-provisioner_v5 (9060352 bytes)
I1129 09:01:45.636924 494126 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/etcd_3.6.4-0
I1129 09:01:48.452908 494126 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/etcd_3.6.4-0: (2.815950505s)
I1129 09:01:48.452936 494126 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0 from cache
I1129 09:01:48.452972 494126 containerd.go:285] Loading image: /var/lib/minikube/images/storage-provisioner_v5
I1129 09:01:48.453041 494126 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/storage-provisioner_v5
I1129 09:01:49.370622 494126 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 from cache
I1129 09:01:49.370663 494126 cache_images.go:125] Successfully loaded all cached images
I1129 09:01:49.370668 494126 cache_images.go:94] duration metric: took 10.495116704s to LoadCachedImages
I1129 09:01:49.370682 494126 kubeadm.go:935] updating node { 192.168.103.2 8443 v1.34.1 containerd true true} ...
I1129 09:01:49.370811 494126 kubeadm.go:947] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=no-preload-924441 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.103.2
[Install]
config:
{KubernetesVersion:v1.34.1 ClusterName:no-preload-924441 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
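[annotation] The unit text above is the kubelet systemd drop-in (installed below as /etc/systemd/system/kubelet.service.d/10-kubeadm.conf). The empty "ExecStart=" line is the standard systemd idiom for replacing, rather than appending to, the ExecStart inherited from the base kubelet.service. A sketch of writing such a drop-in, with the flag list shortened for illustration:

    package main

    import (
    	"fmt"
    	"os"
    )

    func main() {
    	// Empty ExecStart= clears the inherited command; the second line
    	// then becomes the only ExecStart for the unit.
    	dropIn := `[Unit]
    Wants=containerd.service
    [Service]
    ExecStart=
    ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --config=/var/lib/kubelet/config.yaml
    [Install]
    `
    	err := os.WriteFile("/etc/systemd/system/kubelet.service.d/10-kubeadm.conf",
    		[]byte(dropIn), 0o644)
    	fmt.Println(err) // then: systemctl daemon-reload && systemctl restart kubelet
    }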
I1129 09:01:49.370873 494126 ssh_runner.go:195] Run: sudo crictl info
I1129 09:01:49.397690 494126 cni.go:84] Creating CNI manager for ""
I1129 09:01:49.397714 494126 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1129 09:01:49.397740 494126 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1129 09:01:49.397786 494126 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.103.2 APIServerPort:8443 KubernetesVersion:v1.34.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:no-preload-924441 NodeName:no-preload-924441 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.103.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.103.2 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1129 09:01:49.397929 494126 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.103.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///run/containerd/containerd.sock
name: "no-preload-924441"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.103.2"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.103.2"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.1
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
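[annotation] The rendered config above bundles four kubeadm documents in one file (InitConfiguration, ClusterConfiguration, KubeletConfiguration, KubeProxyConfiguration); it is staged below as /var/tmp/minikube/kubeadm.yaml.new. The init step that consumes it is not shown in this excerpt; a sketch of the conventional invocation, assuming the staged file is fed to `kubeadm init --config` (a standard kubeadm flag) and nothing more:

    package main

    import (
    	"fmt"
    	"os/exec"
    )

    func main() {
    	// kubeadm reads all four documents from the single staged file.
    	cmd := exec.Command("sudo", "/var/lib/minikube/binaries/v1.34.1/kubeadm",
    		"init", "--config", "/var/tmp/minikube/kubeadm.yaml.new")
    	out, err := cmd.CombinedOutput()
    	fmt.Printf("%s err=%v\n", out, err)
    }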
I1129 09:01:49.397999 494126 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.1
I1129 09:01:49.407101 494126 binaries.go:54] Didn't find k8s binaries: sudo ls /var/lib/minikube/binaries/v1.34.1: Process exited with status 2
stdout:
stderr:
ls: cannot access '/var/lib/minikube/binaries/v1.34.1': No such file or directory
Initiating transfer...
I1129 09:01:49.407180 494126 ssh_runner.go:195] Run: sudo mkdir -p /var/lib/minikube/binaries/v1.34.1
I1129 09:01:49.415958 494126 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubectl?checksum=file:https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubectl.sha256
I1129 09:01:49.415978 494126 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubelet?checksum=file:https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubelet.sha256
I1129 09:01:49.416026 494126 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1129 09:01:49.416047 494126 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubectl
I1129 09:01:49.415978 494126 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubeadm?checksum=file:https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubeadm.sha256
I1129 09:01:49.416149 494126 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubeadm
I1129 09:01:49.429834 494126 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.34.1/kubectl: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubectl: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.34.1/kubectl': No such file or directory
I1129 09:01:49.429872 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/cache/linux/amd64/v1.34.1/kubectl --> /var/lib/minikube/binaries/v1.34.1/kubectl (60559544 bytes)
I1129 09:01:49.429915 494126 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.34.1/kubeadm: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubeadm: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.34.1/kubeadm': No such file or directory
I1129 09:01:49.429924 494126 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubelet
I1129 09:01:49.429943 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/cache/linux/amd64/v1.34.1/kubeadm --> /var/lib/minikube/binaries/v1.34.1/kubeadm (74027192 bytes)
I1129 09:01:49.438987 494126 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.34.1/kubelet: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubelet: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.34.1/kubelet': No such file or directory
I1129 09:01:49.439024 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/cache/linux/amd64/v1.34.1/kubelet --> /var/lib/minikube/binaries/v1.34.1/kubelet (59195684 bytes)
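[annotation] The "?checksum=file:...sha256" query on the dl.k8s.io URLs at binary.go:80 is download-with-verification syntax (of the kind hashicorp/go-getter uses): fetch the artifact, fetch its published .sha256 file, and compare digests before installing. A stdlib-only sketch of the same verification; fetchVerified and httpGet are illustrative names:

    package main

    import (
    	"crypto/sha256"
    	"encoding/hex"
    	"fmt"
    	"io"
    	"net/http"
    	"os"
    	"strings"
    )

    // fetchVerified downloads a binary and its checksum file, and writes the
    // binary only if the SHA-256 digests match.
    func fetchVerified(url, sumURL, dst string) error {
    	body, err := httpGet(url)
    	if err != nil {
    		return err
    	}
    	sumRaw, err := httpGet(sumURL)
    	if err != nil {
    		return err
    	}
    	// .sha256 files are either a bare hex digest or "<hex>  <name>".
    	want := strings.Fields(string(sumRaw))[0]
    	got := sha256.Sum256(body)
    	if hex.EncodeToString(got[:]) != want {
    		return fmt.Errorf("checksum mismatch for %s", url)
    	}
    	return os.WriteFile(dst, body, 0o755)
    }

    func httpGet(url string) ([]byte, error) {
    	resp, err := http.Get(url)
    	if err != nil {
    		return nil, err
    	}
    	defer resp.Body.Close()
    	return io.ReadAll(resp.Body)
    }

    func main() {
    	err := fetchVerified(
    		"https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubectl",
    		"https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubectl.sha256",
    		"kubectl")
    	fmt.Println(err)
    }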
I1129 09:01:46.884140 460401 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
I1129 09:01:48.710027 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:49.210030 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:49.709395 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:50.209866 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:50.709354 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:51.209979 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:51.710291 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:52.209895 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:52.709970 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:53.209937 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:49.969644 494126 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1129 09:01:49.978574 494126 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (322 bytes)
I1129 09:01:49.992833 494126 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1129 09:01:50.009876 494126 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2232 bytes)
I1129 09:01:50.023695 494126 ssh_runner.go:195] Run: grep 192.168.103.2 control-plane.minikube.internal$ /etc/hosts
I1129 09:01:50.027747 494126 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.103.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1129 09:01:50.038376 494126 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1129 09:01:50.121247 494126 ssh_runner.go:195] Run: sudo systemctl start kubelet
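[annotation] The /etc/hosts one-liner at 09:01:50.027747 above makes the control-plane entry idempotent: filter out any stale control-plane.minikube.internal line, append the current IP, and copy the temp file back over /etc/hosts (a rename would break the bind mount Docker places on /etc/hosts, which is presumably why it cp's onto it in place). A Go rendition of the same rewrite, with pinControlPlane as an illustrative name:

    package main

    import (
    	"fmt"
    	"os"
    	"strings"
    )

    // pinControlPlane drops any existing control-plane entry and appends one
    // for the given IP, writing /etc/hosts back in place.
    func pinControlPlane(hostsPath, ip string) error {
    	data, err := os.ReadFile(hostsPath)
    	if err != nil {
    		return err
    	}
    	var kept []string
    	for _, line := range strings.Split(strings.TrimRight(string(data), "\n"), "\n") {
    		if !strings.Contains(line, "control-plane.minikube.internal") {
    			kept = append(kept, line)
    		}
    	}
    	kept = append(kept, ip+" control-plane.minikube.internal")
    	return os.WriteFile(hostsPath, []byte(strings.Join(kept, "\n")+"\n"), 0o644)
    }

    func main() {
    	fmt.Println(pinControlPlane("/etc/hosts", "192.168.103.2"))
    }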
I1129 09:01:50.149394 494126 certs.go:69] Setting up /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441 for IP: 192.168.103.2
I1129 09:01:50.149417 494126 certs.go:195] generating shared ca certs ...
I1129 09:01:50.149438 494126 certs.go:227] acquiring lock for ca certs: {Name:mk5e6bcae0a6944966b241f3c6197a472703c991 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:01:50.149602 494126 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/22000-255825/.minikube/ca.key
I1129 09:01:50.149703 494126 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/22000-255825/.minikube/proxy-client-ca.key
I1129 09:01:50.149717 494126 certs.go:257] generating profile certs ...
I1129 09:01:50.149797 494126 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/client.key
I1129 09:01:50.149812 494126 crypto.go:68] Generating cert /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/client.crt with IP's: []
I1129 09:01:50.352856 494126 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/client.crt ...
I1129 09:01:50.352896 494126 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/client.crt: {Name:mk24ad5255d5c075502606493622eaafcc9932fa Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:01:50.353102 494126 crypto.go:164] Writing key to /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/client.key ...
I1129 09:01:50.353115 494126 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/client.key: {Name:mkdb2263ef25fafc1ea0385357022f8199c8aa35 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:01:50.353223 494126 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/apiserver.key.f72e5c7b
I1129 09:01:50.353240 494126 crypto.go:68] Generating cert /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/apiserver.crt.f72e5c7b with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.103.2]
I1129 09:01:50.513341 494126 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/apiserver.crt.f72e5c7b ...
I1129 09:01:50.513379 494126 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/apiserver.crt.f72e5c7b: {Name:mk3f760c06958b6df21bcc9bde3527a0c97ad882 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:01:50.513582 494126 crypto.go:164] Writing key to /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/apiserver.key.f72e5c7b ...
I1129 09:01:50.513601 494126 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/apiserver.key.f72e5c7b: {Name:mk4c8be15a8f6eca407c52c7afdc7ecb10357a29 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:01:50.513678 494126 certs.go:382] copying /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/apiserver.crt.f72e5c7b -> /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/apiserver.crt
I1129 09:01:50.513771 494126 certs.go:386] copying /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/apiserver.key.f72e5c7b -> /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/apiserver.key
I1129 09:01:50.513831 494126 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/proxy-client.key
I1129 09:01:50.513847 494126 crypto.go:68] Generating cert /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/proxy-client.crt with IP's: []
I1129 09:01:50.651114 494126 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/proxy-client.crt ...
I1129 09:01:50.651146 494126 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/proxy-client.crt: {Name:mkbdace4e62ecdfbe11ae904155295b956ffc842 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:01:50.651330 494126 crypto.go:164] Writing key to /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/proxy-client.key ...
I1129 09:01:50.651343 494126 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/proxy-client.key: {Name:mk14d837fb2449197c689047daf9f07db1da4b8c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
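The profile certs generated above are all signed by the shared minikubeCA: a client cert for "minikube-user", an apiserver serving cert whose SANs cover the service VIP, localhost, and the node IP (10.96.0.1 127.0.0.1 10.0.0.1 192.168.103.2), and a proxy-client cert for the aggregator. A hedged Go sketch of issuing such a CA-signed cert with IP SANs via crypto/x509 (illustrative only; minikube's crypto.go differs in detail):

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"math/big"
	"net"
	"os"
	"time"
)

func check(err error) {
	if err != nil {
		panic(err)
	}
}

func main() {
	// CA key pair; in minikube this is the cached minikubeCA.
	caKey, err := rsa.GenerateKey(rand.Reader, 2048)
	check(err)
	caTmpl := &x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{CommonName: "minikubeCA"},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().Add(365 * 24 * time.Hour),
		IsCA:                  true,
		KeyUsage:              x509.KeyUsageCertSign,
		BasicConstraintsValid: true,
	}
	caDER, err := x509.CreateCertificate(rand.Reader, caTmpl, caTmpl, &caKey.PublicKey, caKey)
	check(err)
	caCert, err := x509.ParseCertificate(caDER)
	check(err)

	// Serving cert carrying the IP SANs seen in the log above.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	check(err)
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(2),
		Subject:      pkix.Name{CommonName: "minikube"},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(365 * 24 * time.Hour),
		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		IPAddresses: []net.IP{
			net.ParseIP("10.96.0.1"), net.ParseIP("127.0.0.1"),
			net.ParseIP("10.0.0.1"), net.ParseIP("192.168.103.2"),
		},
	}
	der, err := x509.CreateCertificate(rand.Reader, tmpl, caCert, &key.PublicKey, caKey)
	check(err)
	check(pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: der}))
}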
I1129 09:01:50.651522 494126 certs.go:484] found cert: /home/jenkins/minikube-integration/22000-255825/.minikube/certs/259483.pem (1338 bytes)
W1129 09:01:50.651563 494126 certs.go:480] ignoring /home/jenkins/minikube-integration/22000-255825/.minikube/certs/259483_empty.pem, impossibly tiny 0 bytes
I1129 09:01:50.651573 494126 certs.go:484] found cert: /home/jenkins/minikube-integration/22000-255825/.minikube/certs/ca-key.pem (1675 bytes)
I1129 09:01:50.651652 494126 certs.go:484] found cert: /home/jenkins/minikube-integration/22000-255825/.minikube/certs/ca.pem (1078 bytes)
I1129 09:01:50.651691 494126 certs.go:484] found cert: /home/jenkins/minikube-integration/22000-255825/.minikube/certs/cert.pem (1123 bytes)
I1129 09:01:50.651714 494126 certs.go:484] found cert: /home/jenkins/minikube-integration/22000-255825/.minikube/certs/key.pem (1679 bytes)
I1129 09:01:50.651769 494126 certs.go:484] found cert: /home/jenkins/minikube-integration/22000-255825/.minikube/files/etc/ssl/certs/2594832.pem (1708 bytes)
I1129 09:01:50.652337 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1129 09:01:50.672071 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1129 09:01:50.691184 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1129 09:01:50.711306 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I1129 09:01:50.730860 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
I1129 09:01:50.750662 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1671 bytes)
I1129 09:01:50.771690 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1129 09:01:50.791789 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I1129 09:01:50.811356 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/certs/259483.pem --> /usr/share/ca-certificates/259483.pem (1338 bytes)
I1129 09:01:50.833983 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/files/etc/ssl/certs/2594832.pem --> /usr/share/ca-certificates/2594832.pem (1708 bytes)
I1129 09:01:50.853036 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1129 09:01:50.871262 494126 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1129 09:01:50.885099 494126 ssh_runner.go:195] Run: openssl version
I1129 09:01:50.892072 494126 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/259483.pem && ln -fs /usr/share/ca-certificates/259483.pem /etc/ssl/certs/259483.pem"
I1129 09:01:50.901864 494126 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/259483.pem
I1129 09:01:50.906616 494126 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 29 08:35 /usr/share/ca-certificates/259483.pem
I1129 09:01:50.906675 494126 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/259483.pem
I1129 09:01:50.943595 494126 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/259483.pem /etc/ssl/certs/51391683.0"
I1129 09:01:50.953459 494126 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/2594832.pem && ln -fs /usr/share/ca-certificates/2594832.pem /etc/ssl/certs/2594832.pem"
I1129 09:01:50.962610 494126 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/2594832.pem
I1129 09:01:50.966703 494126 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 29 08:35 /usr/share/ca-certificates/2594832.pem
I1129 09:01:50.966778 494126 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/2594832.pem
I1129 09:01:51.002253 494126 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/2594832.pem /etc/ssl/certs/3ec20f2e.0"
I1129 09:01:51.012487 494126 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1129 09:01:51.022391 494126 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1129 09:01:51.026710 494126 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 29 08:29 /usr/share/ca-certificates/minikubeCA.pem
I1129 09:01:51.026814 494126 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1129 09:01:51.063394 494126 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
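The openssl x509 -hash / ln -fs sequence above is the standard c_rehash trick: each CA PEM copied into /usr/share/ca-certificates gets a symlink in /etc/ssl/certs named after its subject hash plus a ".0" suffix (b5213941.0 for minikubeCA.pem here), which is the filename OpenSSL's hashed-directory lookup expects. A small Go equivalent, assuming openssl is on PATH (a sketch mirroring the commands above, not minikube source):

package main

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
)

func main() {
	pemPath := "/usr/share/ca-certificates/minikubeCA.pem"

	// Same command as in the log: print the subject hash, nothing else.
	out, err := exec.Command("openssl", "x509", "-hash", "-noout", "-in", pemPath).Output()
	if err != nil {
		panic(err)
	}
	hash := strings.TrimSpace(string(out)) // e.g. "b5213941"

	link := filepath.Join("/etc/ssl/certs", hash+".0")
	if _, err := os.Lstat(link); err == nil {
		fmt.Println("already linked:", link) // matches the test -L guard above
		return
	}
	if err := os.Symlink(pemPath, link); err != nil {
		panic(err)
	}
	fmt.Println("linked:", link, "->", pemPath)
}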
I1129 09:01:51.073278 494126 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1129 09:01:51.077328 494126 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1129 09:01:51.077396 494126 kubeadm.go:401] StartCluster: {Name:no-preload-924441 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-924441 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.103.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1129 09:01:51.077489 494126 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I1129 09:01:51.077532 494126 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I1129 09:01:51.106096 494126 cri.go:89] found id: ""
I1129 09:01:51.106183 494126 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1129 09:01:51.115333 494126 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1129 09:01:51.123937 494126 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I1129 09:01:51.124003 494126 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1129 09:01:51.132534 494126 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1129 09:01:51.132560 494126 kubeadm.go:158] found existing configuration files:
I1129 09:01:51.132605 494126 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1129 09:01:51.140877 494126 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1129 09:01:51.140937 494126 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1129 09:01:51.149370 494126 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1129 09:01:51.157660 494126 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1129 09:01:51.157716 494126 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1129 09:01:51.165600 494126 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1129 09:01:51.173968 494126 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1129 09:01:51.174023 494126 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1129 09:01:51.182141 494126 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1129 09:01:51.190488 494126 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1129 09:01:51.190548 494126 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1129 09:01:51.198568 494126 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1129 09:01:51.257848 494126 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1044-gcp\n", err: exit status 1
I1129 09:01:51.317135 494126 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1129 09:01:51.885035 460401 api_server.go:269] stopped: https://192.168.85.2:8443/healthz: Get "https://192.168.85.2:8443/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1129 09:01:51.885110 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1129 09:01:51.885188 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1129 09:01:51.917617 460401 cri.go:89] found id: "7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1"
I1129 09:01:51.917638 460401 cri.go:89] found id: "5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095"
I1129 09:01:51.917644 460401 cri.go:89] found id: "1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:01:51.917647 460401 cri.go:89] found id: ""
I1129 09:01:51.917655 460401 logs.go:282] 3 containers: [7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1 5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101]
I1129 09:01:51.917717 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:51.923877 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:51.929304 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:51.934465 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1129 09:01:51.934561 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1129 09:01:51.963685 460401 cri.go:89] found id: "f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:01:51.963708 460401 cri.go:89] found id: ""
I1129 09:01:51.963719 460401 logs.go:282] 1 containers: [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625]
I1129 09:01:51.963801 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:51.968956 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1129 09:01:51.969028 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1129 09:01:51.996971 460401 cri.go:89] found id: ""
I1129 09:01:51.997000 460401 logs.go:282] 0 containers: []
W1129 09:01:51.997007 460401 logs.go:284] No container was found matching "coredns"
I1129 09:01:51.997013 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1129 09:01:51.997078 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1129 09:01:52.028822 460401 cri.go:89] found id: "092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:01:52.028850 460401 cri.go:89] found id: "1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:01:52.028856 460401 cri.go:89] found id: ""
I1129 09:01:52.028866 460401 logs.go:282] 2 containers: [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea]
I1129 09:01:52.028936 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:52.034812 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:52.039943 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1129 09:01:52.040009 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1129 09:01:52.069835 460401 cri.go:89] found id: ""
I1129 09:01:52.069866 460401 logs.go:282] 0 containers: []
W1129 09:01:52.069878 460401 logs.go:284] No container was found matching "kube-proxy"
I1129 09:01:52.069886 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1129 09:01:52.069952 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1129 09:01:52.104321 460401 cri.go:89] found id: "2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:01:52.104340 460401 cri.go:89] found id: "976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:01:52.104344 460401 cri.go:89] found id: ""
I1129 09:01:52.104352 460401 logs.go:282] 2 containers: [2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d]
I1129 09:01:52.104402 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:52.109901 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:52.114778 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1129 09:01:52.114862 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1129 09:01:52.144981 460401 cri.go:89] found id: ""
I1129 09:01:52.145005 460401 logs.go:282] 0 containers: []
W1129 09:01:52.145013 460401 logs.go:284] No container was found matching "kindnet"
I1129 09:01:52.145019 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1129 09:01:52.145069 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1129 09:01:52.174604 460401 cri.go:89] found id: ""
I1129 09:01:52.174632 460401 logs.go:282] 0 containers: []
W1129 09:01:52.174641 460401 logs.go:284] No container was found matching "storage-provisioner"
I1129 09:01:52.174651 460401 logs.go:123] Gathering logs for kube-controller-manager [2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a] ...
I1129 09:01:52.174665 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:01:52.207427 460401 logs.go:123] Gathering logs for kube-controller-manager [976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d] ...
I1129 09:01:52.207458 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:01:52.249558 460401 logs.go:123] Gathering logs for containerd ...
I1129 09:01:52.249600 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1129 09:01:52.300742 460401 logs.go:123] Gathering logs for kubelet ...
I1129 09:01:52.300785 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1129 09:01:52.385321 460401 logs.go:123] Gathering logs for dmesg ...
I1129 09:01:52.385365 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1129 09:01:52.405491 460401 logs.go:123] Gathering logs for kube-apiserver [5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095] ...
I1129 09:01:52.405533 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095"
I1129 09:01:52.448465 460401 logs.go:123] Gathering logs for kube-apiserver [1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101] ...
I1129 09:01:52.448502 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:01:52.489466 460401 logs.go:123] Gathering logs for etcd [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625] ...
I1129 09:01:52.489506 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:01:52.534107 460401 logs.go:123] Gathering logs for kube-scheduler [1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea] ...
I1129 09:01:52.534146 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:01:52.572361 460401 logs.go:123] Gathering logs for container status ...
I1129 09:01:52.572401 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1129 09:01:52.606656 460401 logs.go:123] Gathering logs for describe nodes ...
I1129 09:01:52.606692 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
I1129 09:01:53.710005 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:54.209471 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:54.709414 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:55.209967 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:55.709378 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:56.210032 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:56.709982 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:57.209266 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:57.709968 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:58.209425 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:58.303052 493486 kubeadm.go:1114] duration metric: took 11.19438409s to wait for elevateKubeSystemPrivileges
I1129 09:01:58.303107 493486 kubeadm.go:403] duration metric: took 21.598001105s to StartCluster
I1129 09:01:58.303162 493486 settings.go:142] acquiring lock: {Name:mk6dbed29e5e99d89b1cbbd9e561d8f8791ae9ae Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:01:58.303278 493486 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/22000-255825/kubeconfig
I1129 09:01:58.305561 493486 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-255825/kubeconfig: {Name:mk7d91966efd00ccef892cf02f31ec14469accbd Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:01:58.305924 493486 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1129 09:01:58.306112 493486 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1129 09:01:58.306351 493486 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1129 09:01:58.306713 493486 addons.go:70] Setting storage-provisioner=true in profile "old-k8s-version-295154"
I1129 09:01:58.306776 493486 addons.go:70] Setting default-storageclass=true in profile "old-k8s-version-295154"
I1129 09:01:58.306795 493486 addons.go:239] Setting addon storage-provisioner=true in "old-k8s-version-295154"
I1129 09:01:58.306776 493486 config.go:182] Loaded profile config "old-k8s-version-295154": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1129 09:01:58.306807 493486 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "old-k8s-version-295154"
I1129 09:01:58.306834 493486 host.go:66] Checking if "old-k8s-version-295154" exists ...
I1129 09:01:58.307864 493486 out.go:179] * Verifying Kubernetes components...
I1129 09:01:58.307930 493486 cli_runner.go:164] Run: docker container inspect old-k8s-version-295154 --format={{.State.Status}}
I1129 09:01:58.308039 493486 cli_runner.go:164] Run: docker container inspect old-k8s-version-295154 --format={{.State.Status}}
I1129 09:01:58.309327 493486 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1129 09:01:58.335085 493486 addons.go:239] Setting addon default-storageclass=true in "old-k8s-version-295154"
I1129 09:01:58.335144 493486 host.go:66] Checking if "old-k8s-version-295154" exists ...
I1129 09:01:58.335642 493486 cli_runner.go:164] Run: docker container inspect old-k8s-version-295154 --format={{.State.Status}}
I1129 09:01:58.337139 493486 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1129 09:01:58.338693 493486 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1129 09:01:58.338716 493486 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1129 09:01:58.338899 493486 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-295154
I1129 09:01:58.368947 493486 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1129 09:01:58.368979 493486 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1129 09:01:58.369072 493486 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-295154
I1129 09:01:58.378680 493486 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33058 SSHKeyPath:/home/jenkins/minikube-integration/22000-255825/.minikube/machines/old-k8s-version-295154/id_rsa Username:docker}
I1129 09:01:58.399464 493486 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33058 SSHKeyPath:/home/jenkins/minikube-integration/22000-255825/.minikube/machines/old-k8s-version-295154/id_rsa Username:docker}
I1129 09:01:58.438617 493486 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.76.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I1129 09:01:58.498671 493486 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1129 09:01:58.528524 493486 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1129 09:01:58.536443 493486 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1129 09:01:58.718007 493486 start.go:977] {"host.minikube.internal": 192.168.76.1} host record injected into CoreDNS's ConfigMap
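Given the sed pipeline above, the rewritten Corefile should carry a hosts block (plus a log directive) inserted ahead of the forward plugin, roughly as follows; this fragment is reconstructed from the sed expression, not captured from the cluster, and elided plugins are marked "...":

.:53 {
    log
    errors
    ...
    hosts {
       192.168.76.1 host.minikube.internal
       fallthrough
    }
    forward . /etc/resolv.conf
    ...
}

The hosts plugin answers host.minikube.internal with the gateway IP and falls through to the remaining plugins for everything else, which is why the replace succeeding is logged as "host record injected" above.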
I1129 09:01:58.719713 493486 node_ready.go:35] waiting up to 6m0s for node "old-k8s-version-295154" to be "Ready" ...
I1129 09:01:58.976512 493486 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
I1129 09:02:01.574795 494126 kubeadm.go:319] [init] Using Kubernetes version: v1.34.1
I1129 09:02:01.574869 494126 kubeadm.go:319] [preflight] Running pre-flight checks
I1129 09:02:01.575071 494126 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I1129 09:02:01.575154 494126 kubeadm.go:319] KERNEL_VERSION: 6.8.0-1044-gcp
I1129 09:02:01.575204 494126 kubeadm.go:319] OS: Linux
I1129 09:02:01.575304 494126 kubeadm.go:319] CGROUPS_CPU: enabled
I1129 09:02:01.575403 494126 kubeadm.go:319] CGROUPS_CPUSET: enabled
I1129 09:02:01.575496 494126 kubeadm.go:319] CGROUPS_DEVICES: enabled
I1129 09:02:01.575567 494126 kubeadm.go:319] CGROUPS_FREEZER: enabled
I1129 09:02:01.575645 494126 kubeadm.go:319] CGROUPS_MEMORY: enabled
I1129 09:02:01.575713 494126 kubeadm.go:319] CGROUPS_PIDS: enabled
I1129 09:02:01.575809 494126 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I1129 09:02:01.575872 494126 kubeadm.go:319] CGROUPS_IO: enabled
I1129 09:02:01.575964 494126 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1129 09:02:01.576092 494126 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1129 09:02:01.576217 494126 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I1129 09:02:01.576325 494126 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1129 09:02:01.578171 494126 out.go:252] - Generating certificates and keys ...
I1129 09:02:01.578298 494126 kubeadm.go:319] [certs] Using existing ca certificate authority
I1129 09:02:01.578401 494126 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1129 09:02:01.578499 494126 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I1129 09:02:01.578589 494126 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I1129 09:02:01.578680 494126 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1129 09:02:01.578785 494126 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1129 09:02:01.578876 494126 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1129 09:02:01.579019 494126 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [localhost no-preload-924441] and IPs [192.168.103.2 127.0.0.1 ::1]
I1129 09:02:01.579122 494126 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1129 09:02:01.579311 494126 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [localhost no-preload-924441] and IPs [192.168.103.2 127.0.0.1 ::1]
I1129 09:02:01.579420 494126 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1129 09:02:01.579532 494126 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1129 09:02:01.579609 494126 kubeadm.go:319] [certs] Generating "sa" key and public key
I1129 09:02:01.579696 494126 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1129 09:02:01.579806 494126 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1129 09:02:01.579894 494126 kubeadm.go:319] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I1129 09:02:01.579971 494126 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1129 09:02:01.580076 494126 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1129 09:02:01.580125 494126 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1129 09:02:01.580195 494126 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1129 09:02:01.580259 494126 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1129 09:02:01.582121 494126 out.go:252] - Booting up control plane ...
I1129 09:02:01.582267 494126 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1129 09:02:01.582364 494126 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1129 09:02:01.582460 494126 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1129 09:02:01.582603 494126 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1129 09:02:01.582773 494126 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I1129 09:02:01.582902 494126 kubeadm.go:319] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I1129 09:02:01.583026 494126 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1129 09:02:01.583068 494126 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1129 09:02:01.583182 494126 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I1129 09:02:01.583325 494126 kubeadm.go:319] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I1129 09:02:01.583413 494126 kubeadm.go:319] [kubelet-check] The kubelet is healthy after 1.001845652s
I1129 09:02:01.583537 494126 kubeadm.go:319] [control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
I1129 09:02:01.583671 494126 kubeadm.go:319] [control-plane-check] Checking kube-apiserver at https://192.168.103.2:8443/livez
I1129 09:02:01.583787 494126 kubeadm.go:319] [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
I1129 09:02:01.583879 494126 kubeadm.go:319] [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
I1129 09:02:01.583985 494126 kubeadm.go:319] [control-plane-check] kube-controller-manager is healthy after 1.852889014s
I1129 09:02:01.584071 494126 kubeadm.go:319] [control-plane-check] kube-scheduler is healthy after 2.023243656s
I1129 09:02:01.584163 494126 kubeadm.go:319] [control-plane-check] kube-apiserver is healthy after 4.00195345s
I1129 09:02:01.584314 494126 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1129 09:02:01.584493 494126 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1129 09:02:01.584584 494126 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1129 09:02:01.584867 494126 kubeadm.go:319] [mark-control-plane] Marking the node no-preload-924441 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1129 09:02:01.584955 494126 kubeadm.go:319] [bootstrap-token] Using token: mvtuq7.pg2byk8o9fh5nfa2
I1129 09:02:01.587787 494126 out.go:252] - Configuring RBAC rules ...
I1129 09:02:01.587916 494126 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1129 09:02:01.588028 494126 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1129 09:02:01.588232 494126 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1129 09:02:01.588384 494126 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1129 09:02:01.588517 494126 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1129 09:02:01.588635 494126 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1129 09:02:01.588779 494126 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1129 09:02:01.588837 494126 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1129 09:02:01.588907 494126 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1129 09:02:01.588916 494126 kubeadm.go:319]
I1129 09:02:01.589016 494126 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1129 09:02:01.589032 494126 kubeadm.go:319]
I1129 09:02:01.589151 494126 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1129 09:02:01.589160 494126 kubeadm.go:319]
I1129 09:02:01.589205 494126 kubeadm.go:319] mkdir -p $HOME/.kube
I1129 09:02:01.589280 494126 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1129 09:02:01.589374 494126 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1129 09:02:01.589388 494126 kubeadm.go:319]
I1129 09:02:01.589465 494126 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1129 09:02:01.589473 494126 kubeadm.go:319]
I1129 09:02:01.589554 494126 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1129 09:02:01.589563 494126 kubeadm.go:319]
I1129 09:02:01.589607 494126 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1129 09:02:01.589671 494126 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1129 09:02:01.589782 494126 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1129 09:02:01.589795 494126 kubeadm.go:319]
I1129 09:02:01.589906 494126 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1129 09:02:01.590049 494126 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1129 09:02:01.590058 494126 kubeadm.go:319]
I1129 09:02:01.590132 494126 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token mvtuq7.pg2byk8o9fh5nfa2 \
I1129 09:02:01.590268 494126 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:cfb13a4080e942b53ddf5e01885fcdd270ac918e177076400130991e2b6b7778 \
I1129 09:02:01.590302 494126 kubeadm.go:319] --control-plane
I1129 09:02:01.590309 494126 kubeadm.go:319]
I1129 09:02:01.590425 494126 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1129 09:02:01.590434 494126 kubeadm.go:319]
I1129 09:02:01.590567 494126 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token mvtuq7.pg2byk8o9fh5nfa2 \
I1129 09:02:01.590744 494126 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:cfb13a4080e942b53ddf5e01885fcdd270ac918e177076400130991e2b6b7778
I1129 09:02:01.590761 494126 cni.go:84] Creating CNI manager for ""
I1129 09:02:01.590770 494126 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1129 09:02:01.592367 494126 out.go:179] * Configuring CNI (Container Networking Interface) ...
I1129 09:01:58.977447 493486 addons.go:530] duration metric: took 671.096745ms for enable addons: enabled=[storage-provisioner default-storageclass]
I1129 09:01:59.226693 493486 kapi.go:214] "coredns" deployment in "kube-system" namespace and "old-k8s-version-295154" context rescaled to 1 replicas
W1129 09:02:00.723077 493486 node_ready.go:57] node "old-k8s-version-295154" has "Ready":"False" status (will retry)
W1129 09:02:02.723240 493486 node_ready.go:57] node "old-k8s-version-295154" has "Ready":"False" status (will retry)
I1129 09:02:01.593492 494126 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I1129 09:02:01.598544 494126 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.34.1/kubectl ...
I1129 09:02:01.598567 494126 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I1129 09:02:01.615144 494126 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I1129 09:02:01.883935 494126 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1129 09:02:01.884024 494126 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:02:01.884114 494126 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes no-preload-924441 minikube.k8s.io/updated_at=2025_11_29T09_02_01_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=d0eb20ec824c82ab3f24099c8b785e0a2a5789af minikube.k8s.io/name=no-preload-924441 minikube.k8s.io/primary=true
I1129 09:02:01.969638 494126 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:02:01.982178 494126 ops.go:34] apiserver oom_adj: -16
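The ops.go check above (cat /proc/$(pgrep kube-apiserver)/oom_adj) confirms kubeadm started the apiserver with a strongly negative OOM score adjustment, so the kernel OOM killer prefers almost any other process first. The same probe in Go (a sketch; pgrep -o picks the oldest matching process, which assumes a single-apiserver node):

package main

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
)

func main() {
	// Find the apiserver PID, as the pgrep in the log does.
	pid, err := exec.Command("pgrep", "-o", "kube-apiserver").Output()
	if err != nil {
		panic(err)
	}
	// Read the (legacy) oom_adj knob straight from /proc.
	data, err := os.ReadFile("/proc/" + strings.TrimSpace(string(pid)) + "/oom_adj")
	if err != nil {
		panic(err)
	}
	fmt.Println("apiserver oom_adj:", strings.TrimSpace(string(data))) // e.g. -16
}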
I1129 09:02:02.470301 494126 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:02:02.969878 494126 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:02:03.470379 494126 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:02:03.970554 494126 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:02:04.469853 494126 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:02:02.669495 460401 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": (10.062771993s)
W1129 09:02:02.669547 460401 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
Unable to connect to the server: net/http: TLS handshake timeout
output:
** stderr **
Unable to connect to the server: net/http: TLS handshake timeout
** /stderr **
I1129 09:02:02.669577 460401 logs.go:123] Gathering logs for kube-apiserver [7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1] ...
I1129 09:02:02.669596 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1"
I1129 09:02:02.710559 460401 logs.go:123] Gathering logs for kube-scheduler [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21] ...
I1129 09:02:02.710605 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:02:04.970119 494126 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:02:05.470767 494126 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:02:05.969852 494126 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:02:06.052010 494126 kubeadm.go:1114] duration metric: took 4.168052566s to wait for elevateKubeSystemPrivileges
I1129 09:02:06.052057 494126 kubeadm.go:403] duration metric: took 14.974666914s to StartCluster
I1129 09:02:06.052081 494126 settings.go:142] acquiring lock: {Name:mk6dbed29e5e99d89b1cbbd9e561d8f8791ae9ae Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:02:06.052174 494126 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/22000-255825/kubeconfig
I1129 09:02:06.054258 494126 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-255825/kubeconfig: {Name:mk7d91966efd00ccef892cf02f31ec14469accbd Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:02:06.054571 494126 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1129 09:02:06.054563 494126 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.103.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1129 09:02:06.054635 494126 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1129 09:02:06.054874 494126 config.go:182] Loaded profile config "no-preload-924441": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1129 09:02:06.054888 494126 addons.go:70] Setting storage-provisioner=true in profile "no-preload-924441"
I1129 09:02:06.054933 494126 addons.go:70] Setting default-storageclass=true in profile "no-preload-924441"
I1129 09:02:06.054947 494126 addons.go:239] Setting addon storage-provisioner=true in "no-preload-924441"
I1129 09:02:06.054963 494126 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "no-preload-924441"
I1129 09:02:06.055012 494126 host.go:66] Checking if "no-preload-924441" exists ...
I1129 09:02:06.055424 494126 cli_runner.go:164] Run: docker container inspect no-preload-924441 --format={{.State.Status}}
I1129 09:02:06.055667 494126 cli_runner.go:164] Run: docker container inspect no-preload-924441 --format={{.State.Status}}
I1129 09:02:06.056967 494126 out.go:179] * Verifying Kubernetes components...
I1129 09:02:06.060417 494126 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1129 09:02:06.083076 494126 addons.go:239] Setting addon default-storageclass=true in "no-preload-924441"
I1129 09:02:06.083127 494126 host.go:66] Checking if "no-preload-924441" exists ...
I1129 09:02:06.083615 494126 cli_runner.go:164] Run: docker container inspect no-preload-924441 --format={{.State.Status}}
I1129 09:02:06.086028 494126 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1129 09:02:06.087100 494126 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1129 09:02:06.087121 494126 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1129 09:02:06.087200 494126 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-924441
I1129 09:02:06.110337 494126 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1129 09:02:06.110366 494126 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1129 09:02:06.111183 494126 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-924441
I1129 09:02:06.116769 494126 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/22000-255825/.minikube/machines/no-preload-924441/id_rsa Username:docker}
I1129 09:02:06.140007 494126 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/22000-255825/.minikube/machines/no-preload-924441/id_rsa Username:docker}
I1129 09:02:06.151655 494126 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.103.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I1129 09:02:06.208406 494126 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1129 09:02:06.241470 494126 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1129 09:02:06.273558 494126 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1129 09:02:06.324896 494126 start.go:977] {"host.minikube.internal": 192.168.103.1} host record injected into CoreDNS's ConfigMap
I1129 09:02:06.327889 494126 node_ready.go:35] waiting up to 6m0s for node "no-preload-924441" to be "Ready" ...
I1129 09:02:06.574594 494126 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
W1129 09:02:05.223590 493486 node_ready.go:57] node "old-k8s-version-295154" has "Ready":"False" status (will retry)
W1129 09:02:07.223929 493486 node_ready.go:57] node "old-k8s-version-295154" has "Ready":"False" status (will retry)
I1129 09:02:06.575644 494126 addons.go:530] duration metric: took 521.007476ms for enable addons: enabled=[storage-provisioner default-storageclass]
I1129 09:02:06.830448 494126 kapi.go:214] "coredns" deployment in "kube-system" namespace and "no-preload-924441" context rescaled to 1 replicas
W1129 09:02:08.331406 494126 node_ready.go:57] node "no-preload-924441" has "Ready":"False" status (will retry)
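Both profiles are now in the node_ready.go poll loop: the node object is re-read until its Ready condition reports True, for up to 6m0s, with the "will retry" warnings above logged on each miss. A kubectl-based stand-in for that wait (a sketch, assuming kubectl points at the same kubeconfig; not minikube's client-go implementation):

package main

import (
	"fmt"
	"os/exec"
	"strings"
	"time"
)

func main() {
	const node = "no-preload-924441"
	jsonpath := `-o=jsonpath={.status.conditions[?(@.type=="Ready")].status}`
	deadline := time.Now().Add(6 * time.Minute)
	for time.Now().Before(deadline) {
		out, err := exec.Command("kubectl", "get", "node", node, jsonpath).Output()
		if err == nil && strings.TrimSpace(string(out)) == "True" {
			fmt.Println("node is Ready")
			return
		}
		time.Sleep(2 * time.Second) // retry, as the warnings above do
	}
	fmt.Println("timed out waiting for Ready")
}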
I1129 09:02:05.259668 460401 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
I1129 09:02:07.201576 460401 api_server.go:269] stopped: https://192.168.85.2:8443/healthz: Get "https://192.168.85.2:8443/healthz": read tcp 192.168.85.1:43246->192.168.85.2:8443: read: connection reset by peer
I1129 09:02:07.201690 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1129 09:02:07.201778 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1129 09:02:07.234753 460401 cri.go:89] found id: "7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1"
I1129 09:02:07.234781 460401 cri.go:89] found id: "5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095"
I1129 09:02:07.234788 460401 cri.go:89] found id: "1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:02:07.234793 460401 cri.go:89] found id: ""
I1129 09:02:07.234804 460401 logs.go:282] 3 containers: [7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1 5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101]
I1129 09:02:07.234869 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:07.240257 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:07.245641 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:07.251131 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1129 09:02:07.251196 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1129 09:02:07.280579 460401 cri.go:89] found id: "f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:02:07.280608 460401 cri.go:89] found id: ""
I1129 09:02:07.280621 460401 logs.go:282] 1 containers: [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625]
I1129 09:02:07.280682 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:07.286123 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1129 09:02:07.286213 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1129 09:02:07.317491 460401 cri.go:89] found id: ""
I1129 09:02:07.317519 460401 logs.go:282] 0 containers: []
W1129 09:02:07.317528 460401 logs.go:284] No container was found matching "coredns"
I1129 09:02:07.317534 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1129 09:02:07.317586 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1129 09:02:07.347513 460401 cri.go:89] found id: "092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:02:07.347534 460401 cri.go:89] found id: "1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:02:07.347538 460401 cri.go:89] found id: ""
I1129 09:02:07.347546 460401 logs.go:282] 2 containers: [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea]
I1129 09:02:07.347610 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:07.353144 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:07.358223 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1129 09:02:07.358303 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1129 09:02:07.387488 460401 cri.go:89] found id: ""
I1129 09:02:07.387516 460401 logs.go:282] 0 containers: []
W1129 09:02:07.387525 460401 logs.go:284] No container was found matching "kube-proxy"
I1129 09:02:07.387532 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1129 09:02:07.387595 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1129 09:02:07.418490 460401 cri.go:89] found id: "c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6"
I1129 09:02:07.418512 460401 cri.go:89] found id: "2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:02:07.418516 460401 cri.go:89] found id: "976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:02:07.418519 460401 cri.go:89] found id: ""
I1129 09:02:07.418527 460401 logs.go:282] 3 containers: [c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6 2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d]
I1129 09:02:07.418587 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:07.423956 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:07.429140 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:07.434196 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1129 09:02:07.434281 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1129 09:02:07.463114 460401 cri.go:89] found id: ""
I1129 09:02:07.463138 460401 logs.go:282] 0 containers: []
W1129 09:02:07.463148 460401 logs.go:284] No container was found matching "kindnet"
I1129 09:02:07.463156 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1129 09:02:07.463222 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1129 09:02:07.494533 460401 cri.go:89] found id: ""
I1129 09:02:07.494567 460401 logs.go:282] 0 containers: []
W1129 09:02:07.494579 460401 logs.go:284] No container was found matching "storage-provisioner"
I1129 09:02:07.494592 460401 logs.go:123] Gathering logs for containerd ...
I1129 09:02:07.494604 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1129 09:02:07.546238 460401 logs.go:123] Gathering logs for kubelet ...
I1129 09:02:07.546282 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1129 09:02:07.634664 460401 logs.go:123] Gathering logs for describe nodes ...
I1129 09:02:07.634702 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1129 09:02:07.696753 460401 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1129 09:02:07.696779 460401 logs.go:123] Gathering logs for kube-apiserver [7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1] ...
I1129 09:02:07.696796 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1"
I1129 09:02:07.733303 460401 logs.go:123] Gathering logs for kube-scheduler [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21] ...
I1129 09:02:07.733343 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:02:07.786770 460401 logs.go:123] Gathering logs for kube-scheduler [1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea] ...
I1129 09:02:07.786809 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:02:07.824791 460401 logs.go:123] Gathering logs for kube-controller-manager [c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6] ...
I1129 09:02:07.824831 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6"
I1129 09:02:07.857029 460401 logs.go:123] Gathering logs for container status ...
I1129 09:02:07.857058 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1129 09:02:07.892009 460401 logs.go:123] Gathering logs for dmesg ...
I1129 09:02:07.892046 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1129 09:02:07.907552 460401 logs.go:123] Gathering logs for kube-apiserver [5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095] ...
I1129 09:02:07.907596 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095"
W1129 09:02:07.937558 460401 logs.go:130] failed kube-apiserver [5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095]: command: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095" /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095": Process exited with status 1
stdout:
stderr:
E1129 09:02:07.934436 4413 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = an error occurred when try to find container \"5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095\": not found" containerID="5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095"
time="2025-11-29T09:02:07Z" level=fatal msg="rpc error: code = NotFound desc = an error occurred when try to find container \"5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095\": not found"
output:
** stderr **
E1129 09:02:07.934436 4413 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = an error occurred when try to find container \"5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095\": not found" containerID="5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095"
time="2025-11-29T09:02:07Z" level=fatal msg="rpc error: code = NotFound desc = an error occurred when try to find container \"5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095\": not found"
** /stderr **
I1129 09:02:07.937577 460401 logs.go:123] Gathering logs for kube-apiserver [1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101] ...
I1129 09:02:07.937591 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:02:07.976501 460401 logs.go:123] Gathering logs for etcd [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625] ...
I1129 09:02:07.976553 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:02:08.017968 460401 logs.go:123] Gathering logs for kube-controller-manager [2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a] ...
I1129 09:02:08.018008 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:02:08.049057 460401 logs.go:123] Gathering logs for kube-controller-manager [976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d] ...
I1129 09:02:08.049090 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
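
[editor's sketch] The cycle above (api_server.go probes /healthz, and on failure cri.go enumerates control-plane containers via crictl before gathering their logs) repeats throughout this run. A minimal, self-contained Go sketch of that pattern — not minikube's actual code; endpoint, sudo use, and TLS handling here are assumptions — looks like:

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"os/exec"
	"strings"
	"time"
)

// checkHealthz mirrors api_server.go:253/269: GET <endpoint>/healthz and
// treat any transport error or non-200 response as "stopped".
func checkHealthz(endpoint string) error {
	client := &http.Client{
		Timeout: 5 * time.Second,
		// The apiserver serves a self-signed cert during bring-up, so this
		// sketch skips verification (assumption; real code trusts the cluster CA).
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}
	resp, err := client.Get(endpoint + "/healthz")
	if err != nil {
		return fmt.Errorf("stopped: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("healthz returned %d", resp.StatusCode)
	}
	return nil
}

// listContainers mirrors cri.go:54/89: `crictl ps -a --quiet --name=<name>`
// prints one container ID per line; empty output means zero containers.
func listContainers(name string) ([]string, error) {
	out, err := exec.Command("sudo", "crictl", "ps", "-a", "--quiet", "--name="+name).Output()
	if err != nil {
		return nil, err
	}
	return strings.Fields(string(out)), nil
}

func main() {
	if err := checkHealthz("https://192.168.85.2:8443"); err != nil {
		fmt.Println(err)
		for _, comp := range []string{"kube-apiserver", "etcd", "kube-scheduler"} {
			ids, _ := listContainers(comp)
			fmt.Printf("%d containers for %s: %v\n", len(ids), comp, ids)
		}
	}
}
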
W1129 09:02:09.723662 493486 node_ready.go:57] node "old-k8s-version-295154" has "Ready":"False" status (will retry)
W1129 09:02:12.223024 493486 node_ready.go:57] node "old-k8s-version-295154" has "Ready":"False" status (will retry)
I1129 09:02:13.224090 493486 node_ready.go:49] node "old-k8s-version-295154" is "Ready"
I1129 09:02:13.224128 493486 node_ready.go:38] duration metric: took 14.504358398s for node "old-k8s-version-295154" to be "Ready" ...
I1129 09:02:13.224148 493486 api_server.go:52] waiting for apiserver process to appear ...
I1129 09:02:13.224211 493486 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1129 09:02:13.243313 493486 api_server.go:72] duration metric: took 14.93733902s to wait for apiserver process to appear ...
I1129 09:02:13.243343 493486 api_server.go:88] waiting for apiserver healthz status ...
I1129 09:02:13.243370 493486 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1129 09:02:13.250694 493486 api_server.go:279] https://192.168.76.2:8443/healthz returned 200:
ok
I1129 09:02:13.251984 493486 api_server.go:141] control plane version: v1.28.0
I1129 09:02:13.252015 493486 api_server.go:131] duration metric: took 8.663278ms to wait for apiserver health ...
I1129 09:02:13.252026 493486 system_pods.go:43] waiting for kube-system pods to appear ...
I1129 09:02:13.255767 493486 system_pods.go:59] 8 kube-system pods found
I1129 09:02:13.255813 493486 system_pods.go:61] "coredns-5dd5756b68-phw28" [7fc2b8dd-43dd-43df-8887-9ffa6de36fb4] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1129 09:02:13.255822 493486 system_pods.go:61] "etcd-old-k8s-version-295154" [b49cf7c8-8d72-4db9-a96f-d796fd8d9e08] Running
I1129 09:02:13.255829 493486 system_pods.go:61] "kindnet-k4n9l" [74cdf2cd-3f3a-4be5-9a9f-6d0b67090fb8] Running
I1129 09:02:13.255835 493486 system_pods.go:61] "kube-apiserver-old-k8s-version-295154" [e4ca0771-197f-4d77-97f0-7a7778e227de] Running
I1129 09:02:13.255841 493486 system_pods.go:61] "kube-controller-manager-old-k8s-version-295154" [6825ac68-da0d-474d-ac97-53398adffd73] Running
I1129 09:02:13.255847 493486 system_pods.go:61] "kube-proxy-4rfb4" [05ef67c3-0d6e-453d-a0e5-81c649c3e033] Running
I1129 09:02:13.255853 493486 system_pods.go:61] "kube-scheduler-old-k8s-version-295154" [97d5e6fb-5cb8-4a03-a8df-3f76df5b2671] Running
I1129 09:02:13.255860 493486 system_pods.go:61] "storage-provisioner" [359871fd-a77c-430a-87c1-b313992718e2] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1129 09:02:13.255869 493486 system_pods.go:74] duration metric: took 3.834915ms to wait for pod list to return data ...
I1129 09:02:13.255879 493486 default_sa.go:34] waiting for default service account to be created ...
I1129 09:02:13.259936 493486 default_sa.go:45] found service account: "default"
I1129 09:02:13.259965 493486 default_sa.go:55] duration metric: took 4.078247ms for default service account to be created ...
I1129 09:02:13.259977 493486 system_pods.go:116] waiting for k8s-apps to be running ...
I1129 09:02:13.264489 493486 system_pods.go:86] 8 kube-system pods found
I1129 09:02:13.264528 493486 system_pods.go:89] "coredns-5dd5756b68-phw28" [7fc2b8dd-43dd-43df-8887-9ffa6de36fb4] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1129 09:02:13.264536 493486 system_pods.go:89] "etcd-old-k8s-version-295154" [b49cf7c8-8d72-4db9-a96f-d796fd8d9e08] Running
I1129 09:02:13.264545 493486 system_pods.go:89] "kindnet-k4n9l" [74cdf2cd-3f3a-4be5-9a9f-6d0b67090fb8] Running
I1129 09:02:13.264554 493486 system_pods.go:89] "kube-apiserver-old-k8s-version-295154" [e4ca0771-197f-4d77-97f0-7a7778e227de] Running
I1129 09:02:13.264562 493486 system_pods.go:89] "kube-controller-manager-old-k8s-version-295154" [6825ac68-da0d-474d-ac97-53398adffd73] Running
I1129 09:02:13.264567 493486 system_pods.go:89] "kube-proxy-4rfb4" [05ef67c3-0d6e-453d-a0e5-81c649c3e033] Running
I1129 09:02:13.264572 493486 system_pods.go:89] "kube-scheduler-old-k8s-version-295154" [97d5e6fb-5cb8-4a03-a8df-3f76df5b2671] Running
I1129 09:02:13.264586 493486 system_pods.go:89] "storage-provisioner" [359871fd-a77c-430a-87c1-b313992718e2] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1129 09:02:13.264615 493486 retry.go:31] will retry after 309.906184ms: missing components: kube-dns
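
[editor's sketch] The retry.go:31 lines above show a poll-and-backoff wait for kube-system pods. As a rough illustration only — listRunning is a hypothetical stand-in for the real API query, and the backoff window is an assumption chosen to echo the ~300ms delays in the log — the loop boils down to:

package main

import (
	"errors"
	"fmt"
	"math/rand"
	"time"
)

// listRunning is hypothetical: imagine it returns which expected
// components currently have Running pods. Here kube-dns stays absent.
func listRunning() map[string]bool {
	return map[string]bool{"etcd": true, "kube-apiserver": true}
}

func waitForComponents(expected []string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		running := listRunning()
		var missing []string
		for _, c := range expected {
			if !running[c] {
				missing = append(missing, c)
			}
		}
		if len(missing) == 0 {
			return nil
		}
		// Jittered backoff, echoing "will retry after 309.906184ms".
		d := time.Duration(250+rand.Intn(200)) * time.Millisecond
		fmt.Printf("will retry after %v: missing components: %v\n", d, missing)
		time.Sleep(d)
	}
	return errors.New("timed out waiting for components")
}

func main() {
	_ = waitForComponents([]string{"etcd", "kube-apiserver", "kube-dns"}, 2*time.Second)
}
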
W1129 09:02:10.832100 494126 node_ready.go:57] node "no-preload-924441" has "Ready":"False" status (will retry)
W1129 09:02:13.330706 494126 node_ready.go:57] node "no-preload-924441" has "Ready":"False" status (will retry)
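
[editor's sketch] The node_ready.go:57/49 pairs above follow the same shape: poll the node's Ready condition and retry while it reports "False". A minimal sketch, with readyStatus as a hypothetical stand-in for the client-go condition lookup and a shortened sleep (the real loop waits on the order of seconds):

package main

import (
	"fmt"
	"time"
)

var polls int

// readyStatus is hypothetical; here it flips to "True" on the third poll
// to simulate the node becoming Ready.
func readyStatus(node string) string {
	polls++
	if polls < 3 {
		return "False"
	}
	return "True"
}

func main() {
	node := "no-preload-924441"
	start := time.Now()
	for {
		if s := readyStatus(node); s != "True" {
			fmt.Printf("node %q has \"Ready\":%q status (will retry)\n", node, s)
			time.Sleep(100 * time.Millisecond)
			continue
		}
		break
	}
	fmt.Printf("node %q is \"Ready\" (took %s)\n", node, time.Since(start))
}
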
I1129 09:02:10.584596 460401 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
I1129 09:02:10.585082 460401 api_server.go:269] stopped: https://192.168.85.2:8443/healthz: Get "https://192.168.85.2:8443/healthz": dial tcp 192.168.85.2:8443: connect: connection refused
I1129 09:02:10.585139 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1129 09:02:10.585192 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1129 09:02:10.615813 460401 cri.go:89] found id: "7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1"
I1129 09:02:10.615833 460401 cri.go:89] found id: "1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:02:10.615837 460401 cri.go:89] found id: ""
I1129 09:02:10.615846 460401 logs.go:282] 2 containers: [7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101]
I1129 09:02:10.615910 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:10.621079 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:10.625927 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1129 09:02:10.626017 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1129 09:02:10.655780 460401 cri.go:89] found id: "f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:02:10.655808 460401 cri.go:89] found id: ""
I1129 09:02:10.655817 460401 logs.go:282] 1 containers: [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625]
I1129 09:02:10.655877 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:10.661197 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1129 09:02:10.661278 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1129 09:02:10.692401 460401 cri.go:89] found id: ""
I1129 09:02:10.692423 460401 logs.go:282] 0 containers: []
W1129 09:02:10.692431 460401 logs.go:284] No container was found matching "coredns"
I1129 09:02:10.692436 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1129 09:02:10.692496 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1129 09:02:10.721278 460401 cri.go:89] found id: "092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:02:10.721303 460401 cri.go:89] found id: "1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:02:10.721309 460401 cri.go:89] found id: ""
I1129 09:02:10.721320 460401 logs.go:282] 2 containers: [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea]
I1129 09:02:10.721387 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:10.726913 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:10.731556 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1129 09:02:10.731637 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1129 09:02:10.759345 460401 cri.go:89] found id: ""
I1129 09:02:10.759373 460401 logs.go:282] 0 containers: []
W1129 09:02:10.759381 460401 logs.go:284] No container was found matching "kube-proxy"
I1129 09:02:10.759386 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1129 09:02:10.759446 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1129 09:02:10.790190 460401 cri.go:89] found id: "c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6"
I1129 09:02:10.790215 460401 cri.go:89] found id: "2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:02:10.790221 460401 cri.go:89] found id: "976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:02:10.790226 460401 cri.go:89] found id: ""
I1129 09:02:10.790236 460401 logs.go:282] 3 containers: [c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6 2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d]
I1129 09:02:10.790305 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:10.795588 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:10.800622 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:10.805263 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1129 09:02:10.805338 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1129 09:02:10.834942 460401 cri.go:89] found id: ""
I1129 09:02:10.834973 460401 logs.go:282] 0 containers: []
W1129 09:02:10.834991 460401 logs.go:284] No container was found matching "kindnet"
I1129 09:02:10.834999 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1129 09:02:10.835065 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1129 09:02:10.872503 460401 cri.go:89] found id: ""
I1129 09:02:10.872536 460401 logs.go:282] 0 containers: []
W1129 09:02:10.872547 460401 logs.go:284] No container was found matching "storage-provisioner"
I1129 09:02:10.872562 460401 logs.go:123] Gathering logs for kube-scheduler [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21] ...
I1129 09:02:10.872586 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:02:10.926644 460401 logs.go:123] Gathering logs for kube-scheduler [1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea] ...
I1129 09:02:10.926681 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:02:10.965025 460401 logs.go:123] Gathering logs for kube-controller-manager [2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a] ...
I1129 09:02:10.965069 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:02:10.998068 460401 logs.go:123] Gathering logs for containerd ...
I1129 09:02:10.998102 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1129 09:02:11.043686 460401 logs.go:123] Gathering logs for kubelet ...
I1129 09:02:11.043743 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1129 09:02:11.134380 460401 logs.go:123] Gathering logs for dmesg ...
I1129 09:02:11.134422 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1129 09:02:11.150475 460401 logs.go:123] Gathering logs for describe nodes ...
I1129 09:02:11.150510 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1129 09:02:11.210329 460401 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1129 09:02:11.210348 460401 logs.go:123] Gathering logs for etcd [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625] ...
I1129 09:02:11.210364 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:02:11.250422 460401 logs.go:123] Gathering logs for kube-controller-manager [c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6] ...
I1129 09:02:11.250457 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6"
I1129 09:02:11.280219 460401 logs.go:123] Gathering logs for kube-controller-manager [976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d] ...
I1129 09:02:11.280255 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:02:11.315565 460401 logs.go:123] Gathering logs for container status ...
I1129 09:02:11.315596 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1129 09:02:11.349327 460401 logs.go:123] Gathering logs for kube-apiserver [7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1] ...
I1129 09:02:11.349358 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1"
I1129 09:02:11.384696 460401 logs.go:123] Gathering logs for kube-apiserver [1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101] ...
I1129 09:02:11.384729 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:02:13.923850 460401 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
I1129 09:02:13.924341 460401 api_server.go:269] stopped: https://192.168.85.2:8443/healthz: Get "https://192.168.85.2:8443/healthz": dial tcp 192.168.85.2:8443: connect: connection refused
I1129 09:02:13.924398 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1129 09:02:13.924461 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1129 09:02:13.954410 460401 cri.go:89] found id: "7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1"
I1129 09:02:13.954430 460401 cri.go:89] found id: "1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:02:13.954434 460401 cri.go:89] found id: ""
I1129 09:02:13.954442 460401 logs.go:282] 2 containers: [7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101]
I1129 09:02:13.954501 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:13.959624 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:13.964312 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1129 09:02:13.964377 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1129 09:02:13.992596 460401 cri.go:89] found id: "f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:02:13.992625 460401 cri.go:89] found id: ""
I1129 09:02:13.992636 460401 logs.go:282] 1 containers: [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625]
I1129 09:02:13.992703 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:13.998893 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1129 09:02:13.998972 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1129 09:02:14.028106 460401 cri.go:89] found id: ""
I1129 09:02:14.028140 460401 logs.go:282] 0 containers: []
W1129 09:02:14.028152 460401 logs.go:284] No container was found matching "coredns"
I1129 09:02:14.028161 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1129 09:02:14.028230 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1129 09:02:14.057393 460401 cri.go:89] found id: "092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:02:14.057414 460401 cri.go:89] found id: "1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:02:14.057418 460401 cri.go:89] found id: ""
I1129 09:02:14.057427 460401 logs.go:282] 2 containers: [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea]
I1129 09:02:14.057482 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:14.062623 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:14.067579 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1129 09:02:14.067654 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1129 09:02:14.102801 460401 cri.go:89] found id: ""
I1129 09:02:14.102840 460401 logs.go:282] 0 containers: []
W1129 09:02:14.102853 460401 logs.go:284] No container was found matching "kube-proxy"
I1129 09:02:14.102860 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1129 09:02:14.102925 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1129 09:02:14.135951 460401 cri.go:89] found id: "c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6"
I1129 09:02:14.135979 460401 cri.go:89] found id: "2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:02:14.135985 460401 cri.go:89] found id: "976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:02:14.135988 460401 cri.go:89] found id: ""
I1129 09:02:14.135998 460401 logs.go:282] 3 containers: [c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6 2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d]
I1129 09:02:14.136064 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:14.141983 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:14.147316 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:14.152463 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1129 09:02:14.152555 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1129 09:02:14.181365 460401 cri.go:89] found id: ""
I1129 09:02:14.181398 460401 logs.go:282] 0 containers: []
W1129 09:02:14.181409 460401 logs.go:284] No container was found matching "kindnet"
I1129 09:02:14.181417 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1129 09:02:14.181477 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1129 09:02:14.210267 460401 cri.go:89] found id: ""
I1129 09:02:14.210292 460401 logs.go:282] 0 containers: []
W1129 09:02:14.210300 460401 logs.go:284] No container was found matching "storage-provisioner"
I1129 09:02:14.210310 460401 logs.go:123] Gathering logs for kubelet ...
I1129 09:02:14.210323 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1129 09:02:14.298625 460401 logs.go:123] Gathering logs for dmesg ...
I1129 09:02:14.298662 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1129 09:02:14.315504 460401 logs.go:123] Gathering logs for etcd [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625] ...
I1129 09:02:14.315529 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:02:14.357098 460401 logs.go:123] Gathering logs for kube-scheduler [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21] ...
I1129 09:02:14.357134 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:02:14.407082 460401 logs.go:123] Gathering logs for kube-scheduler [1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea] ...
I1129 09:02:14.407133 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:02:14.441442 460401 logs.go:123] Gathering logs for kube-controller-manager [976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d] ...
I1129 09:02:14.441482 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:02:14.476419 460401 logs.go:123] Gathering logs for container status ...
I1129 09:02:14.476452 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
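
[editor's sketch] The "Gathering logs for ..." phase (logs.go:123) maps each source to a shell command on the node, and failures are logged at W level (logs.go:130) rather than aborting the post-mortem. A local, simplified sketch — the real harness runs these over SSH and attaches the output to the test log, which this does not:

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	sources := map[string]string{
		"kubelet":    "sudo journalctl -u kubelet -n 400",
		"containerd": "sudo journalctl -u containerd -n 400",
		"dmesg":      "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400",
	}
	for name, cmd := range sources {
		fmt.Printf("Gathering logs for %s ...\n", name)
		if _, err := exec.Command("/bin/bash", "-c", cmd).CombinedOutput(); err != nil {
			// Mirrors the W-level "failed ..." entries: record and keep going.
			fmt.Printf("failed %s: %v\n", name, err)
		}
	}
}
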
I1129 09:02:13.579150 493486 system_pods.go:86] 8 kube-system pods found
I1129 09:02:13.579183 493486 system_pods.go:89] "coredns-5dd5756b68-phw28" [7fc2b8dd-43dd-43df-8887-9ffa6de36fb4] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1129 09:02:13.579189 493486 system_pods.go:89] "etcd-old-k8s-version-295154" [b49cf7c8-8d72-4db9-a96f-d796fd8d9e08] Running
I1129 09:02:13.579195 493486 system_pods.go:89] "kindnet-k4n9l" [74cdf2cd-3f3a-4be5-9a9f-6d0b67090fb8] Running
I1129 09:02:13.579199 493486 system_pods.go:89] "kube-apiserver-old-k8s-version-295154" [e4ca0771-197f-4d77-97f0-7a7778e227de] Running
I1129 09:02:13.579203 493486 system_pods.go:89] "kube-controller-manager-old-k8s-version-295154" [6825ac68-da0d-474d-ac97-53398adffd73] Running
I1129 09:02:13.579206 493486 system_pods.go:89] "kube-proxy-4rfb4" [05ef67c3-0d6e-453d-a0e5-81c649c3e033] Running
I1129 09:02:13.579210 493486 system_pods.go:89] "kube-scheduler-old-k8s-version-295154" [97d5e6fb-5cb8-4a03-a8df-3f76df5b2671] Running
I1129 09:02:13.579220 493486 system_pods.go:89] "storage-provisioner" [359871fd-a77c-430a-87c1-b313992718e2] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1129 09:02:13.579237 493486 retry.go:31] will retry after 360.039109ms: missing components: kube-dns
I1129 09:02:13.944039 493486 system_pods.go:86] 8 kube-system pods found
I1129 09:02:13.944084 493486 system_pods.go:89] "coredns-5dd5756b68-phw28" [7fc2b8dd-43dd-43df-8887-9ffa6de36fb4] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1129 09:02:13.944094 493486 system_pods.go:89] "etcd-old-k8s-version-295154" [b49cf7c8-8d72-4db9-a96f-d796fd8d9e08] Running
I1129 09:02:13.944104 493486 system_pods.go:89] "kindnet-k4n9l" [74cdf2cd-3f3a-4be5-9a9f-6d0b67090fb8] Running
I1129 09:02:13.944110 493486 system_pods.go:89] "kube-apiserver-old-k8s-version-295154" [e4ca0771-197f-4d77-97f0-7a7778e227de] Running
I1129 09:02:13.944116 493486 system_pods.go:89] "kube-controller-manager-old-k8s-version-295154" [6825ac68-da0d-474d-ac97-53398adffd73] Running
I1129 09:02:13.944121 493486 system_pods.go:89] "kube-proxy-4rfb4" [05ef67c3-0d6e-453d-a0e5-81c649c3e033] Running
I1129 09:02:13.944127 493486 system_pods.go:89] "kube-scheduler-old-k8s-version-295154" [97d5e6fb-5cb8-4a03-a8df-3f76df5b2671] Running
I1129 09:02:13.944133 493486 system_pods.go:89] "storage-provisioner" [359871fd-a77c-430a-87c1-b313992718e2] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1129 09:02:13.944166 493486 retry.go:31] will retry after 339.658127ms: missing components: kube-dns
I1129 09:02:14.288499 493486 system_pods.go:86] 8 kube-system pods found
I1129 09:02:14.288533 493486 system_pods.go:89] "coredns-5dd5756b68-phw28" [7fc2b8dd-43dd-43df-8887-9ffa6de36fb4] Running
I1129 09:02:14.288543 493486 system_pods.go:89] "etcd-old-k8s-version-295154" [b49cf7c8-8d72-4db9-a96f-d796fd8d9e08] Running
I1129 09:02:14.288548 493486 system_pods.go:89] "kindnet-k4n9l" [74cdf2cd-3f3a-4be5-9a9f-6d0b67090fb8] Running
I1129 09:02:14.288553 493486 system_pods.go:89] "kube-apiserver-old-k8s-version-295154" [e4ca0771-197f-4d77-97f0-7a7778e227de] Running
I1129 09:02:14.288563 493486 system_pods.go:89] "kube-controller-manager-old-k8s-version-295154" [6825ac68-da0d-474d-ac97-53398adffd73] Running
I1129 09:02:14.288568 493486 system_pods.go:89] "kube-proxy-4rfb4" [05ef67c3-0d6e-453d-a0e5-81c649c3e033] Running
I1129 09:02:14.288573 493486 system_pods.go:89] "kube-scheduler-old-k8s-version-295154" [97d5e6fb-5cb8-4a03-a8df-3f76df5b2671] Running
I1129 09:02:14.288578 493486 system_pods.go:89] "storage-provisioner" [359871fd-a77c-430a-87c1-b313992718e2] Running
I1129 09:02:14.288588 493486 system_pods.go:126] duration metric: took 1.028603527s to wait for k8s-apps to be running ...
I1129 09:02:14.288601 493486 system_svc.go:44] waiting for kubelet service to be running ....
I1129 09:02:14.288662 493486 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1129 09:02:14.302535 493486 system_svc.go:56] duration metric: took 13.922382ms WaitForService to wait for kubelet
I1129 09:02:14.302570 493486 kubeadm.go:587] duration metric: took 15.996603485s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1129 09:02:14.302594 493486 node_conditions.go:102] verifying NodePressure condition ...
I1129 09:02:14.305508 493486 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I1129 09:02:14.305535 493486 node_conditions.go:123] node cpu capacity is 8
I1129 09:02:14.305552 493486 node_conditions.go:105] duration metric: took 2.953214ms to run NodePressure ...
I1129 09:02:14.305564 493486 start.go:242] waiting for startup goroutines ...
I1129 09:02:14.305570 493486 start.go:247] waiting for cluster config update ...
I1129 09:02:14.305583 493486 start.go:256] writing updated cluster config ...
I1129 09:02:14.305887 493486 ssh_runner.go:195] Run: rm -f paused
I1129 09:02:14.309803 493486 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1129 09:02:14.314558 493486 pod_ready.go:83] waiting for pod "coredns-5dd5756b68-phw28" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:14.319446 493486 pod_ready.go:94] pod "coredns-5dd5756b68-phw28" is "Ready"
I1129 09:02:14.319479 493486 pod_ready.go:86] duration metric: took 4.889509ms for pod "coredns-5dd5756b68-phw28" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:14.322499 493486 pod_ready.go:83] waiting for pod "etcd-old-k8s-version-295154" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:14.326608 493486 pod_ready.go:94] pod "etcd-old-k8s-version-295154" is "Ready"
I1129 09:02:14.326631 493486 pod_ready.go:86] duration metric: took 4.109693ms for pod "etcd-old-k8s-version-295154" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:14.329352 493486 pod_ready.go:83] waiting for pod "kube-apiserver-old-k8s-version-295154" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:14.333844 493486 pod_ready.go:94] pod "kube-apiserver-old-k8s-version-295154" is "Ready"
I1129 09:02:14.333867 493486 pod_ready.go:86] duration metric: took 4.49688ms for pod "kube-apiserver-old-k8s-version-295154" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:14.336686 493486 pod_ready.go:83] waiting for pod "kube-controller-manager-old-k8s-version-295154" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:14.714439 493486 pod_ready.go:94] pod "kube-controller-manager-old-k8s-version-295154" is "Ready"
I1129 09:02:14.714472 493486 pod_ready.go:86] duration metric: took 377.765984ms for pod "kube-controller-manager-old-k8s-version-295154" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:14.915822 493486 pod_ready.go:83] waiting for pod "kube-proxy-4rfb4" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:15.314552 493486 pod_ready.go:94] pod "kube-proxy-4rfb4" is "Ready"
I1129 09:02:15.314586 493486 pod_ready.go:86] duration metric: took 398.736001ms for pod "kube-proxy-4rfb4" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:15.515989 493486 pod_ready.go:83] waiting for pod "kube-scheduler-old-k8s-version-295154" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:15.913869 493486 pod_ready.go:94] pod "kube-scheduler-old-k8s-version-295154" is "Ready"
I1129 09:02:15.913896 493486 pod_ready.go:86] duration metric: took 397.877691ms for pod "kube-scheduler-old-k8s-version-295154" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:15.913908 493486 pod_ready.go:40] duration metric: took 1.604073956s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1129 09:02:15.959941 493486 start.go:625] kubectl: 1.34.2, cluster: 1.28.0 (minor skew: 6)
I1129 09:02:15.961883 493486 out.go:203]
W1129 09:02:15.963183 493486 out.go:285] ! /usr/local/bin/kubectl is version 1.34.2, which may have incompatibilities with Kubernetes 1.28.0.
I1129 09:02:15.964449 493486 out.go:179] - Want kubectl v1.28.0? Try 'minikube kubectl -- get pods -A'
I1129 09:02:15.966035 493486 out.go:179] * Done! kubectl is now configured to use "old-k8s-version-295154" cluster and "default" namespace by default
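
[editor's sketch] The start.go:625 line above reports "minor skew: 6" for kubectl 1.34.2 against cluster 1.28.0, which triggers the incompatibility warning. A small sketch of that arithmetic (version strings taken from the log; the parse helper is illustrative, not minikube's):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// minor extracts the minor version number from a "1.34.2"-style string.
func minor(v string) int {
	parts := strings.Split(strings.TrimPrefix(v, "v"), ".")
	m, _ := strconv.Atoi(parts[1])
	return m
}

func main() {
	kubectl, cluster := "1.34.2", "1.28.0"
	skew := minor(kubectl) - minor(cluster)
	if skew < 0 {
		skew = -skew
	}
	fmt.Printf("minor skew: %d\n", skew) // prints 6, matching the log
	if skew > 1 {
		fmt.Printf("! kubectl %s may have incompatibilities with Kubernetes %s\n", kubectl, cluster)
	}
}
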
W1129 09:02:15.330798 494126 node_ready.go:57] node "no-preload-924441" has "Ready":"False" status (will retry)
W1129 09:02:17.331851 494126 node_ready.go:57] node "no-preload-924441" has "Ready":"False" status (will retry)
I1129 09:02:14.509454 460401 logs.go:123] Gathering logs for describe nodes ...
I1129 09:02:14.509484 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1129 09:02:14.571273 460401 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1129 09:02:14.571298 460401 logs.go:123] Gathering logs for kube-apiserver [7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1] ...
I1129 09:02:14.571312 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1"
I1129 09:02:14.605440 460401 logs.go:123] Gathering logs for kube-apiserver [1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101] ...
I1129 09:02:14.605476 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:02:14.642678 460401 logs.go:123] Gathering logs for kube-controller-manager [c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6] ...
I1129 09:02:14.642712 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6"
I1129 09:02:14.671483 460401 logs.go:123] Gathering logs for kube-controller-manager [2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a] ...
I1129 09:02:14.671514 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:02:14.701619 460401 logs.go:123] Gathering logs for containerd ...
I1129 09:02:14.701647 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1129 09:02:17.246912 460401 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
I1129 09:02:17.247337 460401 api_server.go:269] stopped: https://192.168.85.2:8443/healthz: Get "https://192.168.85.2:8443/healthz": dial tcp 192.168.85.2:8443: connect: connection refused
I1129 09:02:17.247422 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1129 09:02:17.247479 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1129 09:02:17.277610 460401 cri.go:89] found id: "7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1"
I1129 09:02:17.277632 460401 cri.go:89] found id: "1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:02:17.277637 460401 cri.go:89] found id: ""
I1129 09:02:17.277647 460401 logs.go:282] 2 containers: [7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101]
I1129 09:02:17.277711 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:17.283531 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:17.288554 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1129 09:02:17.288644 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1129 09:02:17.316819 460401 cri.go:89] found id: "f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:02:17.316847 460401 cri.go:89] found id: ""
I1129 09:02:17.316857 460401 logs.go:282] 1 containers: [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625]
I1129 09:02:17.316921 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:17.322640 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1129 09:02:17.322770 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1129 09:02:17.353531 460401 cri.go:89] found id: ""
I1129 09:02:17.353563 460401 logs.go:282] 0 containers: []
W1129 09:02:17.353575 460401 logs.go:284] No container was found matching "coredns"
I1129 09:02:17.353585 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1129 09:02:17.353651 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1129 09:02:17.384830 460401 cri.go:89] found id: "092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:02:17.384854 460401 cri.go:89] found id: "1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:02:17.384858 460401 cri.go:89] found id: ""
I1129 09:02:17.384867 460401 logs.go:282] 2 containers: [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea]
I1129 09:02:17.384932 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:17.390132 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:17.395096 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1129 09:02:17.395177 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1129 09:02:17.425643 460401 cri.go:89] found id: ""
I1129 09:02:17.425681 460401 logs.go:282] 0 containers: []
W1129 09:02:17.425692 460401 logs.go:284] No container was found matching "kube-proxy"
I1129 09:02:17.425704 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1129 09:02:17.425788 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1129 09:02:17.456077 460401 cri.go:89] found id: "c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6"
I1129 09:02:17.456105 460401 cri.go:89] found id: "2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:02:17.456113 460401 cri.go:89] found id: "976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:02:17.456136 460401 cri.go:89] found id: ""
I1129 09:02:17.456148 460401 logs.go:282] 3 containers: [c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6 2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d]
I1129 09:02:17.456213 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:17.461610 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:17.466727 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:17.471762 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1129 09:02:17.471849 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1129 09:02:17.501750 460401 cri.go:89] found id: ""
I1129 09:02:17.501782 460401 logs.go:282] 0 containers: []
W1129 09:02:17.501793 460401 logs.go:284] No container was found matching "kindnet"
I1129 09:02:17.501801 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1129 09:02:17.501868 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1129 09:02:17.531903 460401 cri.go:89] found id: ""
I1129 09:02:17.531932 460401 logs.go:282] 0 containers: []
W1129 09:02:17.531942 460401 logs.go:284] No container was found matching "storage-provisioner"
I1129 09:02:17.531956 460401 logs.go:123] Gathering logs for kubelet ...
I1129 09:02:17.531972 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1129 09:02:17.630517 460401 logs.go:123] Gathering logs for kube-apiserver [7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1] ...
I1129 09:02:17.630566 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1"
I1129 09:02:17.667169 460401 logs.go:123] Gathering logs for kube-apiserver [1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101] ...
I1129 09:02:17.667205 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:02:17.707311 460401 logs.go:123] Gathering logs for etcd [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625] ...
I1129 09:02:17.707360 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:02:17.746580 460401 logs.go:123] Gathering logs for kube-scheduler [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21] ...
I1129 09:02:17.746621 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:02:17.799162 460401 logs.go:123] Gathering logs for kube-scheduler [1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea] ...
I1129 09:02:17.799207 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:02:17.839313 460401 logs.go:123] Gathering logs for kube-controller-manager [c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6] ...
I1129 09:02:17.839355 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6"
I1129 09:02:17.872700 460401 logs.go:123] Gathering logs for kube-controller-manager [2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a] ...
I1129 09:02:17.872742 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:02:17.904806 460401 logs.go:123] Gathering logs for dmesg ...
I1129 09:02:17.904838 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1129 09:02:17.920866 460401 logs.go:123] Gathering logs for describe nodes ...
I1129 09:02:17.920904 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1129 09:02:17.983002 460401 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1129 09:02:17.983027 460401 logs.go:123] Gathering logs for kube-controller-manager [976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d] ...
I1129 09:02:17.983040 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:02:18.019203 460401 logs.go:123] Gathering logs for containerd ...
I1129 09:02:18.019241 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1129 09:02:18.070893 460401 logs.go:123] Gathering logs for container status ...
I1129 09:02:18.070936 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
W1129 09:02:19.830479 494126 node_ready.go:57] node "no-preload-924441" has "Ready":"False" status (will retry)
I1129 09:02:20.833313 494126 node_ready.go:49] node "no-preload-924441" is "Ready"
I1129 09:02:20.833355 494126 node_ready.go:38] duration metric: took 14.505431475s for node "no-preload-924441" to be "Ready" ...
I1129 09:02:20.833377 494126 api_server.go:52] waiting for apiserver process to appear ...
I1129 09:02:20.833445 494126 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1129 09:02:20.850134 494126 api_server.go:72] duration metric: took 14.795523765s to wait for apiserver process to appear ...
I1129 09:02:20.850165 494126 api_server.go:88] waiting for apiserver healthz status ...
I1129 09:02:20.850190 494126 api_server.go:253] Checking apiserver healthz at https://192.168.103.2:8443/healthz ...
I1129 09:02:20.856514 494126 api_server.go:279] https://192.168.103.2:8443/healthz returned 200:
ok
I1129 09:02:20.857900 494126 api_server.go:141] control plane version: v1.34.1
I1129 09:02:20.857933 494126 api_server.go:131] duration metric: took 7.759312ms to wait for apiserver health ...
I1129 09:02:20.857945 494126 system_pods.go:43] waiting for kube-system pods to appear ...
I1129 09:02:20.861811 494126 system_pods.go:59] 8 kube-system pods found
I1129 09:02:20.861851 494126 system_pods.go:61] "coredns-66bc5c9577-nsh8w" [bf2a8ab9-aaca-4ee6-a390-a02099f693d9] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1129 09:02:20.861863 494126 system_pods.go:61] "etcd-no-preload-924441" [e3cda1b0-1ca8-4ded-a506-f728fc050781] Running
I1129 09:02:20.861871 494126 system_pods.go:61] "kindnet-nscfk" [052c2152-0369-4121-b2fe-25b79a00145a] Running
I1129 09:02:20.861877 494126 system_pods.go:61] "kube-apiserver-no-preload-924441" [08168b39-5d95-4d6b-ac99-3c6ee50a2530] Running
I1129 09:02:20.861892 494126 system_pods.go:61] "kube-controller-manager-no-preload-924441" [9e84b562-ff11-40c1-a7ab-3682dbbae4be] Running
I1129 09:02:20.861897 494126 system_pods.go:61] "kube-proxy-96fcg" [c9fd8592-2ec4-4da3-a800-b136c118d379] Running
I1129 09:02:20.861902 494126 system_pods.go:61] "kube-scheduler-no-preload-924441" [91fa5a87-81d7-4b1c-8334-9c5c4fcf8997] Running
I1129 09:02:20.861912 494126 system_pods.go:61] "storage-provisioner" [88b64cf8-3233-47bb-be31-6f367a8a1433] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1129 09:02:20.861920 494126 system_pods.go:74] duration metric: took 3.967151ms to wait for pod list to return data ...
I1129 09:02:20.861931 494126 default_sa.go:34] waiting for default service account to be created ...
I1129 09:02:20.864542 494126 default_sa.go:45] found service account: "default"
I1129 09:02:20.864569 494126 default_sa.go:55] duration metric: took 2.631761ms for default service account to be created ...
I1129 09:02:20.864581 494126 system_pods.go:116] waiting for k8s-apps to be running ...
I1129 09:02:20.867876 494126 system_pods.go:86] 8 kube-system pods found
I1129 09:02:20.867913 494126 system_pods.go:89] "coredns-66bc5c9577-nsh8w" [bf2a8ab9-aaca-4ee6-a390-a02099f693d9] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1129 09:02:20.867924 494126 system_pods.go:89] "etcd-no-preload-924441" [e3cda1b0-1ca8-4ded-a506-f728fc050781] Running
I1129 09:02:20.867932 494126 system_pods.go:89] "kindnet-nscfk" [052c2152-0369-4121-b2fe-25b79a00145a] Running
I1129 09:02:20.867938 494126 system_pods.go:89] "kube-apiserver-no-preload-924441" [08168b39-5d95-4d6b-ac99-3c6ee50a2530] Running
I1129 09:02:20.867999 494126 system_pods.go:89] "kube-controller-manager-no-preload-924441" [9e84b562-ff11-40c1-a7ab-3682dbbae4be] Running
I1129 09:02:20.868005 494126 system_pods.go:89] "kube-proxy-96fcg" [c9fd8592-2ec4-4da3-a800-b136c118d379] Running
I1129 09:02:20.868011 494126 system_pods.go:89] "kube-scheduler-no-preload-924441" [91fa5a87-81d7-4b1c-8334-9c5c4fcf8997] Running
I1129 09:02:20.868027 494126 system_pods.go:89] "storage-provisioner" [88b64cf8-3233-47bb-be31-6f367a8a1433] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1129 09:02:20.868077 494126 retry.go:31] will retry after 292.54579ms: missing components: kube-dns
I1129 09:02:21.165357 494126 system_pods.go:86] 8 kube-system pods found
I1129 09:02:21.165399 494126 system_pods.go:89] "coredns-66bc5c9577-nsh8w" [bf2a8ab9-aaca-4ee6-a390-a02099f693d9] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1129 09:02:21.165408 494126 system_pods.go:89] "etcd-no-preload-924441" [e3cda1b0-1ca8-4ded-a506-f728fc050781] Running
I1129 09:02:21.165416 494126 system_pods.go:89] "kindnet-nscfk" [052c2152-0369-4121-b2fe-25b79a00145a] Running
I1129 09:02:21.165422 494126 system_pods.go:89] "kube-apiserver-no-preload-924441" [08168b39-5d95-4d6b-ac99-3c6ee50a2530] Running
I1129 09:02:21.165428 494126 system_pods.go:89] "kube-controller-manager-no-preload-924441" [9e84b562-ff11-40c1-a7ab-3682dbbae4be] Running
I1129 09:02:21.165434 494126 system_pods.go:89] "kube-proxy-96fcg" [c9fd8592-2ec4-4da3-a800-b136c118d379] Running
I1129 09:02:21.165439 494126 system_pods.go:89] "kube-scheduler-no-preload-924441" [91fa5a87-81d7-4b1c-8334-9c5c4fcf8997] Running
I1129 09:02:21.165449 494126 system_pods.go:89] "storage-provisioner" [88b64cf8-3233-47bb-be31-6f367a8a1433] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1129 09:02:21.165470 494126 retry.go:31] will retry after 336.406198ms: missing components: kube-dns
I1129 09:02:21.505471 494126 system_pods.go:86] 8 kube-system pods found
I1129 09:02:21.505510 494126 system_pods.go:89] "coredns-66bc5c9577-nsh8w" [bf2a8ab9-aaca-4ee6-a390-a02099f693d9] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1129 09:02:21.505516 494126 system_pods.go:89] "etcd-no-preload-924441" [e3cda1b0-1ca8-4ded-a506-f728fc050781] Running
I1129 09:02:21.505524 494126 system_pods.go:89] "kindnet-nscfk" [052c2152-0369-4121-b2fe-25b79a00145a] Running
I1129 09:02:21.505528 494126 system_pods.go:89] "kube-apiserver-no-preload-924441" [08168b39-5d95-4d6b-ac99-3c6ee50a2530] Running
I1129 09:02:21.505531 494126 system_pods.go:89] "kube-controller-manager-no-preload-924441" [9e84b562-ff11-40c1-a7ab-3682dbbae4be] Running
I1129 09:02:21.505534 494126 system_pods.go:89] "kube-proxy-96fcg" [c9fd8592-2ec4-4da3-a800-b136c118d379] Running
I1129 09:02:21.505538 494126 system_pods.go:89] "kube-scheduler-no-preload-924441" [91fa5a87-81d7-4b1c-8334-9c5c4fcf8997] Running
I1129 09:02:21.505542 494126 system_pods.go:89] "storage-provisioner" [88b64cf8-3233-47bb-be31-6f367a8a1433] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1129 09:02:21.505560 494126 retry.go:31] will retry after 447.535618ms: missing components: kube-dns
I1129 09:02:21.957409 494126 system_pods.go:86] 8 kube-system pods found
I1129 09:02:21.957439 494126 system_pods.go:89] "coredns-66bc5c9577-nsh8w" [bf2a8ab9-aaca-4ee6-a390-a02099f693d9] Running
I1129 09:02:21.957444 494126 system_pods.go:89] "etcd-no-preload-924441" [e3cda1b0-1ca8-4ded-a506-f728fc050781] Running
I1129 09:02:21.957448 494126 system_pods.go:89] "kindnet-nscfk" [052c2152-0369-4121-b2fe-25b79a00145a] Running
I1129 09:02:21.957451 494126 system_pods.go:89] "kube-apiserver-no-preload-924441" [08168b39-5d95-4d6b-ac99-3c6ee50a2530] Running
I1129 09:02:21.957456 494126 system_pods.go:89] "kube-controller-manager-no-preload-924441" [9e84b562-ff11-40c1-a7ab-3682dbbae4be] Running
I1129 09:02:21.957459 494126 system_pods.go:89] "kube-proxy-96fcg" [c9fd8592-2ec4-4da3-a800-b136c118d379] Running
I1129 09:02:21.957464 494126 system_pods.go:89] "kube-scheduler-no-preload-924441" [91fa5a87-81d7-4b1c-8334-9c5c4fcf8997] Running
I1129 09:02:21.957467 494126 system_pods.go:89] "storage-provisioner" [88b64cf8-3233-47bb-be31-6f367a8a1433] Running
I1129 09:02:21.957476 494126 system_pods.go:126] duration metric: took 1.092887723s to wait for k8s-apps to be running ...
I1129 09:02:21.957498 494126 system_svc.go:44] waiting for kubelet service to be running ....
I1129 09:02:21.957549 494126 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1129 09:02:21.971582 494126 system_svc.go:56] duration metric: took 14.071974ms WaitForService to wait for kubelet
I1129 09:02:21.971613 494126 kubeadm.go:587] duration metric: took 15.917009838s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1129 09:02:21.971632 494126 node_conditions.go:102] verifying NodePressure condition ...
I1129 09:02:21.974426 494126 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I1129 09:02:21.974453 494126 node_conditions.go:123] node cpu capacity is 8
I1129 09:02:21.974471 494126 node_conditions.go:105] duration metric: took 2.83418ms to run NodePressure ...
I1129 09:02:21.974485 494126 start.go:242] waiting for startup goroutines ...
I1129 09:02:21.974492 494126 start.go:247] waiting for cluster config update ...
I1129 09:02:21.974502 494126 start.go:256] writing updated cluster config ...
I1129 09:02:21.974780 494126 ssh_runner.go:195] Run: rm -f paused
I1129 09:02:21.978967 494126 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1129 09:02:21.982434 494126 pod_ready.go:83] waiting for pod "coredns-66bc5c9577-nsh8w" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:21.986370 494126 pod_ready.go:94] pod "coredns-66bc5c9577-nsh8w" is "Ready"
I1129 09:02:21.986395 494126 pod_ready.go:86] duration metric: took 3.939701ms for pod "coredns-66bc5c9577-nsh8w" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:21.988365 494126 pod_ready.go:83] waiting for pod "etcd-no-preload-924441" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:21.991850 494126 pod_ready.go:94] pod "etcd-no-preload-924441" is "Ready"
I1129 09:02:21.991874 494126 pod_ready.go:86] duration metric: took 3.486388ms for pod "etcd-no-preload-924441" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:21.993587 494126 pod_ready.go:83] waiting for pod "kube-apiserver-no-preload-924441" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:21.997072 494126 pod_ready.go:94] pod "kube-apiserver-no-preload-924441" is "Ready"
I1129 09:02:21.997092 494126 pod_ready.go:86] duration metric: took 3.484304ms for pod "kube-apiserver-no-preload-924441" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:21.998698 494126 pod_ready.go:83] waiting for pod "kube-controller-manager-no-preload-924441" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:22.382918 494126 pod_ready.go:94] pod "kube-controller-manager-no-preload-924441" is "Ready"
I1129 09:02:22.382948 494126 pod_ready.go:86] duration metric: took 384.232783ms for pod "kube-controller-manager-no-preload-924441" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:22.583125 494126 pod_ready.go:83] waiting for pod "kube-proxy-96fcg" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:22.982608 494126 pod_ready.go:94] pod "kube-proxy-96fcg" is "Ready"
I1129 09:02:22.982639 494126 pod_ready.go:86] duration metric: took 399.48383ms for pod "kube-proxy-96fcg" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:23.184031 494126 pod_ready.go:83] waiting for pod "kube-scheduler-no-preload-924441" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:23.583027 494126 pod_ready.go:94] pod "kube-scheduler-no-preload-924441" is "Ready"
I1129 09:02:23.583058 494126 pod_ready.go:86] duration metric: took 399.00134ms for pod "kube-scheduler-no-preload-924441" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:23.583071 494126 pod_ready.go:40] duration metric: took 1.604064431s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1129 09:02:23.632822 494126 start.go:625] kubectl: 1.34.2, cluster: 1.34.1 (minor skew: 0)
I1129 09:02:23.634677 494126 out.go:179] * Done! kubectl is now configured to use "no-preload-924441" cluster and "default" namespace by default
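The system_pods.go loop above is a plain list-and-retry poll: list every pod in kube-system, and if any expected component is still Pending (here kube-dns), sleep for a growing interval and list again. A minimal sketch of that pattern, assuming client-go and a kubeconfig at the default path; the function name and backoff growth are illustrative, not minikube's actual implementation:

// readiness poll sketch: retry until no kube-system pod is Pending
package main

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func waitForSystemPods(ctx context.Context, cs *kubernetes.Clientset) error {
	backoff := 300 * time.Millisecond
	for {
		pods, err := cs.CoreV1().Pods("kube-system").List(ctx, metav1.ListOptions{})
		if err != nil {
			return err
		}
		notRunning := 0
		for _, p := range pods.Items {
			if p.Status.Phase != corev1.PodRunning {
				notRunning++
			}
		}
		if notRunning == 0 {
			return nil
		}
		fmt.Printf("will retry after %v: %d pods not yet Running\n", backoff, notRunning)
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(backoff):
			backoff += backoff / 3 // grow roughly like the retry intervals in the log above
		}
	}
}

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 4*time.Minute)
	defer cancel()
	if err := waitForSystemPods(ctx, cs); err != nil {
		panic(err)
	}
}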
I1129 09:02:20.607959 460401 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
I1129 09:02:20.608406 460401 api_server.go:269] stopped: https://192.168.85.2:8443/healthz: Get "https://192.168.85.2:8443/healthz": dial tcp 192.168.85.2:8443: connect: connection refused
I1129 09:02:20.608469 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1129 09:02:20.608531 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1129 09:02:20.639116 460401 cri.go:89] found id: "7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1"
I1129 09:02:20.639148 460401 cri.go:89] found id: "1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:02:20.639155 460401 cri.go:89] found id: ""
I1129 09:02:20.639168 460401 logs.go:282] 2 containers: [7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101]
I1129 09:02:20.639240 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:20.644749 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:20.649347 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1129 09:02:20.649411 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1129 09:02:20.677383 460401 cri.go:89] found id: "f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:02:20.677404 460401 cri.go:89] found id: ""
I1129 09:02:20.677413 460401 logs.go:282] 1 containers: [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625]
I1129 09:02:20.677466 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:20.682625 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1129 09:02:20.682708 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1129 09:02:20.711021 460401 cri.go:89] found id: ""
I1129 09:02:20.711050 460401 logs.go:282] 0 containers: []
W1129 09:02:20.711060 460401 logs.go:284] No container was found matching "coredns"
I1129 09:02:20.711070 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1129 09:02:20.711138 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1129 09:02:20.745598 460401 cri.go:89] found id: "092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:02:20.745626 460401 cri.go:89] found id: "1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:02:20.745632 460401 cri.go:89] found id: ""
I1129 09:02:20.745643 460401 logs.go:282] 2 containers: [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea]
I1129 09:02:20.745716 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:20.751838 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:20.757804 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1129 09:02:20.757881 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1129 09:02:20.793640 460401 cri.go:89] found id: ""
I1129 09:02:20.793671 460401 logs.go:282] 0 containers: []
W1129 09:02:20.793683 460401 logs.go:284] No container was found matching "kube-proxy"
I1129 09:02:20.793691 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1129 09:02:20.793792 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1129 09:02:20.830071 460401 cri.go:89] found id: "c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6"
I1129 09:02:20.830099 460401 cri.go:89] found id: "976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:02:20.830104 460401 cri.go:89] found id: ""
I1129 09:02:20.830114 460401 logs.go:282] 2 containers: [c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d]
I1129 09:02:20.830179 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:20.837576 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:20.843146 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1129 09:02:20.843225 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1129 09:02:20.883480 460401 cri.go:89] found id: ""
I1129 09:02:20.883525 460401 logs.go:282] 0 containers: []
W1129 09:02:20.883536 460401 logs.go:284] No container was found matching "kindnet"
I1129 09:02:20.883543 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1129 09:02:20.883598 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1129 09:02:20.923499 460401 cri.go:89] found id: ""
I1129 09:02:20.923532 460401 logs.go:282] 0 containers: []
W1129 09:02:20.923543 460401 logs.go:284] No container was found matching "storage-provisioner"
I1129 09:02:20.923557 460401 logs.go:123] Gathering logs for kube-controller-manager [c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6] ...
I1129 09:02:20.923574 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6"
I1129 09:02:20.961675 460401 logs.go:123] Gathering logs for kube-controller-manager [976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d] ...
I1129 09:02:20.961713 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:02:20.996489 460401 logs.go:123] Gathering logs for containerd ...
I1129 09:02:20.996524 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1129 09:02:21.046535 460401 logs.go:123] Gathering logs for kubelet ...
I1129 09:02:21.046596 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1129 09:02:21.131239 460401 logs.go:123] Gathering logs for describe nodes ...
I1129 09:02:21.131286 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1129 09:02:21.192537 460401 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1129 09:02:21.192557 460401 logs.go:123] Gathering logs for kube-apiserver [7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1] ...
I1129 09:02:21.192573 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1"
I1129 09:02:21.227894 460401 logs.go:123] Gathering logs for kube-apiserver [1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101] ...
I1129 09:02:21.227932 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:02:21.262592 460401 logs.go:123] Gathering logs for container status ...
I1129 09:02:21.262632 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1129 09:02:21.298034 460401 logs.go:123] Gathering logs for dmesg ...
I1129 09:02:21.298076 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1129 09:02:21.313593 460401 logs.go:123] Gathering logs for etcd [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625] ...
I1129 09:02:21.313626 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:02:21.355840 460401 logs.go:123] Gathering logs for kube-scheduler [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21] ...
I1129 09:02:21.355878 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:02:21.409528 460401 logs.go:123] Gathering logs for kube-scheduler [1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea] ...
I1129 09:02:21.409570 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
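The api_server.go:253/269 pair in this second process (pid 460401) is a reachability probe against the apiserver's /healthz endpoint; "stopped" simply means the TCP connect was refused. A minimal sketch of that check, with the caveat that the insecure TLS config here is an assumption for illustration (minikube's real client is built from the cluster's certificates):

// healthz probe sketch: GET <apiserver>/healthz, treat transport errors as "stopped"
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

func checkHealthz(url string) error {
	client := &http.Client{
		Timeout: 5 * time.Second,
		Transport: &http.Transport{
			// this sketch only cares about reachability, so cert verification is skipped
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Get(url)
	if err != nil {
		return fmt.Errorf("stopped: %s: %w", url, err) // e.g. connect: connection refused
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unhealthy: %s returned %d", url, resp.StatusCode)
	}
	return nil
}

func main() {
	fmt.Println(checkHealthz("https://192.168.85.2:8443/healthz"))
}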
I1129 09:02:23.946261 460401 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
I1129 09:02:23.946794 460401 api_server.go:269] stopped: https://192.168.85.2:8443/healthz: Get "https://192.168.85.2:8443/healthz": dial tcp 192.168.85.2:8443: connect: connection refused
I1129 09:02:23.946872 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1129 09:02:23.946940 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1129 09:02:23.978496 460401 cri.go:89] found id: "7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1"
I1129 09:02:23.978521 460401 cri.go:89] found id: "1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:02:23.978525 460401 cri.go:89] found id: ""
I1129 09:02:23.978533 460401 logs.go:282] 2 containers: [7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101]
I1129 09:02:23.978585 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:23.983820 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:23.988502 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1129 09:02:23.988563 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1129 09:02:24.017479 460401 cri.go:89] found id: "f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:02:24.017505 460401 cri.go:89] found id: ""
I1129 09:02:24.017516 460401 logs.go:282] 1 containers: [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625]
I1129 09:02:24.017581 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:24.022978 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1129 09:02:24.023049 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1129 09:02:24.054017 460401 cri.go:89] found id: ""
I1129 09:02:24.054042 460401 logs.go:282] 0 containers: []
W1129 09:02:24.054049 460401 logs.go:284] No container was found matching "coredns"
I1129 09:02:24.054055 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1129 09:02:24.054104 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1129 09:02:24.083682 460401 cri.go:89] found id: "092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:02:24.083704 460401 cri.go:89] found id: "1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:02:24.083710 460401 cri.go:89] found id: ""
I1129 09:02:24.083720 460401 logs.go:282] 2 containers: [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea]
I1129 09:02:24.083797 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:24.089191 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:24.094144 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1129 09:02:24.094223 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1129 09:02:24.123931 460401 cri.go:89] found id: ""
I1129 09:02:24.123956 460401 logs.go:282] 0 containers: []
W1129 09:02:24.123964 460401 logs.go:284] No container was found matching "kube-proxy"
I1129 09:02:24.123972 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1129 09:02:24.124032 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1129 09:02:24.158678 460401 cri.go:89] found id: "c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6"
I1129 09:02:24.158704 460401 cri.go:89] found id: "976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:02:24.158710 460401 cri.go:89] found id: ""
I1129 09:02:24.158721 460401 logs.go:282] 2 containers: [c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d]
I1129 09:02:24.158824 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:24.164380 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:24.170117 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1129 09:02:24.170196 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1129 09:02:24.202016 460401 cri.go:89] found id: ""
I1129 09:02:24.202057 460401 logs.go:282] 0 containers: []
W1129 09:02:24.202066 460401 logs.go:284] No container was found matching "kindnet"
I1129 09:02:24.202072 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1129 09:02:24.202123 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1129 09:02:24.235359 460401 cri.go:89] found id: ""
I1129 09:02:24.235388 460401 logs.go:282] 0 containers: []
W1129 09:02:24.235399 460401 logs.go:284] No container was found matching "storage-provisioner"
I1129 09:02:24.235412 460401 logs.go:123] Gathering logs for kubelet ...
I1129 09:02:24.235427 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1129 09:02:24.327121 460401 logs.go:123] Gathering logs for kube-scheduler [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21] ...
I1129 09:02:24.327167 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:02:24.380608 460401 logs.go:123] Gathering logs for kube-controller-manager [c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6] ...
I1129 09:02:24.380651 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6"
I1129 09:02:24.411895 460401 logs.go:123] Gathering logs for kube-controller-manager [976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d] ...
I1129 09:02:24.411923 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:02:24.450543 460401 logs.go:123] Gathering logs for containerd ...
I1129 09:02:24.450575 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1129 09:02:24.500105 460401 logs.go:123] Gathering logs for container status ...
I1129 09:02:24.500146 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
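Each gathering cycle above is driven by the same two commands per control-plane component: sudo crictl ps -a --quiet --name=<component> to discover container IDs, then crictl logs --tail 400 <id> for each hit (with a warning when nothing matches). A sketch of that loop, run locally via os/exec for illustration; minikube drives the identical commands over SSH (ssh_runner.go) inside the node container:

// log-gathering sketch: enumerate containers by name via crictl, then tail their logs
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func containerIDs(name string) ([]string, error) {
	out, err := exec.Command("sudo", "crictl", "ps", "-a", "--quiet", "--name="+name).Output()
	if err != nil {
		return nil, err
	}
	return strings.Fields(string(out)), nil // one container ID per line
}

func tailLogs(id string) (string, error) {
	out, err := exec.Command("sudo", "crictl", "logs", "--tail", "400", id).CombinedOutput()
	return string(out), err
}

func main() {
	for _, name := range []string{"kube-apiserver", "etcd", "coredns", "kube-scheduler"} {
		ids, err := containerIDs(name)
		if err != nil {
			fmt.Println("listing failed:", err)
			continue
		}
		if len(ids) == 0 {
			fmt.Printf("No container was found matching %q\n", name)
			continue
		}
		for _, id := range ids {
			logs, _ := tailLogs(id)
			fmt.Printf("=== %s [%s] ===\n%s", name, id, logs)
		}
	}
}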
==> container status <==
CONTAINER       IMAGE           CREATED          STATE     NAME                      ATTEMPT   POD ID          POD                                              NAMESPACE
64dcae39f0e63   56cc512116c8f   7 seconds ago    Running   busybox                   0         c3b03930e2672   busybox                                          default
84eb7f692c990   ead0a4a53df89   13 seconds ago   Running   coredns                   0         46a4885d817e8   coredns-5dd5756b68-phw28                         kube-system
c2b64aca34f8b   6e38f40d628db   13 seconds ago   Running   storage-provisioner       0         f0e9f57ece0e7   storage-provisioner                              kube-system
c556471fd7ebd   409467f978b4a   24 seconds ago   Running   kindnet-cni               0         c9cb87dbe2bae   kindnet-k4n9l                                    kube-system
c3eb6059b5593   ea1030da44aa1   27 seconds ago   Running   kube-proxy                0         d9056ddc2e968   kube-proxy-4rfb4                                 kube-system
ec1e8ae808249   f6f496300a2ae   45 seconds ago   Running   kube-scheduler            0         7caf413f5769e   kube-scheduler-old-k8s-version-295154            kube-system
b3d9ef849b109   4be79c38a4bab   45 seconds ago   Running   kube-controller-manager   0         f845d639a6e89   kube-controller-manager-old-k8s-version-295154   kube-system
e534f6de34cb5   73deb9a3f7025   45 seconds ago   Running   etcd                      0         83b4224fe982d   etcd-old-k8s-version-295154                      kube-system
c912b0431f5b9   bb5e0dde9054c   45 seconds ago   Running   kube-apiserver            0         c5ef1020ba416   kube-apiserver-old-k8s-version-295154            kube-system
==> containerd <==
Nov 29 09:02:13 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:13.171284629Z" level=info msg="CreateContainer within sandbox \"f0e9f57ece0e7298ea8ff52e824c152b0a198734fa271e11f9da85ab94980def\" for &ContainerMetadata{Name:storage-provisioner,Attempt:0,} returns container id \"c2b64aca34f8b72337fd1dd9bda969ab607f739b3b5bd64a9962706bb51f1368\""
Nov 29 09:02:13 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:13.171952045Z" level=info msg="StartContainer for \"c2b64aca34f8b72337fd1dd9bda969ab607f739b3b5bd64a9962706bb51f1368\""
Nov 29 09:02:13 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:13.173213037Z" level=info msg="connecting to shim c2b64aca34f8b72337fd1dd9bda969ab607f739b3b5bd64a9962706bb51f1368" address="unix:///run/containerd/s/dc122ba824fb2ecb94628ad2391429e4d2b98c17ac396814c4a25b4d93b141fe" protocol=ttrpc version=3
Nov 29 09:02:13 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:13.175196491Z" level=info msg="CreateContainer within sandbox \"46a4885d817e84fab45e9ad70e7c335ccc0f307e19f484641f3f563e19a3b305\" for &ContainerMetadata{Name:coredns,Attempt:0,} returns container id \"84eb7f692c99059489020b59b47c169ecc9d4286a2bf7a532dae7f5d13e68795\""
Nov 29 09:02:13 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:13.175823701Z" level=info msg="StartContainer for \"84eb7f692c99059489020b59b47c169ecc9d4286a2bf7a532dae7f5d13e68795\""
Nov 29 09:02:13 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:13.176634429Z" level=info msg="connecting to shim 84eb7f692c99059489020b59b47c169ecc9d4286a2bf7a532dae7f5d13e68795" address="unix:///run/containerd/s/950489f09bce35a172bb4082bad530c176c650052c0ffe9dab18daf70ee3f021" protocol=ttrpc version=3
Nov 29 09:02:13 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:13.230846483Z" level=info msg="StartContainer for \"c2b64aca34f8b72337fd1dd9bda969ab607f739b3b5bd64a9962706bb51f1368\" returns successfully"
Nov 29 09:02:13 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:13.234243145Z" level=info msg="StartContainer for \"84eb7f692c99059489020b59b47c169ecc9d4286a2bf7a532dae7f5d13e68795\" returns successfully"
Nov 29 09:02:16 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:16.439586027Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:54baf2f4-8de5-4f66-92ac-f5315174d940,Namespace:default,Attempt:0,}"
Nov 29 09:02:16 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:16.482219935Z" level=info msg="connecting to shim c3b03930e26728c610c785b965715fd3b553dfa8fa71b6e35bcc2370b534d413" address="unix:///run/containerd/s/705109ebb456d589bcc59459487d5f036c6a54c53bc3e7a7b9f9e1b41d8f56cc" namespace=k8s.io protocol=ttrpc version=3
Nov 29 09:02:16 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:16.554186463Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:54baf2f4-8de5-4f66-92ac-f5315174d940,Namespace:default,Attempt:0,} returns sandbox id \"c3b03930e26728c610c785b965715fd3b553dfa8fa71b6e35bcc2370b534d413\""
Nov 29 09:02:16 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:16.556162494Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\""
Nov 29 09:02:19 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:19.188092236Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 29 09:02:19 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:19.188755127Z" level=info msg="stop pulling image gcr.io/k8s-minikube/busybox:1.28.4-glibc: active requests=0, bytes read=2396643"
Nov 29 09:02:19 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:19.190108938Z" level=info msg="ImageCreate event name:\"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 29 09:02:19 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:19.192089044Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 29 09:02:19 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:19.192508223Z" level=info msg="Pulled image \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" with image id \"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\", repo tag \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\", repo digest \"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\", size \"2395207\" in 2.636298875s"
Nov 29 09:02:19 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:19.192553605Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" returns image reference \"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\""
Nov 29 09:02:19 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:19.194479178Z" level=info msg="CreateContainer within sandbox \"c3b03930e26728c610c785b965715fd3b553dfa8fa71b6e35bcc2370b534d413\" for container &ContainerMetadata{Name:busybox,Attempt:0,}"
Nov 29 09:02:19 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:19.201487714Z" level=info msg="Container 64dcae39f0e638d4b6c6e188a3cb9da7d32231fa3ff9ad25ba54b2c00601f705: CDI devices from CRI Config.CDIDevices: []"
Nov 29 09:02:19 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:19.207643963Z" level=info msg="CreateContainer within sandbox \"c3b03930e26728c610c785b965715fd3b553dfa8fa71b6e35bcc2370b534d413\" for &ContainerMetadata{Name:busybox,Attempt:0,} returns container id \"64dcae39f0e638d4b6c6e188a3cb9da7d32231fa3ff9ad25ba54b2c00601f705\""
Nov 29 09:02:19 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:19.208357251Z" level=info msg="StartContainer for \"64dcae39f0e638d4b6c6e188a3cb9da7d32231fa3ff9ad25ba54b2c00601f705\""
Nov 29 09:02:19 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:19.209198742Z" level=info msg="connecting to shim 64dcae39f0e638d4b6c6e188a3cb9da7d32231fa3ff9ad25ba54b2c00601f705" address="unix:///run/containerd/s/705109ebb456d589bcc59459487d5f036c6a54c53bc3e7a7b9f9e1b41d8f56cc" protocol=ttrpc version=3
Nov 29 09:02:19 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:19.268677673Z" level=info msg="StartContainer for \"64dcae39f0e638d4b6c6e188a3cb9da7d32231fa3ff9ad25ba54b2c00601f705\" returns successfully"
Nov 29 09:02:25 old-k8s-version-295154 containerd[663]: E1129 09:02:25.213853 663 websocket.go:100] "Unhandled Error" err="unable to upgrade websocket connection: websocket server finished before becoming ready" logger="UnhandledError"
==> coredns [84eb7f692c99059489020b59b47c169ecc9d4286a2bf7a532dae7f5d13e68795] <==
.:53
[INFO] plugin/reload: Running configuration SHA512 = b7aacdf6a6aa730aafe4d018cac9b7b5ecfb346cba84a99f64521f87aef8b4958639c1cf97967716465791d05bd38f372615327b7cb1d93c850bae532744d54d
CoreDNS-1.10.1
linux/amd64, go1.20, 055b2c3
[INFO] 127.0.0.1:46306 - 2219 "HINFO IN 2134159150006616805.6033665223682648056. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.036424572s
==> describe nodes <==
Name:               old-k8s-version-295154
Roles:              control-plane
Labels:             beta.kubernetes.io/arch=amd64
                    beta.kubernetes.io/os=linux
                    kubernetes.io/arch=amd64
                    kubernetes.io/hostname=old-k8s-version-295154
                    kubernetes.io/os=linux
                    minikube.k8s.io/commit=d0eb20ec824c82ab3f24099c8b785e0a2a5789af
                    minikube.k8s.io/name=old-k8s-version-295154
                    minikube.k8s.io/primary=true
                    minikube.k8s.io/updated_at=2025_11_29T09_01_47_0700
                    minikube.k8s.io/version=v1.37.0
                    node-role.kubernetes.io/control-plane=
                    node.kubernetes.io/exclude-from-external-load-balancers=
Annotations:        kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock
                    node.alpha.kubernetes.io/ttl: 0
                    volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp:  Sat, 29 Nov 2025 09:01:42 +0000
Taints:             <none>
Unschedulable:      false
Lease:
  HolderIdentity:  old-k8s-version-295154
  AcquireTime:     <unset>
  RenewTime:       Sat, 29 Nov 2025 09:02:16 +0000
Conditions:
Type             Status   LastHeartbeatTime                 LastTransitionTime                Reason                       Message
----             ------   -----------------                 ------------------                ------                       -------
MemoryPressure   False    Sat, 29 Nov 2025 09:02:16 +0000   Sat, 29 Nov 2025 09:01:41 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
DiskPressure     False    Sat, 29 Nov 2025 09:02:16 +0000   Sat, 29 Nov 2025 09:01:41 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
PIDPressure      False    Sat, 29 Nov 2025 09:02:16 +0000   Sat, 29 Nov 2025 09:01:41 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
Ready            True     Sat, 29 Nov 2025 09:02:16 +0000   Sat, 29 Nov 2025 09:02:12 +0000   KubeletReady                 kubelet is posting ready status
Addresses:
  InternalIP:  192.168.76.2
  Hostname:    old-k8s-version-295154
Capacity:
  cpu:                8
  ephemeral-storage:  304681132Ki
  hugepages-1Gi:      0
  hugepages-2Mi:      0
  memory:             32863348Ki
  pods:               110
Allocatable:
  cpu:                8
  ephemeral-storage:  304681132Ki
  hugepages-1Gi:      0
  hugepages-2Mi:      0
  memory:             32863348Ki
  pods:               110
System Info:
  Machine ID:                 9629f1d5bc1ed524a56ce23c69214c09
  System UUID:                22b437c1-66e6-4b41-85ab-28edf17772d8
  Boot ID:                    b81dce2f-73d5-4349-b473-aa1210058cb8
  Kernel Version:             6.8.0-1044-gcp
  OS Image:                   Debian GNU/Linux 12 (bookworm)
  Operating System:           linux
  Architecture:               amd64
  Container Runtime Version:  containerd://2.1.5
  Kubelet Version:            v1.28.0
  Kube-Proxy Version:         v1.28.0
PodCIDR:                      10.244.0.0/24
PodCIDRs:                     10.244.0.0/24
Non-terminated Pods: (9 in total)
  Namespace     Name                                              CPU Requests   CPU Limits   Memory Requests   Memory Limits   Age
  ---------     ----                                              ------------   ----------   ---------------   -------------   ---
  default       busybox                                           0 (0%)         0 (0%)       0 (0%)            0 (0%)          10s
  kube-system   coredns-5dd5756b68-phw28                          100m (1%)      0 (0%)       70Mi (0%)         170Mi (0%)      28s
  kube-system   etcd-old-k8s-version-295154                       100m (1%)      0 (0%)       100Mi (0%)        0 (0%)          40s
  kube-system   kindnet-k4n9l                                     100m (1%)      100m (1%)    50Mi (0%)         50Mi (0%)       28s
  kube-system   kube-apiserver-old-k8s-version-295154             250m (3%)      0 (0%)       0 (0%)            0 (0%)          40s
  kube-system   kube-controller-manager-old-k8s-version-295154    200m (2%)      0 (0%)       0 (0%)            0 (0%)          41s
  kube-system   kube-proxy-4rfb4                                  0 (0%)         0 (0%)       0 (0%)            0 (0%)          28s
  kube-system   kube-scheduler-old-k8s-version-295154             100m (1%)      0 (0%)       0 (0%)            0 (0%)          40s
  kube-system   storage-provisioner                               0 (0%)         0 (0%)       0 (0%)            0 (0%)          28s
Allocated resources:
  (Total limits may be over 100 percent, i.e., overcommitted.)
  Resource           Requests     Limits
  --------           --------     ------
  cpu                850m (10%)   100m (1%)
  memory             220Mi (0%)   220Mi (0%)
  ephemeral-storage  0 (0%)       0 (0%)
  hugepages-1Gi      0 (0%)       0 (0%)
  hugepages-2Mi      0 (0%)       0 (0%)
Events:
  Type    Reason                   Age   From             Message
  ----    ------                   ---   ----             -------
  Normal  Starting                 27s   kube-proxy
  Normal  Starting                 41s   kubelet          Starting kubelet.
  Normal  NodeAllocatableEnforced  40s   kubelet          Updated Node Allocatable limit across pods
  Normal  NodeHasSufficientMemory  40s   kubelet          Node old-k8s-version-295154 status is now: NodeHasSufficientMemory
  Normal  NodeHasNoDiskPressure    40s   kubelet          Node old-k8s-version-295154 status is now: NodeHasNoDiskPressure
  Normal  NodeHasSufficientPID     40s   kubelet          Node old-k8s-version-295154 status is now: NodeHasSufficientPID
  Normal  RegisteredNode           29s   node-controller  Node old-k8s-version-295154 event: Registered Node old-k8s-version-295154 in Controller
  Normal  NodeReady                14s   kubelet          Node old-k8s-version-295154 status is now: NodeReady
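The percentages in the "Allocated resources" table follow directly from the pod table above it: the six pods with CPU requests sum to 850m, and dividing by the node's 8-core (8000m) allocatable capacity gives the reported 10% (truncated from 10.6%). A quick check of that arithmetic, with the values copied from the describe output:

// worked check of the "cpu 850m (10%)" line above
package main

import "fmt"

func main() {
	// coredns + etcd + kindnet + apiserver + controller-manager + scheduler
	requestsMilli := 100 + 100 + 100 + 250 + 200 + 100
	allocatableMilli := 8 * 1000 // 8 CPUs
	fmt.Printf("cpu requests: %dm (%d%%)\n", requestsMilli, requestsMilli*100/allocatableMilli)
	// prints: cpu requests: 850m (10%)
}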
==> dmesg <==
[Nov29 07:17] TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.
[ +0.001881] MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.
[ +0.084003] acpi PNP0A03:00: fail to add MMCONFIG information, can't access extended configuration space under this bridge
[ +0.378167] i8042: Warning: Keylock active
[ +0.012106] device-mapper: core: CONFIG_IMA_DISABLE_HTABLE is disabled. Duplicate IMA measurements will not be recorded in the IMA log.
[ +0.460417] block sda: the capability attribute has been deprecated.
[ +0.079627] systemd[1]: Configuration file /run/systemd/system/netplan-ovs-cleanup.service is marked world-inaccessible. This has no effect as configuration data is accessible via APIs without restrictions. Proceeding anyway.
[ +0.021012] systemd[1]: /lib/systemd/system/snapd.service:23: Unknown key name 'RestartMode' in section 'Service', ignoring.
[ +5.285522] kauditd_printk_skb: 47 callbacks suppressed
==> etcd [e534f6de34cb59a48842df5c90bc3db11dfa608b2f5ab4df9fd455d5a0bc5f86] <==
{"level":"info","ts":"2025-11-29T09:01:40.832264Z","caller":"etcdserver/server.go:738","msg":"started as single-node; fast-forwarding election ticks","local-member-id":"ea7e25599daad906","forward-ticks":9,"forward-duration":"900ms","election-ticks":10,"election-timeout":"1s"}
{"level":"info","ts":"2025-11-29T09:01:40.833809Z","caller":"membership/cluster.go:421","msg":"added member","cluster-id":"6f20f2c4b2fb5f8a","local-member-id":"ea7e25599daad906","added-peer-id":"ea7e25599daad906","added-peer-peer-urls":["https://192.168.76.2:2380"]}
{"level":"info","ts":"2025-11-29T09:01:40.834831Z","caller":"embed/etcd.go:726","msg":"starting with client TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/server.crt, key = /var/lib/minikube/certs/etcd/server.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
{"level":"info","ts":"2025-11-29T09:01:40.835134Z","caller":"embed/etcd.go:278","msg":"now serving peer/client/metrics","local-member-id":"ea7e25599daad906","initial-advertise-peer-urls":["https://192.168.76.2:2380"],"listen-peer-urls":["https://192.168.76.2:2380"],"advertise-client-urls":["https://192.168.76.2:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.76.2:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
{"level":"info","ts":"2025-11-29T09:01:40.835187Z","caller":"embed/etcd.go:855","msg":"serving metrics","address":"http://127.0.0.1:2381"}
{"level":"info","ts":"2025-11-29T09:01:40.835365Z","caller":"embed/etcd.go:597","msg":"serving peer traffic","address":"192.168.76.2:2380"}
{"level":"info","ts":"2025-11-29T09:01:40.835454Z","caller":"embed/etcd.go:569","msg":"cmux::serve","address":"192.168.76.2:2380"}
{"level":"info","ts":"2025-11-29T09:01:41.123873Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 is starting a new election at term 1"}
{"level":"info","ts":"2025-11-29T09:01:41.123935Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 became pre-candidate at term 1"}
{"level":"info","ts":"2025-11-29T09:01:41.123975Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 received MsgPreVoteResp from ea7e25599daad906 at term 1"}
{"level":"info","ts":"2025-11-29T09:01:41.123993Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 became candidate at term 2"}
{"level":"info","ts":"2025-11-29T09:01:41.124004Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 received MsgVoteResp from ea7e25599daad906 at term 2"}
{"level":"info","ts":"2025-11-29T09:01:41.124048Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 became leader at term 2"}
{"level":"info","ts":"2025-11-29T09:01:41.124063Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: ea7e25599daad906 elected leader ea7e25599daad906 at term 2"}
{"level":"info","ts":"2025-11-29T09:01:41.125302Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-29T09:01:41.125326Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-29T09:01:41.125372Z","caller":"etcdserver/server.go:2571","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-29T09:01:41.125276Z","caller":"etcdserver/server.go:2062","msg":"published local member to cluster through raft","local-member-id":"ea7e25599daad906","local-member-attributes":"{Name:old-k8s-version-295154 ClientURLs:[https://192.168.76.2:2379]}","request-path":"/0/members/ea7e25599daad906/attributes","cluster-id":"6f20f2c4b2fb5f8a","publish-timeout":"7s"}
{"level":"info","ts":"2025-11-29T09:01:41.126456Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"6f20f2c4b2fb5f8a","local-member-id":"ea7e25599daad906","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-29T09:01:41.126541Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-29T09:01:41.126567Z","caller":"etcdserver/server.go:2595","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-29T09:01:41.126779Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.76.2:2379"}
{"level":"info","ts":"2025-11-29T09:01:41.127083Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2025-11-29T09:01:41.127112Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2025-11-29T09:01:41.126728Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
==> kernel <==
09:02:26 up 1:44, 0 user, load average: 2.70, 2.84, 12.45
Linux old-k8s-version-295154 6.8.0-1044-gcp #47~22.04.1-Ubuntu SMP Thu Oct 23 21:07:54 UTC 2025 x86_64 GNU/Linux
PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"
==> kindnet [c556471fd7ebd161ba2d7b8d6bae271ee70e193598e07a1f28e7e4edb21ff0ac] <==
I1129 09:02:02.479657 1 main.go:109] connected to apiserver: https://10.96.0.1:443
I1129 09:02:02.479993 1 main.go:139] hostIP = 192.168.76.2
podIP = 192.168.76.2
I1129 09:02:02.480115 1 main.go:148] setting mtu 1500 for CNI
I1129 09:02:02.480129 1 main.go:178] kindnetd IP family: "ipv4"
I1129 09:02:02.480148 1 main.go:182] noMask IPv4 subnets: [10.244.0.0/16]
time="2025-11-29T09:02:02Z" level=info msg="Created plugin 10-kube-network-policies (kindnetd, handles RunPodSandbox,RemovePodSandbox)"
I1129 09:02:02.682312 1 controller.go:377] "Starting controller" name="kube-network-policies"
I1129 09:02:02.682392 1 controller.go:381] "Waiting for informer caches to sync"
I1129 09:02:02.682406 1 shared_informer.go:350] "Waiting for caches to sync" controller="kube-network-policies"
I1129 09:02:02.682562 1 controller.go:390] nri plugin exited: failed to connect to NRI service: dial unix /var/run/nri/nri.sock: connect: no such file or directory
I1129 09:02:03.155518 1 shared_informer.go:357] "Caches are synced" controller="kube-network-policies"
I1129 09:02:03.155556 1 metrics.go:72] Registering metrics
I1129 09:02:03.155642 1 controller.go:711] "Syncing nftables rules"
I1129 09:02:12.691133 1 main.go:297] Handling node with IPs: map[192.168.76.2:{}]
I1129 09:02:12.691191 1 main.go:301] handling current node
I1129 09:02:22.684230 1 main.go:297] Handling node with IPs: map[192.168.76.2:{}]
I1129 09:02:22.684264 1 main.go:301] handling current node
==> kube-apiserver [c912b0431f5b96b6ae8d3df9e39af5a731f5b6f4a3128fbae403427258cd4010] <==
I1129 09:01:42.628432 1 shared_informer.go:318] Caches are synced for crd-autoregister
I1129 09:01:42.628473 1 aggregator.go:166] initial CRD sync complete...
I1129 09:01:42.628487 1 autoregister_controller.go:141] Starting autoregister controller
I1129 09:01:42.628498 1 cache.go:32] Waiting for caches to sync for autoregister controller
I1129 09:01:42.628507 1 cache.go:39] Caches are synced for autoregister controller
I1129 09:01:42.630276 1 controller.go:624] quota admission added evaluator for: namespaces
I1129 09:01:42.631842 1 shared_informer.go:318] Caches are synced for cluster_authentication_trust_controller
I1129 09:01:42.632653 1 cache.go:39] Caches are synced for APIServiceRegistrationController controller
I1129 09:01:42.633160 1 cache.go:39] Caches are synced for AvailableConditionController controller
I1129 09:01:42.675946 1 controller.go:624] quota admission added evaluator for: leases.coordination.k8s.io
I1129 09:01:43.534299 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I1129 09:01:43.538893 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I1129 09:01:43.538914 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I1129 09:01:44.048669 1 controller.go:624] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1129 09:01:44.089332 1 controller.go:624] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1129 09:01:44.139778 1 alloc.go:330] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W1129 09:01:44.147964 1 lease.go:263] Resetting endpoints for master service "kubernetes" to [192.168.76.2]
I1129 09:01:44.149152 1 controller.go:624] quota admission added evaluator for: endpoints
I1129 09:01:44.153475 1 controller.go:624] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1129 09:01:44.583851 1 controller.go:624] quota admission added evaluator for: serviceaccounts
I1129 09:01:45.899683 1 controller.go:624] quota admission added evaluator for: deployments.apps
I1129 09:01:45.911834 1 alloc.go:330] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I1129 09:01:45.923913 1 controller.go:624] quota admission added evaluator for: daemonsets.apps
I1129 09:01:58.190396 1 controller.go:624] quota admission added evaluator for: controllerrevisions.apps
I1129 09:01:58.345309 1 controller.go:624] quota admission added evaluator for: replicasets.apps
==> kube-controller-manager [b3d9ef849b10991879886d480043efb13728841f71afc04d4c57f7bef3ceffc8] <==
I1129 09:01:57.601489 1 shared_informer.go:318] Caches are synced for HPA
I1129 09:01:57.641964 1 shared_informer.go:318] Caches are synced for resource quota
I1129 09:01:57.693466 1 shared_informer.go:318] Caches are synced for resource quota
I1129 09:01:58.013319 1 shared_informer.go:318] Caches are synced for garbage collector
I1129 09:01:58.081463 1 shared_informer.go:318] Caches are synced for garbage collector
I1129 09:01:58.081502 1 garbagecollector.go:166] "All resource monitors have synced. Proceeding to collect garbage"
I1129 09:01:58.201293 1 event.go:307] "Event occurred" object="kube-system/kindnet" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kindnet-k4n9l"
I1129 09:01:58.203642 1 event.go:307] "Event occurred" object="kube-system/kube-proxy" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-4rfb4"
I1129 09:01:58.351467 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set coredns-5dd5756b68 to 2"
I1129 09:01:58.446469 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-rjd8l"
I1129 09:01:58.457821 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-phw28"
I1129 09:01:58.472248 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="121.660505ms"
I1129 09:01:58.490138 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="17.818584ms"
I1129 09:01:58.490294 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="98.203µs"
I1129 09:01:58.749707 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set coredns-5dd5756b68 to 1 from 2"
I1129 09:01:58.764048 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: coredns-5dd5756b68-rjd8l"
I1129 09:01:58.771830 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="24.493664ms"
I1129 09:01:58.778438 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="6.545401ms"
I1129 09:01:58.778711 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="56.414µs"
I1129 09:02:12.741856 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="137.043µs"
I1129 09:02:12.755154 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="122.723µs"
I1129 09:02:14.089302 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="163.286µs"
I1129 09:02:14.110178 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="8.287126ms"
I1129 09:02:14.110300 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="75.729µs"
I1129 09:02:17.447692 1 node_lifecycle_controller.go:1048] "Controller detected that some Nodes are Ready. Exiting master disruption mode"
==> kube-proxy [c3eb6059b5593e42d8e9ac6b43ac8b87e944eac5747f993c6bbca2acc16f180b] <==
I1129 09:01:58.837203 1 server_others.go:69] "Using iptables proxy"
I1129 09:01:58.847060 1 node.go:141] Successfully retrieved node IP: 192.168.76.2
I1129 09:01:58.872286 1 server.go:632] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I1129 09:01:58.874956 1 server_others.go:152] "Using iptables Proxier"
I1129 09:01:58.875022 1 server_others.go:421] "Detect-local-mode set to ClusterCIDR, but no cluster CIDR for family" ipFamily="IPv6"
I1129 09:01:58.875038 1 server_others.go:438] "Defaulting to no-op detect-local"
I1129 09:01:58.875085 1 proxier.go:251] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
I1129 09:01:58.875423 1 server.go:846] "Version info" version="v1.28.0"
I1129 09:01:58.875446 1 server.go:848] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1129 09:01:58.877361 1 config.go:188] "Starting service config controller"
I1129 09:01:58.877426 1 shared_informer.go:311] Waiting for caches to sync for service config
I1129 09:01:58.878055 1 config.go:97] "Starting endpoint slice config controller"
I1129 09:01:58.878080 1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
I1129 09:01:58.878567 1 config.go:315] "Starting node config controller"
I1129 09:01:58.878812 1 shared_informer.go:311] Waiting for caches to sync for node config
I1129 09:01:58.977719 1 shared_informer.go:318] Caches are synced for service config
I1129 09:01:58.978897 1 shared_informer.go:318] Caches are synced for endpoint slice config
I1129 09:01:58.979002 1 shared_informer.go:318] Caches are synced for node config
==> kube-scheduler [ec1e8ae808249468b5a57a4c1aa02a0700a8af9e46e3b394b96fda393ef3531b] <==
E1129 09:01:42.591266 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E1129 09:01:42.591281 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
W1129 09:01:43.438322 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E1129 09:01:43.438354 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
W1129 09:01:43.459244 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E1129 09:01:43.459274 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
W1129 09:01:43.466076 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E1129 09:01:43.466111 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
W1129 09:01:43.467104 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E1129 09:01:43.467131 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
W1129 09:01:43.496506 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E1129 09:01:43.496554 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
W1129 09:01:43.745308 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E1129 09:01:43.745358 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
W1129 09:01:43.782232 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E1129 09:01:43.782279 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
W1129 09:01:43.784711 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E1129 09:01:43.784785 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
W1129 09:01:43.822287 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E1129 09:01:43.822413 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
W1129 09:01:43.831935 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E1129 09:01:43.831979 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
W1129 09:01:44.009190 1 reflector.go:535] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E1129 09:01:44.009227 1 reflector.go:147] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
I1129 09:01:46.586725 1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Nov 29 09:01:57 old-k8s-version-295154 kubelet[1505]: I1129 09:01:57.557701 1505 kubelet_network.go:61] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24"
Nov 29 09:01:58 old-k8s-version-295154 kubelet[1505]: I1129 09:01:58.211770 1505 topology_manager.go:215] "Topology Admit Handler" podUID="74cdf2cd-3f3a-4be5-9a9f-6d0b67090fb8" podNamespace="kube-system" podName="kindnet-k4n9l"
Nov 29 09:01:58 old-k8s-version-295154 kubelet[1505]: I1129 09:01:58.211977 1505 topology_manager.go:215] "Topology Admit Handler" podUID="05ef67c3-0d6e-453d-a0e5-81c649c3e033" podNamespace="kube-system" podName="kube-proxy-4rfb4"
Nov 29 09:01:58 old-k8s-version-295154 kubelet[1505]: I1129 09:01:58.245664 1505 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kvjhl\" (UniqueName: \"kubernetes.io/projected/74cdf2cd-3f3a-4be5-9a9f-6d0b67090fb8-kube-api-access-kvjhl\") pod \"kindnet-k4n9l\" (UID: \"74cdf2cd-3f3a-4be5-9a9f-6d0b67090fb8\") " pod="kube-system/kindnet-k4n9l"
Nov 29 09:01:58 old-k8s-version-295154 kubelet[1505]: I1129 09:01:58.245757 1505 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-cfg\" (UniqueName: \"kubernetes.io/host-path/74cdf2cd-3f3a-4be5-9a9f-6d0b67090fb8-cni-cfg\") pod \"kindnet-k4n9l\" (UID: \"74cdf2cd-3f3a-4be5-9a9f-6d0b67090fb8\") " pod="kube-system/kindnet-k4n9l"
Nov 29 09:01:58 old-k8s-version-295154 kubelet[1505]: I1129 09:01:58.245804 1505 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/74cdf2cd-3f3a-4be5-9a9f-6d0b67090fb8-lib-modules\") pod \"kindnet-k4n9l\" (UID: \"74cdf2cd-3f3a-4be5-9a9f-6d0b67090fb8\") " pod="kube-system/kindnet-k4n9l"
Nov 29 09:01:58 old-k8s-version-295154 kubelet[1505]: I1129 09:01:58.245867 1505 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/05ef67c3-0d6e-453d-a0e5-81c649c3e033-xtables-lock\") pod \"kube-proxy-4rfb4\" (UID: \"05ef67c3-0d6e-453d-a0e5-81c649c3e033\") " pod="kube-system/kube-proxy-4rfb4"
Nov 29 09:01:58 old-k8s-version-295154 kubelet[1505]: I1129 09:01:58.245918 1505 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/05ef67c3-0d6e-453d-a0e5-81c649c3e033-lib-modules\") pod \"kube-proxy-4rfb4\" (UID: \"05ef67c3-0d6e-453d-a0e5-81c649c3e033\") " pod="kube-system/kube-proxy-4rfb4"
Nov 29 09:01:58 old-k8s-version-295154 kubelet[1505]: I1129 09:01:58.245964 1505 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/05ef67c3-0d6e-453d-a0e5-81c649c3e033-kube-proxy\") pod \"kube-proxy-4rfb4\" (UID: \"05ef67c3-0d6e-453d-a0e5-81c649c3e033\") " pod="kube-system/kube-proxy-4rfb4"
Nov 29 09:01:58 old-k8s-version-295154 kubelet[1505]: I1129 09:01:58.245999 1505 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/74cdf2cd-3f3a-4be5-9a9f-6d0b67090fb8-xtables-lock\") pod \"kindnet-k4n9l\" (UID: \"74cdf2cd-3f3a-4be5-9a9f-6d0b67090fb8\") " pod="kube-system/kindnet-k4n9l"
Nov 29 09:01:58 old-k8s-version-295154 kubelet[1505]: I1129 09:01:58.246031 1505 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l6tpd\" (UniqueName: \"kubernetes.io/projected/05ef67c3-0d6e-453d-a0e5-81c649c3e033-kube-api-access-l6tpd\") pod \"kube-proxy-4rfb4\" (UID: \"05ef67c3-0d6e-453d-a0e5-81c649c3e033\") " pod="kube-system/kube-proxy-4rfb4"
Nov 29 09:01:59 old-k8s-version-295154 kubelet[1505]: I1129 09:01:59.051481 1505 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kube-proxy-4rfb4" podStartSLOduration=1.051403893 podCreationTimestamp="2025-11-29 09:01:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 09:01:59.051034434 +0000 UTC m=+13.185091147" watchObservedRunningTime="2025-11-29 09:01:59.051403893 +0000 UTC m=+13.185460607"
Nov 29 09:02:03 old-k8s-version-295154 kubelet[1505]: I1129 09:02:03.075069 1505 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kindnet-k4n9l" podStartSLOduration=1.8021440370000001 podCreationTimestamp="2025-11-29 09:01:58 +0000 UTC" firstStartedPulling="2025-11-29 09:01:58.884230342 +0000 UTC m=+13.018287046" lastFinishedPulling="2025-11-29 09:02:02.157002868 +0000 UTC m=+16.291059564" observedRunningTime="2025-11-29 09:02:03.074620988 +0000 UTC m=+17.208677701" watchObservedRunningTime="2025-11-29 09:02:03.074916555 +0000 UTC m=+17.208973271"
Nov 29 09:02:12 old-k8s-version-295154 kubelet[1505]: I1129 09:02:12.718189 1505 kubelet_node_status.go:493] "Fast updating node status as it just became ready"
Nov 29 09:02:12 old-k8s-version-295154 kubelet[1505]: I1129 09:02:12.741770 1505 topology_manager.go:215] "Topology Admit Handler" podUID="7fc2b8dd-43dd-43df-8887-9ffa6de36fb4" podNamespace="kube-system" podName="coredns-5dd5756b68-phw28"
Nov 29 09:02:12 old-k8s-version-295154 kubelet[1505]: I1129 09:02:12.742156 1505 topology_manager.go:215] "Topology Admit Handler" podUID="359871fd-a77c-430a-87c1-b313992718e2" podNamespace="kube-system" podName="storage-provisioner"
Nov 29 09:02:12 old-k8s-version-295154 kubelet[1505]: I1129 09:02:12.838446 1505 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sztkn\" (UniqueName: \"kubernetes.io/projected/7fc2b8dd-43dd-43df-8887-9ffa6de36fb4-kube-api-access-sztkn\") pod \"coredns-5dd5756b68-phw28\" (UID: \"7fc2b8dd-43dd-43df-8887-9ffa6de36fb4\") " pod="kube-system/coredns-5dd5756b68-phw28"
Nov 29 09:02:12 old-k8s-version-295154 kubelet[1505]: I1129 09:02:12.838527 1505 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2ghrm\" (UniqueName: \"kubernetes.io/projected/359871fd-a77c-430a-87c1-b313992718e2-kube-api-access-2ghrm\") pod \"storage-provisioner\" (UID: \"359871fd-a77c-430a-87c1-b313992718e2\") " pod="kube-system/storage-provisioner"
Nov 29 09:02:12 old-k8s-version-295154 kubelet[1505]: I1129 09:02:12.838708 1505 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7fc2b8dd-43dd-43df-8887-9ffa6de36fb4-config-volume\") pod \"coredns-5dd5756b68-phw28\" (UID: \"7fc2b8dd-43dd-43df-8887-9ffa6de36fb4\") " pod="kube-system/coredns-5dd5756b68-phw28"
Nov 29 09:02:12 old-k8s-version-295154 kubelet[1505]: I1129 09:02:12.838811 1505 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/359871fd-a77c-430a-87c1-b313992718e2-tmp\") pod \"storage-provisioner\" (UID: \"359871fd-a77c-430a-87c1-b313992718e2\") " pod="kube-system/storage-provisioner"
Nov 29 09:02:14 old-k8s-version-295154 kubelet[1505]: I1129 09:02:14.089000 1505 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/coredns-5dd5756b68-phw28" podStartSLOduration=16.088943107 podCreationTimestamp="2025-11-29 09:01:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 09:02:14.088869179 +0000 UTC m=+28.222925894" watchObservedRunningTime="2025-11-29 09:02:14.088943107 +0000 UTC m=+28.222999821"
Nov 29 09:02:14 old-k8s-version-295154 kubelet[1505]: I1129 09:02:14.111723 1505 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=16.111665904 podCreationTimestamp="2025-11-29 09:01:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 09:02:14.111613929 +0000 UTC m=+28.245670654" watchObservedRunningTime="2025-11-29 09:02:14.111665904 +0000 UTC m=+28.245722610"
Nov 29 09:02:16 old-k8s-version-295154 kubelet[1505]: I1129 09:02:16.130277 1505 topology_manager.go:215] "Topology Admit Handler" podUID="54baf2f4-8de5-4f66-92ac-f5315174d940" podNamespace="default" podName="busybox"
Nov 29 09:02:16 old-k8s-version-295154 kubelet[1505]: I1129 09:02:16.160532 1505 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wj46k\" (UniqueName: \"kubernetes.io/projected/54baf2f4-8de5-4f66-92ac-f5315174d940-kube-api-access-wj46k\") pod \"busybox\" (UID: \"54baf2f4-8de5-4f66-92ac-f5315174d940\") " pod="default/busybox"
Nov 29 09:02:20 old-k8s-version-295154 kubelet[1505]: I1129 09:02:20.102644 1505 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="default/busybox" podStartSLOduration=1.465512975 podCreationTimestamp="2025-11-29 09:02:16 +0000 UTC" firstStartedPulling="2025-11-29 09:02:16.555803596 +0000 UTC m=+30.689860305" lastFinishedPulling="2025-11-29 09:02:19.192874383 +0000 UTC m=+33.326931083" observedRunningTime="2025-11-29 09:02:20.102453338 +0000 UTC m=+34.236510058" watchObservedRunningTime="2025-11-29 09:02:20.102583753 +0000 UTC m=+34.236640469"
==> storage-provisioner [c2b64aca34f8b72337fd1dd9bda969ab607f739b3b5bd64a9962706bb51f1368] <==
I1129 09:02:13.242146 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1129 09:02:13.250320 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1129 09:02:13.250375 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I1129 09:02:13.260646 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I1129 09:02:13.260835 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"3d38b917-49d9-4ce8-b6d4-33e78e4354a6", APIVersion:"v1", ResourceVersion:"393", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' old-k8s-version-295154_6170b45d-8612-41e5-bb3d-e5fe156c196d became leader
I1129 09:02:13.260885 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_old-k8s-version-295154_6170b45d-8612-41e5-bb3d-e5fe156c196d!
I1129 09:02:13.362157 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_old-k8s-version-295154_6170b45d-8612-41e5-bb3d-e5fe156c196d!
-- /stdout --
helpers_test.go:262: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p old-k8s-version-295154 -n old-k8s-version-295154
helpers_test.go:269: (dbg) Run: kubectl --context old-k8s-version-295154 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:293: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: end of post-mortem logs <<<
helpers_test.go:294: ---------------------/post-mortem---------------------------------
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p old-k8s-version-295154 -n old-k8s-version-295154
helpers_test.go:252: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-amd64 -p old-k8s-version-295154 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-amd64 -p old-k8s-version-295154 logs -n 25: (1.135224056s)
helpers_test.go:260: TestStartStop/group/old-k8s-version/serial/DeployApp logs:
-- stdout --
==> Audit <==
┌─────────┬─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬──────────────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼──────────────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ ssh │ -p cilium-770004 sudo systemctl status containerd --all --full --no-pager │ cilium-770004 │ jenkins │ v1.37.0 │ 29 Nov 25 09:00 UTC │ │
│ ssh │ -p cilium-770004 sudo systemctl cat containerd --no-pager │ cilium-770004 │ jenkins │ v1.37.0 │ 29 Nov 25 09:00 UTC │ │
│ ssh │ -p cilium-770004 sudo cat /lib/systemd/system/containerd.service │ cilium-770004 │ jenkins │ v1.37.0 │ 29 Nov 25 09:00 UTC │ │
│ ssh │ -p cilium-770004 sudo cat /etc/containerd/config.toml │ cilium-770004 │ jenkins │ v1.37.0 │ 29 Nov 25 09:00 UTC │ │
│ ssh │ -p cilium-770004 sudo containerd config dump │ cilium-770004 │ jenkins │ v1.37.0 │ 29 Nov 25 09:00 UTC │ │
│ ssh │ -p cilium-770004 sudo systemctl status crio --all --full --no-pager │ cilium-770004 │ jenkins │ v1.37.0 │ 29 Nov 25 09:00 UTC │ │
│ ssh │ -p cilium-770004 sudo systemctl cat crio --no-pager │ cilium-770004 │ jenkins │ v1.37.0 │ 29 Nov 25 09:00 UTC │ │
│ ssh │ -p cilium-770004 sudo find /etc/crio -type f -exec sh -c 'echo {}; cat {}' \; │ cilium-770004 │ jenkins │ v1.37.0 │ 29 Nov 25 09:00 UTC │ │
│ ssh │ -p cilium-770004 sudo crio config │ cilium-770004 │ jenkins │ v1.37.0 │ 29 Nov 25 09:00 UTC │ │
│ delete │ -p cilium-770004 │ cilium-770004 │ jenkins │ v1.37.0 │ 29 Nov 25 09:00 UTC │ 29 Nov 25 09:00 UTC │
│ start │ -p force-systemd-env-693869 --memory=3072 --alsologtostderr -v=5 --driver=docker --container-runtime=containerd │ force-systemd-env-693869 │ jenkins │ v1.37.0 │ 29 Nov 25 09:00 UTC │ 29 Nov 25 09:01 UTC │
│ start │ -p pause-563162 --alsologtostderr -v=1 --driver=docker --container-runtime=containerd │ pause-563162 │ jenkins │ v1.37.0 │ 29 Nov 25 09:00 UTC │ 29 Nov 25 09:01 UTC │
│ ssh │ force-systemd-env-693869 ssh cat /etc/containerd/config.toml │ force-systemd-env-693869 │ jenkins │ v1.37.0 │ 29 Nov 25 09:01 UTC │ 29 Nov 25 09:01 UTC │
│ delete │ -p force-systemd-env-693869 │ force-systemd-env-693869 │ jenkins │ v1.37.0 │ 29 Nov 25 09:01 UTC │ 29 Nov 25 09:01 UTC │
│ start │ -p cert-options-536258 --memory=3072 --apiserver-ips=127.0.0.1 --apiserver-ips=192.168.15.15 --apiserver-names=localhost --apiserver-names=www.google.com --apiserver-port=8555 --driver=docker --container-runtime=containerd │ cert-options-536258 │ jenkins │ v1.37.0 │ 29 Nov 25 09:01 UTC │ 29 Nov 25 09:01 UTC │
│ pause │ -p pause-563162 --alsologtostderr -v=5 │ pause-563162 │ jenkins │ v1.37.0 │ 29 Nov 25 09:01 UTC │ 29 Nov 25 09:01 UTC │
│ unpause │ -p pause-563162 --alsologtostderr -v=5 │ pause-563162 │ jenkins │ v1.37.0 │ 29 Nov 25 09:01 UTC │ 29 Nov 25 09:01 UTC │
│ pause │ -p pause-563162 --alsologtostderr -v=5 │ pause-563162 │ jenkins │ v1.37.0 │ 29 Nov 25 09:01 UTC │ 29 Nov 25 09:01 UTC │
│ delete │ -p pause-563162 --alsologtostderr -v=5 │ pause-563162 │ jenkins │ v1.37.0 │ 29 Nov 25 09:01 UTC │ 29 Nov 25 09:01 UTC │
│ ssh │ cert-options-536258 ssh openssl x509 -text -noout -in /var/lib/minikube/certs/apiserver.crt │ cert-options-536258 │ jenkins │ v1.37.0 │ 29 Nov 25 09:01 UTC │ 29 Nov 25 09:01 UTC │
│ ssh │ -p cert-options-536258 -- sudo cat /etc/kubernetes/admin.conf │ cert-options-536258 │ jenkins │ v1.37.0 │ 29 Nov 25 09:01 UTC │ 29 Nov 25 09:01 UTC │
│ delete │ -p cert-options-536258 │ cert-options-536258 │ jenkins │ v1.37.0 │ 29 Nov 25 09:01 UTC │ 29 Nov 25 09:01 UTC │
│ delete │ -p pause-563162 │ pause-563162 │ jenkins │ v1.37.0 │ 29 Nov 25 09:01 UTC │ 29 Nov 25 09:01 UTC │
│ start │ -p old-k8s-version-295154 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.28.0 │ old-k8s-version-295154 │ jenkins │ v1.37.0 │ 29 Nov 25 09:01 UTC │ 29 Nov 25 09:02 UTC │
│ start │ -p no-preload-924441 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.34.1 │ no-preload-924441 │ jenkins │ v1.37.0 │ 29 Nov 25 09:01 UTC │ 29 Nov 25 09:02 UTC │
└─────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/11/29 09:01:29
Running on machine: ubuntu-20-agent-10
Binary: Built with gc go1.25.3 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1129 09:01:26.371812 460401 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
I1129 09:01:26.372231 460401 api_server.go:269] stopped: https://192.168.85.2:8443/healthz: Get "https://192.168.85.2:8443/healthz": dial tcp 192.168.85.2:8443: connect: connection refused
I1129 09:01:26.372304 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1129 09:01:26.372374 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1129 09:01:26.406988 460401 cri.go:89] found id: "5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095"
I1129 09:01:26.407016 460401 cri.go:89] found id: "40c6f3e103ae72dbb12c815df4659a1277b1a92060d18c5eb8f7b2d5365f14ac"
I1129 09:01:26.407022 460401 cri.go:89] found id: "1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:01:26.407027 460401 cri.go:89] found id: ""
I1129 09:01:26.407038 460401 logs.go:282] 3 containers: [5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095 40c6f3e103ae72dbb12c815df4659a1277b1a92060d18c5eb8f7b2d5365f14ac 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101]
I1129 09:01:26.407111 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:26.413707 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:26.419492 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:26.424920 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1129 09:01:26.424999 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1129 09:01:26.456369 460401 cri.go:89] found id: "f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:01:26.456395 460401 cri.go:89] found id: ""
I1129 09:01:26.456406 460401 logs.go:282] 1 containers: [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625]
I1129 09:01:26.456466 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:26.462064 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1129 09:01:26.462133 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1129 09:01:26.492837 460401 cri.go:89] found id: ""
I1129 09:01:26.492868 460401 logs.go:282] 0 containers: []
W1129 09:01:26.492879 460401 logs.go:284] No container was found matching "coredns"
I1129 09:01:26.492887 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1129 09:01:26.492955 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1129 09:01:26.521715 460401 cri.go:89] found id: "092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:01:26.521747 460401 cri.go:89] found id: "1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:01:26.521754 460401 cri.go:89] found id: ""
I1129 09:01:26.521763 460401 logs.go:282] 2 containers: [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea]
I1129 09:01:26.521821 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:26.526872 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:26.531295 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1129 09:01:26.531353 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1129 09:01:26.558218 460401 cri.go:89] found id: ""
I1129 09:01:26.558248 460401 logs.go:282] 0 containers: []
W1129 09:01:26.558257 460401 logs.go:284] No container was found matching "kube-proxy"
I1129 09:01:26.558264 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1129 09:01:26.558313 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1129 09:01:26.587221 460401 cri.go:89] found id: "2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:01:26.587246 460401 cri.go:89] found id: "f78d0d97ffa9f2d0cbf8a0cf305a7f0c4323a505bb9b3fa272405c6b22ab9f00"
I1129 09:01:26.587253 460401 cri.go:89] found id: "976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:01:26.587258 460401 cri.go:89] found id: ""
I1129 09:01:26.587268 460401 logs.go:282] 3 containers: [2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a f78d0d97ffa9f2d0cbf8a0cf305a7f0c4323a505bb9b3fa272405c6b22ab9f00 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d]
I1129 09:01:26.587328 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:26.591954 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:26.596055 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:26.600163 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1129 09:01:26.600219 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1129 09:01:26.628586 460401 cri.go:89] found id: ""
I1129 09:01:26.628613 460401 logs.go:282] 0 containers: []
W1129 09:01:26.628624 460401 logs.go:284] No container was found matching "kindnet"
I1129 09:01:26.628633 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1129 09:01:26.628690 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1129 09:01:26.657553 460401 cri.go:89] found id: ""
I1129 09:01:26.657581 460401 logs.go:282] 0 containers: []
W1129 09:01:26.657591 460401 logs.go:284] No container was found matching "storage-provisioner"
I1129 09:01:26.657603 460401 logs.go:123] Gathering logs for describe nodes ...
I1129 09:01:26.657622 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1129 09:01:26.721559 460401 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1129 09:01:26.721584 460401 logs.go:123] Gathering logs for kube-controller-manager [2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a] ...
I1129 09:01:26.721601 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:01:26.756136 460401 logs.go:123] Gathering logs for kube-controller-manager [f78d0d97ffa9f2d0cbf8a0cf305a7f0c4323a505bb9b3fa272405c6b22ab9f00] ...
I1129 09:01:26.756165 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f78d0d97ffa9f2d0cbf8a0cf305a7f0c4323a505bb9b3fa272405c6b22ab9f00"
I1129 09:01:26.787789 460401 logs.go:123] Gathering logs for containerd ...
I1129 09:01:26.787827 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1129 09:01:26.838908 460401 logs.go:123] Gathering logs for container status ...
I1129 09:01:26.838943 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1129 09:01:26.875689 460401 logs.go:123] Gathering logs for kubelet ...
I1129 09:01:26.875723 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1129 09:01:26.946907 460401 logs.go:123] Gathering logs for kube-apiserver [5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095] ...
I1129 09:01:26.946941 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095"
I1129 09:01:26.982883 460401 logs.go:123] Gathering logs for kube-apiserver [40c6f3e103ae72dbb12c815df4659a1277b1a92060d18c5eb8f7b2d5365f14ac] ...
I1129 09:01:26.982919 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 40c6f3e103ae72dbb12c815df4659a1277b1a92060d18c5eb8f7b2d5365f14ac"
W1129 09:01:27.012923 460401 logs.go:130] failed kube-apiserver [40c6f3e103ae72dbb12c815df4659a1277b1a92060d18c5eb8f7b2d5365f14ac]: command: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 40c6f3e103ae72dbb12c815df4659a1277b1a92060d18c5eb8f7b2d5365f14ac" /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 40c6f3e103ae72dbb12c815df4659a1277b1a92060d18c5eb8f7b2d5365f14ac": Process exited with status 1
stdout:
stderr:
E1129 09:01:27.010611 2688 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = an error occurred when try to find container \"40c6f3e103ae72dbb12c815df4659a1277b1a92060d18c5eb8f7b2d5365f14ac\": not found" containerID="40c6f3e103ae72dbb12c815df4659a1277b1a92060d18c5eb8f7b2d5365f14ac"
time="2025-11-29T09:01:27Z" level=fatal msg="rpc error: code = NotFound desc = an error occurred when try to find container \"40c6f3e103ae72dbb12c815df4659a1277b1a92060d18c5eb8f7b2d5365f14ac\": not found"
output:
** stderr **
E1129 09:01:27.010611 2688 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = an error occurred when try to find container \"40c6f3e103ae72dbb12c815df4659a1277b1a92060d18c5eb8f7b2d5365f14ac\": not found" containerID="40c6f3e103ae72dbb12c815df4659a1277b1a92060d18c5eb8f7b2d5365f14ac"
time="2025-11-29T09:01:27Z" level=fatal msg="rpc error: code = NotFound desc = an error occurred when try to find container \"40c6f3e103ae72dbb12c815df4659a1277b1a92060d18c5eb8f7b2d5365f14ac\": not found"
** /stderr **
I1129 09:01:27.012941 460401 logs.go:123] Gathering logs for kube-apiserver [1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101] ...
I1129 09:01:27.012953 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:01:27.051493 460401 logs.go:123] Gathering logs for etcd [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625] ...
I1129 09:01:27.051526 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:01:27.089722 460401 logs.go:123] Gathering logs for kube-scheduler [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21] ...
I1129 09:01:27.089755 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:01:27.138471 460401 logs.go:123] Gathering logs for kube-scheduler [1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea] ...
I1129 09:01:27.138504 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:01:27.172932 460401 logs.go:123] Gathering logs for kube-controller-manager [976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d] ...
I1129 09:01:27.172962 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:01:27.207844 460401 logs.go:123] Gathering logs for dmesg ...
I1129 09:01:27.207878 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1129 09:01:29.500031 494126 out.go:360] Setting OutFile to fd 1 ...
I1129 09:01:29.500142 494126 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1129 09:01:29.500153 494126 out.go:374] Setting ErrFile to fd 2...
I1129 09:01:29.500159 494126 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1129 09:01:29.500372 494126 root.go:338] Updating PATH: /home/jenkins/minikube-integration/22000-255825/.minikube/bin
I1129 09:01:29.500882 494126 out.go:368] Setting JSON to false
I1129 09:01:29.501996 494126 start.go:133] hostinfo: {"hostname":"ubuntu-20-agent-10","uptime":6233,"bootTime":1764400656,"procs":294,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1044-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I1129 09:01:29.502070 494126 start.go:143] virtualization: kvm guest
I1129 09:01:29.506976 494126 out.go:179] * [no-preload-924441] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
I1129 09:01:29.508162 494126 out.go:179] - MINIKUBE_LOCATION=22000
I1129 09:01:29.508182 494126 notify.go:221] Checking for updates...
I1129 09:01:29.510318 494126 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1129 09:01:29.511334 494126 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/22000-255825/kubeconfig
I1129 09:01:29.516252 494126 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/22000-255825/.minikube
I1129 09:01:29.517321 494126 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64
I1129 09:01:29.518374 494126 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1129 09:01:29.519877 494126 config.go:182] Loaded profile config "cert-expiration-368536": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1129 09:01:29.519989 494126 config.go:182] Loaded profile config "kubernetes-upgrade-806701": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1129 09:01:29.520095 494126 config.go:182] Loaded profile config "old-k8s-version-295154": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1129 09:01:29.520225 494126 driver.go:422] Setting default libvirt URI to qemu:///system
I1129 09:01:29.546023 494126 docker.go:124] docker version: linux-29.1.1:Docker Engine - Community
I1129 09:01:29.546141 494126 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1129 09:01:29.607775 494126 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:3 ContainersRunning:3 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:63 OomKillDisable:false NGoroutines:81 SystemTime:2025-11-29 09:01:29.596891851 +0000 UTC LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-1044-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652068352 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-10 Labels:[] ExperimentalBuild:false ServerVersion:29.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:1c4457e00facac03ce1d75f7b6777a7a851e5c41 Expected:} RuncCommit:{ID:v1.3.4-0-gd6d73eb8 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.30.1] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.40.3] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v1.0.2] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1129 09:01:29.607908 494126 docker.go:319] overlay module found
I1129 09:01:29.610288 494126 out.go:179] * Using the docker driver based on user configuration
I1129 09:01:29.611200 494126 start.go:309] selected driver: docker
I1129 09:01:29.611220 494126 start.go:927] validating driver "docker" against <nil>
I1129 09:01:29.611231 494126 start.go:938] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1129 09:01:29.611850 494126 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1129 09:01:29.673266 494126 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:3 ContainersRunning:3 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:63 OomKillDisable:false NGoroutines:81 SystemTime:2025-11-29 09:01:29.662655452 +0000 UTC LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-1044-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652068352 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-10 Labels:[] ExperimentalBuild:false ServerVersion:29.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:1c4457e00facac03ce1d75f7b6777a7a851e5c41 Expected:} RuncCommit:{ID:v1.3.4-0-gd6d73eb8 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.30.1] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.40.3] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v1.0.2] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1129 09:01:29.673484 494126 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I1129 09:01:29.673822 494126 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1129 09:01:29.675454 494126 out.go:179] * Using Docker driver with root privileges
I1129 09:01:29.679127 494126 cni.go:84] Creating CNI manager for ""
I1129 09:01:29.679243 494126 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1129 09:01:29.679264 494126 start_flags.go:336] Found "CNI" CNI - setting NetworkPlugin=cni
I1129 09:01:29.679351 494126 start.go:353] cluster config:
{Name:no-preload-924441 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-924441 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1129 09:01:29.680591 494126 out.go:179] * Starting "no-preload-924441" primary control-plane node in "no-preload-924441" cluster
I1129 09:01:29.681517 494126 cache.go:134] Beginning downloading kic base image for docker with containerd
I1129 09:01:29.682533 494126 out.go:179] * Pulling base image v0.0.48-1763789673-21948 ...
I1129 09:01:29.683845 494126 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
I1129 09:01:29.683975 494126 profile.go:143] Saving config to /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/config.json ...
I1129 09:01:29.683971 494126 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f in local docker daemon
I1129 09:01:29.684042 494126 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/config.json: {Name:mk4df9140f26fdbfe5b2addb71b44607d26b26a3 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:01:29.684181 494126 cache.go:107] acquiring lock: {Name:mka90f7eac55a6e5d6d9651fc108f327509b562f Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1129 09:01:29.684233 494126 cache.go:107] acquiring lock: {Name:mk2c250a4202b546a18f0cc7664314439a4ec834 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1129 09:01:29.684259 494126 cache.go:107] acquiring lock: {Name:mk976aaa4e01b0c9e83cc6925b8c3c72804bfa25 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1129 09:01:29.684288 494126 cache.go:115] /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 exists
I1129 09:01:29.684299 494126 cache.go:96] cache image "gcr.io/k8s-minikube/storage-provisioner:v5" -> "/home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5" took 144.373µs
I1129 09:01:29.684315 494126 cache.go:80] save to tar file gcr.io/k8s-minikube/storage-provisioner:v5 -> /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 succeeded
I1129 09:01:29.684321 494126 cache.go:115] /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1 exists
I1129 09:01:29.684322 494126 cache.go:115] /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1 exists
I1129 09:01:29.684332 494126 cache.go:96] cache image "registry.k8s.io/kube-apiserver:v1.34.1" -> "/home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1" took 80.37µs
I1129 09:01:29.684333 494126 cache.go:96] cache image "registry.k8s.io/kube-proxy:v1.34.1" -> "/home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1" took 119.913µs
I1129 09:01:29.684341 494126 cache.go:80] save to tar file registry.k8s.io/kube-apiserver:v1.34.1 -> /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1 succeeded
I1129 09:01:29.684344 494126 cache.go:80] save to tar file registry.k8s.io/kube-proxy:v1.34.1 -> /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1 succeeded
I1129 09:01:29.684332 494126 cache.go:107] acquiring lock: {Name:mkff44f5b6b961ddaa9acc3e74cf0480b0d2f776 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1129 09:01:29.684358 494126 cache.go:107] acquiring lock: {Name:mk6080f4393a19fb5c4d6f436dce1a2bb1688f86 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1129 09:01:29.684378 494126 cache.go:115] /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1 exists
I1129 09:01:29.684387 494126 cache.go:96] cache image "registry.k8s.io/coredns/coredns:v1.12.1" -> "/home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1" took 58.113µs
I1129 09:01:29.684395 494126 cache.go:80] save to tar file registry.k8s.io/coredns/coredns:v1.12.1 -> /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1 succeeded
I1129 09:01:29.684399 494126 cache.go:115] /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 exists
I1129 09:01:29.684282 494126 cache.go:107] acquiring lock: {Name:mkb8e7a67c98a0b8caa208116d415323f5ca7ccc Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1129 09:01:29.684410 494126 cache.go:107] acquiring lock: {Name:mk47ee24ca074cb6cc1a641d737215686b099dc0 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1129 09:01:29.684472 494126 cache.go:115] /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1 exists
I1129 09:01:29.684482 494126 cache.go:96] cache image "registry.k8s.io/kube-controller-manager:v1.34.1" -> "/home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1" took 217.393µs
I1129 09:01:29.684492 494126 cache.go:80] save to tar file registry.k8s.io/kube-controller-manager:v1.34.1 -> /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1 succeeded
I1129 09:01:29.684416 494126 cache.go:96] cache image "registry.k8s.io/pause:3.10.1" -> "/home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1" took 61.464µs
I1129 09:01:29.684504 494126 cache.go:80] save to tar file registry.k8s.io/pause:3.10.1 -> /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 succeeded
I1129 09:01:29.684517 494126 cache.go:115] /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1 exists
I1129 09:01:29.684533 494126 cache.go:96] cache image "registry.k8s.io/kube-scheduler:v1.34.1" -> "/home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1" took 171.692µs
I1129 09:01:29.684552 494126 cache.go:80] save to tar file registry.k8s.io/kube-scheduler:v1.34.1 -> /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1 succeeded
I1129 09:01:29.684643 494126 cache.go:107] acquiring lock: {Name:mk912246de843459c104f342794e23ecb1fc7a75 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1129 09:01:29.684790 494126 cache.go:115] /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0 exists
I1129 09:01:29.684806 494126 cache.go:96] cache image "registry.k8s.io/etcd:3.6.4-0" -> "/home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0" took 226.111µs
I1129 09:01:29.684824 494126 cache.go:80] save to tar file registry.k8s.io/etcd:3.6.4-0 -> /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0 succeeded
I1129 09:01:29.684840 494126 cache.go:87] Successfully saved all images to host disk.
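The cache.go lines above repeat one pattern per image: acquire a per-image lock, stat the cached tarball, and record success when it already exists. A minimal Go sketch of that pattern, assuming hypothetical names (cacheImage, cacheDir) rather than minikube's real API:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"time"
)

var locks sync.Map // image name -> *sync.Mutex (hypothetical lock registry)

func cacheImage(cacheDir, image string) error {
	m, _ := locks.LoadOrStore(image, &sync.Mutex{})
	mu := m.(*sync.Mutex)
	mu.Lock() // cache.go:107 "acquiring lock"
	defer mu.Unlock()

	start := time.Now()
	// registry.k8s.io/pause:3.10.1 -> .../registry.k8s.io/pause_3.10.1
	dst := filepath.Join(cacheDir, strings.ReplaceAll(image, ":", "_"))
	if _, err := os.Stat(dst); err == nil {
		// cache.go:115 "exists", cache.go:96 timing, cache.go:80 "succeeded"
		fmt.Printf("cache image %q -> %q took %s (exists)\n", image, dst, time.Since(start))
		return nil
	}
	// otherwise the image would be pulled and written to dst here
	return fmt.Errorf("not cached: %s", dst)
}

func main() {
	_ = cacheImage("/tmp/cache/images/amd64", "registry.k8s.io/pause:3.10.1")
}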
I1129 09:01:29.706829 494126 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f in local docker daemon, skipping pull
I1129 09:01:29.706854 494126 cache.go:158] gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f exists in daemon, skipping load
I1129 09:01:29.706878 494126 cache.go:243] Successfully downloaded all kic artifacts
I1129 09:01:29.706918 494126 start.go:360] acquireMachinesLock for no-preload-924441: {Name:mkf9f3b6b30f178cf9b9d50a2dabce8e2c5d48f0 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1129 09:01:29.707056 494126 start.go:364] duration metric: took 99.455µs to acquireMachinesLock for "no-preload-924441"
I1129 09:01:29.707090 494126 start.go:93] Provisioning new machine with config: &{Name:no-preload-924441 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-924441 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1129 09:01:29.707206 494126 start.go:125] createHost starting for "" (driver="docker")
I1129 09:01:28.461537 493486 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I1129 09:01:28.461867 493486 start.go:159] libmachine.API.Create for "old-k8s-version-295154" (driver="docker")
I1129 09:01:28.461917 493486 client.go:173] LocalClient.Create starting
I1129 09:01:28.462009 493486 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/22000-255825/.minikube/certs/ca.pem
I1129 09:01:28.462065 493486 main.go:143] libmachine: Decoding PEM data...
I1129 09:01:28.462089 493486 main.go:143] libmachine: Parsing certificate...
I1129 09:01:28.462160 493486 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/22000-255825/.minikube/certs/cert.pem
I1129 09:01:28.462186 493486 main.go:143] libmachine: Decoding PEM data...
I1129 09:01:28.462205 493486 main.go:143] libmachine: Parsing certificate...
I1129 09:01:28.462679 493486 cli_runner.go:164] Run: docker network inspect old-k8s-version-295154 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1129 09:01:28.481658 493486 cli_runner.go:211] docker network inspect old-k8s-version-295154 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1129 09:01:28.481745 493486 network_create.go:284] running [docker network inspect old-k8s-version-295154] to gather additional debugging logs...
I1129 09:01:28.481770 493486 cli_runner.go:164] Run: docker network inspect old-k8s-version-295154
W1129 09:01:28.500619 493486 cli_runner.go:211] docker network inspect old-k8s-version-295154 returned with exit code 1
I1129 09:01:28.500661 493486 network_create.go:287] error running [docker network inspect old-k8s-version-295154]: docker network inspect old-k8s-version-295154: exit status 1
stdout:
[]
stderr:
Error response from daemon: network old-k8s-version-295154 not found
I1129 09:01:28.500677 493486 network_create.go:289] output of [docker network inspect old-k8s-version-295154]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network old-k8s-version-295154 not found
** /stderr **
I1129 09:01:28.500849 493486 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1129 09:01:28.518426 493486 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-f69c672bf913 IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:26:40:f4:ed:4f:ab} reservation:<nil>}
I1129 09:01:28.519384 493486 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-96d20aff5877 IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:c2:01:e2:a3:b8:33} reservation:<nil>}
I1129 09:01:28.520407 493486 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-f7906c56f869 IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:06:29:75:e3:e0:7f} reservation:<nil>}
I1129 09:01:28.521974 493486 network.go:206] using free private subnet 192.168.76.0/24: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001f90700}
I1129 09:01:28.522028 493486 network_create.go:124] attempt to create docker network old-k8s-version-295154 192.168.76.0/24 with gateway 192.168.76.1 and MTU of 1500 ...
I1129 09:01:28.522109 493486 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.76.0/24 --gateway=192.168.76.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=old-k8s-version-295154 old-k8s-version-295154
I1129 09:01:28.575478 493486 network_create.go:108] docker network old-k8s-version-295154 192.168.76.0/24 created
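The subnet scan above walks candidate private /24 networks and takes the first one no existing bridge occupies; judging from this log the third octet steps by 9 (49, 58, 67, 76, ...). A rough Go sketch of that selection, with the step size inferred from the log rather than read from network.go:

package main

import "fmt"

// freeSubnet returns the first 192.168.x.0/24 candidate not already taken.
func freeSubnet(taken map[string]bool) string {
	for octet := 49; octet <= 247; octet += 9 {
		cidr := fmt.Sprintf("192.168.%d.0/24", octet)
		if taken[cidr] {
			fmt.Println("skipping subnet", cidr, "that is taken") // network.go:211
			continue
		}
		return cidr // network.go:206 "using free private subnet"
	}
	return ""
}

func main() {
	taken := map[string]bool{
		"192.168.49.0/24": true, "192.168.58.0/24": true, "192.168.67.0/24": true,
	}
	// Prints 192.168.76.0/24; the gateway is .1 and the node's static IP is .2.
	fmt.Println(freeSubnet(taken))
}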
I1129 09:01:28.575522 493486 kic.go:121] calculated static IP "192.168.76.2" for the "old-k8s-version-295154" container
I1129 09:01:28.575603 493486 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1129 09:01:28.593666 493486 cli_runner.go:164] Run: docker volume create old-k8s-version-295154 --label name.minikube.sigs.k8s.io=old-k8s-version-295154 --label created_by.minikube.sigs.k8s.io=true
I1129 09:01:28.612389 493486 oci.go:103] Successfully created a docker volume old-k8s-version-295154
I1129 09:01:28.612501 493486 cli_runner.go:164] Run: docker run --rm --name old-k8s-version-295154-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=old-k8s-version-295154 --entrypoint /usr/bin/test -v old-k8s-version-295154:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -d /var/lib
I1129 09:01:29.238109 493486 oci.go:107] Successfully prepared a docker volume old-k8s-version-295154
I1129 09:01:29.238162 493486 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1129 09:01:29.238176 493486 kic.go:194] Starting extracting preloaded images to volume ...
I1129 09:01:29.238241 493486 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/22000-255825/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v old-k8s-version-295154:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -I lz4 -xf /preloaded.tar -C /extractDir
I1129 09:01:32.586626 493486 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/22000-255825/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v old-k8s-version-295154:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -I lz4 -xf /preloaded.tar -C /extractDir: (3.348341473s)
I1129 09:01:32.586660 493486 kic.go:203] duration metric: took 3.348481997s to extract preloaded images to volume ...
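The extraction step above can be reproduced by hand: mount the preload tarball read-only plus the machine's /var volume into a throwaway kicbase container and untar into it. A sketch that rebuilds the same docker run invocation from Go (the image's @sha256 digest is omitted here for brevity):

package main

import (
	"os"
	"os/exec"
)

func main() {
	// Paths and names taken from the log lines above.
	tarball := "/home/jenkins/minikube-integration/22000-255825/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-amd64.tar.lz4"
	cmd := exec.Command("docker", "run", "--rm",
		"--entrypoint", "/usr/bin/tar",
		"-v", tarball+":/preloaded.tar:ro",
		"-v", "old-k8s-version-295154:/extractDir",
		"gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948",
		"-I", "lz4", "-xf", "/preloaded.tar", "-C", "/extractDir")
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	_ = cmd.Run()
}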
W1129 09:01:32.586761 493486 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
W1129 09:01:32.586805 493486 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.
I1129 09:01:32.586861 493486 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1129 09:01:32.650922 493486 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname old-k8s-version-295154 --name old-k8s-version-295154 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=old-k8s-version-295154 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=old-k8s-version-295154 --network old-k8s-version-295154 --ip 192.168.76.2 --volume old-k8s-version-295154:/var --security-opt apparmor=unconfined --memory=3072mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f
I1129 09:01:32.982372 493486 cli_runner.go:164] Run: docker container inspect old-k8s-version-295154 --format={{.State.Running}}
I1129 09:01:33.001073 493486 cli_runner.go:164] Run: docker container inspect old-k8s-version-295154 --format={{.State.Status}}
I1129 09:01:33.021021 493486 cli_runner.go:164] Run: docker exec old-k8s-version-295154 stat /var/lib/dpkg/alternatives/iptables
I1129 09:01:33.078706 493486 oci.go:144] the created container "old-k8s-version-295154" has a running status.
I1129 09:01:33.078890 493486 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/22000-255825/.minikube/machines/old-k8s-version-295154/id_rsa...
I1129 09:01:33.213970 493486 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/22000-255825/.minikube/machines/old-k8s-version-295154/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1129 09:01:33.251103 493486 cli_runner.go:164] Run: docker container inspect old-k8s-version-295154 --format={{.State.Status}}
I1129 09:01:29.709142 494126 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I1129 09:01:29.709367 494126 start.go:159] libmachine.API.Create for "no-preload-924441" (driver="docker")
I1129 09:01:29.709398 494126 client.go:173] LocalClient.Create starting
I1129 09:01:29.709475 494126 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/22000-255825/.minikube/certs/ca.pem
I1129 09:01:29.709526 494126 main.go:143] libmachine: Decoding PEM data...
I1129 09:01:29.709553 494126 main.go:143] libmachine: Parsing certificate...
I1129 09:01:29.709629 494126 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/22000-255825/.minikube/certs/cert.pem
I1129 09:01:29.709661 494126 main.go:143] libmachine: Decoding PEM data...
I1129 09:01:29.709679 494126 main.go:143] libmachine: Parsing certificate...
I1129 09:01:29.710082 494126 cli_runner.go:164] Run: docker network inspect no-preload-924441 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1129 09:01:29.727862 494126 cli_runner.go:211] docker network inspect no-preload-924441 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1129 09:01:29.727982 494126 network_create.go:284] running [docker network inspect no-preload-924441] to gather additional debugging logs...
I1129 09:01:29.728011 494126 cli_runner.go:164] Run: docker network inspect no-preload-924441
W1129 09:01:29.747053 494126 cli_runner.go:211] docker network inspect no-preload-924441 returned with exit code 1
I1129 09:01:29.747092 494126 network_create.go:287] error running [docker network inspect no-preload-924441]: docker network inspect no-preload-924441: exit status 1
stdout:
[]
stderr:
Error response from daemon: network no-preload-924441 not found
I1129 09:01:29.747129 494126 network_create.go:289] output of [docker network inspect no-preload-924441]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network no-preload-924441 not found
** /stderr **
I1129 09:01:29.747297 494126 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1129 09:01:29.769138 494126 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-f69c672bf913 IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:26:40:f4:ed:4f:ab} reservation:<nil>}
I1129 09:01:29.769961 494126 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-96d20aff5877 IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:c2:01:e2:a3:b8:33} reservation:<nil>}
I1129 09:01:29.770795 494126 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-f7906c56f869 IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:06:29:75:e3:e0:7f} reservation:<nil>}
I1129 09:01:29.771440 494126 network.go:211] skipping subnet 192.168.76.0/24 that is taken: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName:br-aea341d97cf5 IfaceIPv4:192.168.76.1 IfaceMTU:1500 IfaceMAC:ea:fb:22:ff:e0:65} reservation:<nil>}
I1129 09:01:29.771972 494126 network.go:211] skipping subnet 192.168.85.0/24 that is taken: &{IP:192.168.85.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.85.0/24 Gateway:192.168.85.1 ClientMin:192.168.85.2 ClientMax:192.168.85.254 Broadcast:192.168.85.255 IsPrivate:true Interface:{IfaceName:br-5ec7c7346e1b IfaceIPv4:192.168.85.1 IfaceMTU:1500 IfaceMAC:f6:a5:df:dd:c8:cf} reservation:<nil>}
I1129 09:01:29.772536 494126 network.go:211] skipping subnet 192.168.94.0/24 that is taken: &{IP:192.168.94.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.94.0/24 Gateway:192.168.94.1 ClientMin:192.168.94.2 ClientMax:192.168.94.254 Broadcast:192.168.94.255 IsPrivate:true Interface:{IfaceName:br-ede9a8c5c6b0 IfaceIPv4:192.168.94.1 IfaceMTU:1500 IfaceMAC:3e:6e:06:75:02:7a} reservation:<nil>}
I1129 09:01:29.773382 494126 network.go:206] using free private subnet 192.168.103.0/24: &{IP:192.168.103.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.103.0/24 Gateway:192.168.103.1 ClientMin:192.168.103.2 ClientMax:192.168.103.254 Broadcast:192.168.103.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc00201aa40}
I1129 09:01:29.773412 494126 network_create.go:124] attempt to create docker network no-preload-924441 192.168.103.0/24 with gateway 192.168.103.1 and MTU of 1500 ...
I1129 09:01:29.773492 494126 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.103.0/24 --gateway=192.168.103.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=no-preload-924441 no-preload-924441
I1129 09:01:29.826699 494126 network_create.go:108] docker network no-preload-924441 192.168.103.0/24 created
I1129 09:01:29.826822 494126 kic.go:121] calculated static IP "192.168.103.2" for the "no-preload-924441" container
I1129 09:01:29.826907 494126 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1129 09:01:29.848520 494126 cli_runner.go:164] Run: docker volume create no-preload-924441 --label name.minikube.sigs.k8s.io=no-preload-924441 --label created_by.minikube.sigs.k8s.io=true
I1129 09:01:29.870388 494126 oci.go:103] Successfully created a docker volume no-preload-924441
I1129 09:01:29.870496 494126 cli_runner.go:164] Run: docker run --rm --name no-preload-924441-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=no-preload-924441 --entrypoint /usr/bin/test -v no-preload-924441:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -d /var/lib
I1129 09:01:32.848045 494126 cli_runner.go:217] Completed: docker run --rm --name no-preload-924441-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=no-preload-924441 --entrypoint /usr/bin/test -v no-preload-924441:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -d /var/lib: (2.977502795s)
I1129 09:01:32.848077 494126 oci.go:107] Successfully prepared a docker volume no-preload-924441
I1129 09:01:32.848131 494126 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
W1129 09:01:32.848227 494126 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
W1129 09:01:32.848271 494126 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.
I1129 09:01:32.848312 494126 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1129 09:01:32.909124 494126 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname no-preload-924441 --name no-preload-924441 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=no-preload-924441 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=no-preload-924441 --network no-preload-924441 --ip 192.168.103.2 --volume no-preload-924441:/var --security-opt apparmor=unconfined --memory=3072mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f
I1129 09:01:33.229639 494126 cli_runner.go:164] Run: docker container inspect no-preload-924441 --format={{.State.Running}}
I1129 09:01:33.257967 494126 cli_runner.go:164] Run: docker container inspect no-preload-924441 --format={{.State.Status}}
I1129 09:01:33.283525 494126 cli_runner.go:164] Run: docker exec no-preload-924441 stat /var/lib/dpkg/alternatives/iptables
I1129 09:01:33.358911 494126 oci.go:144] the created container "no-preload-924441" has a running status.
I1129 09:01:33.358964 494126 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/22000-255825/.minikube/machines/no-preload-924441/id_rsa...
I1129 09:01:33.456248 494126 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/22000-255825/.minikube/machines/no-preload-924441/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1129 09:01:33.491041 494126 cli_runner.go:164] Run: docker container inspect no-preload-924441 --format={{.State.Status}}
I1129 09:01:33.515555 494126 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1129 09:01:33.515581 494126 kic_runner.go:114] Args: [docker exec --privileged no-preload-924441 chown docker:docker /home/docker/.ssh/authorized_keys]
I1129 09:01:33.567971 494126 cli_runner.go:164] Run: docker container inspect no-preload-924441 --format={{.State.Status}}
I1129 09:01:33.599907 494126 machine.go:94] provisionDockerMachine start ...
I1129 09:01:33.599999 494126 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-924441
I1129 09:01:33.634873 494126 main.go:143] libmachine: Using SSH client type: native
I1129 09:01:33.635521 494126 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33063 <nil> <nil>}
I1129 09:01:33.635590 494126 main.go:143] libmachine: About to run SSH command:
hostname
I1129 09:01:33.636667 494126 main.go:143] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:34766->127.0.0.1:33063: read: connection reset by peer
I1129 09:01:29.724136 460401 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
I1129 09:01:29.724608 460401 api_server.go:269] stopped: https://192.168.85.2:8443/healthz: Get "https://192.168.85.2:8443/healthz": dial tcp 192.168.85.2:8443: connect: connection refused
I1129 09:01:29.724657 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1129 09:01:29.724702 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1129 09:01:29.763194 460401 cri.go:89] found id: "5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095"
I1129 09:01:29.763266 460401 cri.go:89] found id: "1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:01:29.763286 460401 cri.go:89] found id: ""
I1129 09:01:29.763304 460401 logs.go:282] 2 containers: [5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101]
I1129 09:01:29.763372 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:29.769877 460401 ssh_runner.go:195] Run: which crictl
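Each cri.go listing in this block is the same recipe: run crictl ps -a --quiet --name=<component> and split the output into container IDs; the trailing newline is what produces the empty "" entry the log prints, which this sketch filters out. listContainers is an illustrative name, not minikube's:

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// listContainers mirrors `sudo crictl ps -a --quiet --name=<re>` and returns
// the non-empty container IDs from its newline-separated output.
func listContainers(name string) ([]string, error) {
	out, err := exec.Command("sudo", "crictl", "ps", "-a", "--quiet", "--name="+name).Output()
	if err != nil {
		return nil, err
	}
	var ids []string
	for _, line := range strings.Split(string(out), "\n") {
		if line = strings.TrimSpace(line); line != "" {
			ids = append(ids, line)
		}
	}
	return ids, nil
}

func main() {
	ids, err := listContainers("kube-apiserver")
	fmt.Println(ids, err)
}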
I1129 09:01:29.774814 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1129 09:01:29.774887 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1129 09:01:29.810078 460401 cri.go:89] found id: "f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:01:29.810105 460401 cri.go:89] found id: ""
I1129 09:01:29.810116 460401 logs.go:282] 1 containers: [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625]
I1129 09:01:29.810167 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:29.815272 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1129 09:01:29.815349 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1129 09:01:29.851653 460401 cri.go:89] found id: ""
I1129 09:01:29.851680 460401 logs.go:282] 0 containers: []
W1129 09:01:29.851691 460401 logs.go:284] No container was found matching "coredns"
I1129 09:01:29.851700 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1129 09:01:29.851773 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1129 09:01:29.883424 460401 cri.go:89] found id: "092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:01:29.883449 460401 cri.go:89] found id: "1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:01:29.883456 460401 cri.go:89] found id: ""
I1129 09:01:29.883466 460401 logs.go:282] 2 containers: [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea]
I1129 09:01:29.883537 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:29.889105 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:29.894072 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1129 09:01:29.894150 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1129 09:01:29.924971 460401 cri.go:89] found id: ""
I1129 09:01:29.925006 460401 logs.go:282] 0 containers: []
W1129 09:01:29.925019 460401 logs.go:284] No container was found matching "kube-proxy"
I1129 09:01:29.925027 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1129 09:01:29.925129 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1129 09:01:29.954168 460401 cri.go:89] found id: "2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:01:29.954194 460401 cri.go:89] found id: "f78d0d97ffa9f2d0cbf8a0cf305a7f0c4323a505bb9b3fa272405c6b22ab9f00"
I1129 09:01:29.954199 460401 cri.go:89] found id: "976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:01:29.954203 460401 cri.go:89] found id: ""
I1129 09:01:29.954214 460401 logs.go:282] 3 containers: [2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a f78d0d97ffa9f2d0cbf8a0cf305a7f0c4323a505bb9b3fa272405c6b22ab9f00 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d]
I1129 09:01:29.954278 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:29.959542 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:29.964240 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:29.968754 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1129 09:01:29.968820 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1129 09:01:29.999663 460401 cri.go:89] found id: ""
I1129 09:01:29.999685 460401 logs.go:282] 0 containers: []
W1129 09:01:29.999694 460401 logs.go:284] No container was found matching "kindnet"
I1129 09:01:29.999700 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1129 09:01:29.999780 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1129 09:01:30.029803 460401 cri.go:89] found id: ""
I1129 09:01:30.029833 460401 logs.go:282] 0 containers: []
W1129 09:01:30.029845 460401 logs.go:284] No container was found matching "storage-provisioner"
I1129 09:01:30.029859 460401 logs.go:123] Gathering logs for kube-apiserver [5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095] ...
I1129 09:01:30.029877 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095"
I1129 09:01:30.069873 460401 logs.go:123] Gathering logs for etcd [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625] ...
I1129 09:01:30.069904 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:01:30.108923 460401 logs.go:123] Gathering logs for kube-scheduler [1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea] ...
I1129 09:01:30.108958 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:01:30.146649 460401 logs.go:123] Gathering logs for containerd ...
I1129 09:01:30.146682 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1129 09:01:30.190480 460401 logs.go:123] Gathering logs for container status ...
I1129 09:01:30.190514 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1129 09:01:30.225134 460401 logs.go:123] Gathering logs for kubelet ...
I1129 09:01:30.225167 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1129 09:01:30.299416 460401 logs.go:123] Gathering logs for dmesg ...
I1129 09:01:30.299461 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1129 09:01:30.314711 460401 logs.go:123] Gathering logs for describe nodes ...
I1129 09:01:30.314766 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1129 09:01:30.384833 460401 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1129 09:01:30.384856 460401 logs.go:123] Gathering logs for kube-apiserver [1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101] ...
I1129 09:01:30.384879 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:01:30.420690 460401 logs.go:123] Gathering logs for kube-scheduler [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21] ...
I1129 09:01:30.420720 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:01:30.476182 460401 logs.go:123] Gathering logs for kube-controller-manager [2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a] ...
I1129 09:01:30.476221 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:01:30.507666 460401 logs.go:123] Gathering logs for kube-controller-manager [f78d0d97ffa9f2d0cbf8a0cf305a7f0c4323a505bb9b3fa272405c6b22ab9f00] ...
I1129 09:01:30.507698 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f78d0d97ffa9f2d0cbf8a0cf305a7f0c4323a505bb9b3fa272405c6b22ab9f00"
I1129 09:01:30.536613 460401 logs.go:123] Gathering logs for kube-controller-manager [976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d] ...
I1129 09:01:30.536640 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:01:33.076844 460401 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
I1129 09:01:33.077304 460401 api_server.go:269] stopped: https://192.168.85.2:8443/healthz: Get "https://192.168.85.2:8443/healthz": dial tcp 192.168.85.2:8443: connect: connection refused
I1129 09:01:33.077371 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1129 09:01:33.077426 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1129 09:01:33.111899 460401 cri.go:89] found id: "5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095"
I1129 09:01:33.111922 460401 cri.go:89] found id: "1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:01:33.111928 460401 cri.go:89] found id: ""
I1129 09:01:33.111938 460401 logs.go:282] 2 containers: [5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101]
I1129 09:01:33.111995 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:33.117191 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:33.122615 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1129 09:01:33.122688 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1129 09:01:33.163794 460401 cri.go:89] found id: "f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:01:33.163822 460401 cri.go:89] found id: ""
I1129 09:01:33.163834 460401 logs.go:282] 1 containers: [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625]
I1129 09:01:33.163897 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:33.170244 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1129 09:01:33.170334 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1129 09:01:33.203629 460401 cri.go:89] found id: ""
I1129 09:01:33.203662 460401 logs.go:282] 0 containers: []
W1129 09:01:33.203675 460401 logs.go:284] No container was found matching "coredns"
I1129 09:01:33.203683 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1129 09:01:33.203759 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1129 09:01:33.248112 460401 cri.go:89] found id: "092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:01:33.248142 460401 cri.go:89] found id: "1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:01:33.248148 460401 cri.go:89] found id: ""
I1129 09:01:33.248159 460401 logs.go:282] 2 containers: [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea]
I1129 09:01:33.248226 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:33.255192 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:33.262339 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1129 09:01:33.262419 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1129 09:01:33.308727 460401 cri.go:89] found id: ""
I1129 09:01:33.308855 460401 logs.go:282] 0 containers: []
W1129 09:01:33.308869 460401 logs.go:284] No container was found matching "kube-proxy"
I1129 09:01:33.308878 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1129 09:01:33.309309 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1129 09:01:33.361181 460401 cri.go:89] found id: "2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:01:33.361234 460401 cri.go:89] found id: "f78d0d97ffa9f2d0cbf8a0cf305a7f0c4323a505bb9b3fa272405c6b22ab9f00"
I1129 09:01:33.361241 460401 cri.go:89] found id: "976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:01:33.361245 460401 cri.go:89] found id: ""
I1129 09:01:33.361255 460401 logs.go:282] 3 containers: [2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a f78d0d97ffa9f2d0cbf8a0cf305a7f0c4323a505bb9b3fa272405c6b22ab9f00 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d]
I1129 09:01:33.361343 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:33.368091 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:33.374495 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:33.380899 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1129 09:01:33.380965 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1129 09:01:33.430643 460401 cri.go:89] found id: ""
I1129 09:01:33.430670 460401 logs.go:282] 0 containers: []
W1129 09:01:33.430681 460401 logs.go:284] No container was found matching "kindnet"
I1129 09:01:33.430689 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1129 09:01:33.430771 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1129 09:01:33.467019 460401 cri.go:89] found id: ""
I1129 09:01:33.467047 460401 logs.go:282] 0 containers: []
W1129 09:01:33.467058 460401 logs.go:284] No container was found matching "storage-provisioner"
I1129 09:01:33.467072 460401 logs.go:123] Gathering logs for containerd ...
I1129 09:01:33.467091 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1129 09:01:33.529538 460401 logs.go:123] Gathering logs for etcd [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625] ...
I1129 09:01:33.529588 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:01:33.591866 460401 logs.go:123] Gathering logs for kube-scheduler [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21] ...
I1129 09:01:33.591912 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:01:33.664144 460401 logs.go:123] Gathering logs for kube-controller-manager [2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a] ...
I1129 09:01:33.664179 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:01:33.701152 460401 logs.go:123] Gathering logs for kube-controller-manager [f78d0d97ffa9f2d0cbf8a0cf305a7f0c4323a505bb9b3fa272405c6b22ab9f00] ...
I1129 09:01:33.701195 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f78d0d97ffa9f2d0cbf8a0cf305a7f0c4323a505bb9b3fa272405c6b22ab9f00"
I1129 09:01:33.735624 460401 logs.go:123] Gathering logs for kube-controller-manager [976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d] ...
I1129 09:01:33.735669 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:01:33.774144 460401 logs.go:123] Gathering logs for container status ...
I1129 09:01:33.774175 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1129 09:01:33.808426 460401 logs.go:123] Gathering logs for kubelet ...
I1129 09:01:33.808461 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1129 09:01:33.898471 460401 logs.go:123] Gathering logs for dmesg ...
I1129 09:01:33.898509 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1129 09:01:33.914358 460401 logs.go:123] Gathering logs for describe nodes ...
I1129 09:01:33.914394 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1129 09:01:33.978927 460401 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1129 09:01:33.978954 460401 logs.go:123] Gathering logs for kube-apiserver [5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095] ...
I1129 09:01:33.978975 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095"
I1129 09:01:34.016239 460401 logs.go:123] Gathering logs for kube-apiserver [1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101] ...
I1129 09:01:34.016268 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:01:34.055208 460401 logs.go:123] Gathering logs for kube-scheduler [1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea] ...
I1129 09:01:34.055239 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:01:33.275806 493486 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1129 09:01:33.275832 493486 kic_runner.go:114] Args: [docker exec --privileged old-k8s-version-295154 chown docker:docker /home/docker/.ssh/authorized_keys]
I1129 09:01:33.349350 493486 cli_runner.go:164] Run: docker container inspect old-k8s-version-295154 --format={{.State.Status}}
I1129 09:01:33.378383 493486 machine.go:94] provisionDockerMachine start ...
I1129 09:01:33.378475 493486 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-295154
I1129 09:01:33.410015 493486 main.go:143] libmachine: Using SSH client type: native
I1129 09:01:33.410367 493486 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33058 <nil> <nil>}
I1129 09:01:33.410384 493486 main.go:143] libmachine: About to run SSH command:
hostname
I1129 09:01:33.577990 493486 main.go:143] libmachine: SSH cmd err, output: <nil>: old-k8s-version-295154
I1129 09:01:33.578018 493486 ubuntu.go:182] provisioning hostname "old-k8s-version-295154"
I1129 09:01:33.578086 493486 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-295154
I1129 09:01:33.609401 493486 main.go:143] libmachine: Using SSH client type: native
I1129 09:01:33.609890 493486 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33058 <nil> <nil>}
I1129 09:01:33.609953 493486 main.go:143] libmachine: About to run SSH command:
sudo hostname old-k8s-version-295154 && echo "old-k8s-version-295154" | sudo tee /etc/hostname
I1129 09:01:33.789112 493486 main.go:143] libmachine: SSH cmd err, output: <nil>: old-k8s-version-295154
I1129 09:01:33.789205 493486 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-295154
I1129 09:01:33.813423 493486 main.go:143] libmachine: Using SSH client type: native
I1129 09:01:33.813741 493486 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33058 <nil> <nil>}
I1129 09:01:33.813774 493486 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sold-k8s-version-295154' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 old-k8s-version-295154/g' /etc/hosts;
else
echo '127.0.1.1 old-k8s-version-295154' | sudo tee -a /etc/hosts;
fi
fi
I1129 09:01:33.966671 493486 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1129 09:01:33.966701 493486 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/22000-255825/.minikube CaCertPath:/home/jenkins/minikube-integration/22000-255825/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/22000-255825/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/22000-255825/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/22000-255825/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/22000-255825/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/22000-255825/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/22000-255825/.minikube}
I1129 09:01:33.966720 493486 ubuntu.go:190] setting up certificates
I1129 09:01:33.966746 493486 provision.go:84] configureAuth start
I1129 09:01:33.966809 493486 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-295154
I1129 09:01:33.987509 493486 provision.go:143] copyHostCerts
I1129 09:01:33.987591 493486 exec_runner.go:144] found /home/jenkins/minikube-integration/22000-255825/.minikube/ca.pem, removing ...
I1129 09:01:33.987609 493486 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22000-255825/.minikube/ca.pem
I1129 09:01:33.987703 493486 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22000-255825/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/22000-255825/.minikube/ca.pem (1078 bytes)
I1129 09:01:33.987854 493486 exec_runner.go:144] found /home/jenkins/minikube-integration/22000-255825/.minikube/cert.pem, removing ...
I1129 09:01:33.987873 493486 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22000-255825/.minikube/cert.pem
I1129 09:01:33.987926 493486 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22000-255825/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/22000-255825/.minikube/cert.pem (1123 bytes)
I1129 09:01:33.988030 493486 exec_runner.go:144] found /home/jenkins/minikube-integration/22000-255825/.minikube/key.pem, removing ...
I1129 09:01:33.988043 493486 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22000-255825/.minikube/key.pem
I1129 09:01:33.988093 493486 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22000-255825/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/22000-255825/.minikube/key.pem (1679 bytes)
I1129 09:01:33.988197 493486 provision.go:117] generating server cert: /home/jenkins/minikube-integration/22000-255825/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/22000-255825/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/22000-255825/.minikube/certs/ca-key.pem org=jenkins.old-k8s-version-295154 san=[127.0.0.1 192.168.76.2 localhost minikube old-k8s-version-295154]
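provision.go:117 above issues a server certificate signed by the minikube CA whose SANs cover the loopback address, the container IP, and the machine names. A self-contained sketch of that issuance using Go's crypto/x509; it generates a throwaway CA in memory instead of loading ca.pem/ca-key.pem, so treat it as illustrative only:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"math/big"
	"net"
	"os"
	"time"
)

func main() {
	// Throwaway CA standing in for minikubeCA (the real run loads it from disk).
	caKey, _ := rsa.GenerateKey(rand.Reader, 2048)
	caTmpl := &x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{CommonName: "minikubeCA"},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().Add(26280 * time.Hour), // CertExpiration from the config dump
		IsCA:                  true,
		KeyUsage:              x509.KeyUsageCertSign,
		BasicConstraintsValid: true,
	}
	caDER, _ := x509.CreateCertificate(rand.Reader, caTmpl, caTmpl, &caKey.PublicKey, caKey)
	caCert, _ := x509.ParseCertificate(caDER)

	srvKey, _ := rsa.GenerateKey(rand.Reader, 2048)
	srvTmpl := &x509.Certificate{
		SerialNumber: big.NewInt(2),
		Subject:      pkix.Name{Organization: []string{"jenkins.old-k8s-version-295154"}},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(26280 * time.Hour),
		// san=[127.0.0.1 192.168.76.2 localhost minikube old-k8s-version-295154]
		IPAddresses: []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("192.168.76.2")},
		DNSNames:    []string{"localhost", "minikube", "old-k8s-version-295154"},
		KeyUsage:    x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
	}
	srvDER, _ := x509.CreateCertificate(rand.Reader, srvTmpl, caCert, &srvKey.PublicKey, caKey)
	pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: srvDER})
}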
I1129 09:01:34.173289 493486 provision.go:177] copyRemoteCerts
I1129 09:01:34.173365 493486 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1129 09:01:34.173409 493486 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-295154
I1129 09:01:34.192053 493486 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33058 SSHKeyPath:/home/jenkins/minikube-integration/22000-255825/.minikube/machines/old-k8s-version-295154/id_rsa Username:docker}
I1129 09:01:34.294293 493486 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I1129 09:01:34.313898 493486 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/machines/server.pem --> /etc/docker/server.pem (1233 bytes)
I1129 09:01:34.331337 493486 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1129 09:01:34.348272 493486 provision.go:87] duration metric: took 381.510752ms to configureAuth
I1129 09:01:34.348301 493486 ubuntu.go:206] setting minikube options for container-runtime
I1129 09:01:34.348457 493486 config.go:182] Loaded profile config "old-k8s-version-295154": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1129 09:01:34.348472 493486 machine.go:97] duration metric: took 970.068662ms to provisionDockerMachine
I1129 09:01:34.348481 493486 client.go:176] duration metric: took 5.886553133s to LocalClient.Create
I1129 09:01:34.348502 493486 start.go:167] duration metric: took 5.88663904s to libmachine.API.Create "old-k8s-version-295154"
I1129 09:01:34.348512 493486 start.go:293] postStartSetup for "old-k8s-version-295154" (driver="docker")
I1129 09:01:34.348520 493486 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1129 09:01:34.348570 493486 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1129 09:01:34.348614 493486 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-295154
I1129 09:01:34.366501 493486 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33058 SSHKeyPath:/home/jenkins/minikube-integration/22000-255825/.minikube/machines/old-k8s-version-295154/id_rsa Username:docker}
I1129 09:01:34.469910 493486 ssh_runner.go:195] Run: cat /etc/os-release
I1129 09:01:34.473823 493486 main.go:143] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1129 09:01:34.473855 493486 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1129 09:01:34.473868 493486 filesync.go:126] Scanning /home/jenkins/minikube-integration/22000-255825/.minikube/addons for local assets ...
I1129 09:01:34.473922 493486 filesync.go:126] Scanning /home/jenkins/minikube-integration/22000-255825/.minikube/files for local assets ...
I1129 09:01:34.474038 493486 filesync.go:149] local asset: /home/jenkins/minikube-integration/22000-255825/.minikube/files/etc/ssl/certs/2594832.pem -> 2594832.pem in /etc/ssl/certs
I1129 09:01:34.474177 493486 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1129 09:01:34.481912 493486 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/files/etc/ssl/certs/2594832.pem --> /etc/ssl/certs/2594832.pem (1708 bytes)
I1129 09:01:34.502433 493486 start.go:296] duration metric: took 153.905912ms for postStartSetup
I1129 09:01:34.502813 493486 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-295154
I1129 09:01:34.520071 493486 profile.go:143] Saving config to /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/config.json ...
I1129 09:01:34.520308 493486 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1129 09:01:34.520347 493486 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-295154
I1129 09:01:34.539111 493486 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33058 SSHKeyPath:/home/jenkins/minikube-integration/22000-255825/.minikube/machines/old-k8s-version-295154/id_rsa Username:docker}
I1129 09:01:34.640199 493486 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1129 09:01:34.644901 493486 start.go:128] duration metric: took 6.185289215s to createHost
I1129 09:01:34.644928 493486 start.go:83] releasing machines lock for "old-k8s-version-295154", held for 6.185484113s
I1129 09:01:34.644991 493486 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-295154
I1129 09:01:34.662525 493486 ssh_runner.go:195] Run: cat /version.json
I1129 09:01:34.662583 493486 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-295154
I1129 09:01:34.662584 493486 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1129 09:01:34.662648 493486 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-295154
I1129 09:01:34.679837 493486 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33058 SSHKeyPath:/home/jenkins/minikube-integration/22000-255825/.minikube/machines/old-k8s-version-295154/id_rsa Username:docker}
I1129 09:01:34.681115 493486 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33058 SSHKeyPath:/home/jenkins/minikube-integration/22000-255825/.minikube/machines/old-k8s-version-295154/id_rsa Username:docker}
I1129 09:01:34.833568 493486 ssh_runner.go:195] Run: systemctl --version
I1129 09:01:34.840355 493486 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1129 09:01:34.844844 493486 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1129 09:01:34.844907 493486 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1129 09:01:34.869137 493486 cni.go:262] disabled [/etc/cni/net.d/10-crio-bridge.conflist.disabled, /etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
I1129 09:01:34.869161 493486 start.go:496] detecting cgroup driver to use...
I1129 09:01:34.869194 493486 detect.go:190] detected "systemd" cgroup driver on host os
I1129 09:01:34.869251 493486 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1129 09:01:34.883461 493486 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1129 09:01:34.895885 493486 docker.go:218] disabling cri-docker service (if available) ...
I1129 09:01:34.895942 493486 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1129 09:01:34.912002 493486 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1129 09:01:34.929350 493486 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1129 09:01:35.015369 493486 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1129 09:01:35.101537 493486 docker.go:234] disabling docker service ...
I1129 09:01:35.101597 493486 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1129 09:01:35.120759 493486 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1129 09:01:35.133226 493486 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1129 09:01:35.217122 493486 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1129 09:01:35.301702 493486 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1129 09:01:35.314440 493486 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1129 09:01:35.328312 493486 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.9"|' /etc/containerd/config.toml"
I1129 09:01:35.338331 493486 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1129 09:01:35.346975 493486 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I1129 09:01:35.347033 493486 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I1129 09:01:35.355511 493486 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1129 09:01:35.363986 493486 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1129 09:01:35.372342 493486 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1129 09:01:35.380589 493486 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1129 09:01:35.388205 493486 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1129 09:01:35.396344 493486 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1129 09:01:35.404459 493486 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1129 09:01:35.412783 493486 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1129 09:01:35.420177 493486 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1129 09:01:35.427378 493486 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1129 09:01:35.508150 493486 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1129 09:01:35.605801 493486 start.go:543] Will wait 60s for socket path /run/containerd/containerd.sock
I1129 09:01:35.605868 493486 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I1129 09:01:35.610095 493486 start.go:564] Will wait 60s for crictl version
I1129 09:01:35.610140 493486 ssh_runner.go:195] Run: which crictl
I1129 09:01:35.613826 493486 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1129 09:01:35.640869 493486 start.go:580] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: v2.1.5
RuntimeApiVersion: v1
I1129 09:01:35.640947 493486 ssh_runner.go:195] Run: containerd --version
I1129 09:01:35.662573 493486 ssh_runner.go:195] Run: containerd --version
I1129 09:01:35.686990 493486 out.go:179] * Preparing Kubernetes v1.28.0 on containerd 2.1.5 ...
I1129 09:01:35.688126 493486 cli_runner.go:164] Run: docker network inspect old-k8s-version-295154 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1129 09:01:35.705269 493486 ssh_runner.go:195] Run: grep 192.168.76.1 host.minikube.internal$ /etc/hosts
I1129 09:01:35.709565 493486 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.76.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1129 09:01:35.720029 493486 kubeadm.go:884] updating cluster {Name:old-k8s-version-295154 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-295154 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1129 09:01:35.720146 493486 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1129 09:01:35.720192 493486 ssh_runner.go:195] Run: sudo crictl images --output json
I1129 09:01:35.745337 493486 containerd.go:627] all images are preloaded for containerd runtime.
I1129 09:01:35.745359 493486 containerd.go:534] Images already preloaded, skipping extraction
I1129 09:01:35.745433 493486 ssh_runner.go:195] Run: sudo crictl images --output json
I1129 09:01:35.768552 493486 containerd.go:627] all images are preloaded for containerd runtime.
I1129 09:01:35.768573 493486 cache_images.go:86] Images are preloaded, skipping loading
I1129 09:01:35.768582 493486 kubeadm.go:935] updating node { 192.168.76.2 8443 v1.28.0 containerd true true} ...
I1129 09:01:35.768708 493486 kubeadm.go:947] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.28.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=old-k8s-version-295154 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.76.2
[Install]
config:
{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-295154 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1129 09:01:35.768800 493486 ssh_runner.go:195] Run: sudo crictl info
I1129 09:01:35.793684 493486 cni.go:84] Creating CNI manager for ""
I1129 09:01:35.793704 493486 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1129 09:01:35.793722 493486 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1129 09:01:35.793760 493486 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.76.2 APIServerPort:8443 KubernetesVersion:v1.28.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:old-k8s-version-295154 NodeName:old-k8s-version-295154 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.76.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.76.2 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1129 09:01:35.793881 493486 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.76.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///run/containerd/containerd.sock
name: "old-k8s-version-295154"
kubeletExtraArgs:
node-ip: 192.168.76.2
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.76.2"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.28.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
I1129 09:01:35.793941 493486 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.28.0
I1129 09:01:35.801702 493486 binaries.go:51] Found k8s binaries, skipping transfer
I1129 09:01:35.801779 493486 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1129 09:01:35.809370 493486 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (326 bytes)
I1129 09:01:35.821645 493486 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1129 09:01:35.837123 493486 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2175 bytes)
I1129 09:01:35.849282 493486 ssh_runner.go:195] Run: grep 192.168.76.2 control-plane.minikube.internal$ /etc/hosts
I1129 09:01:35.852777 493486 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.76.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1129 09:01:35.862291 493486 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1129 09:01:35.945522 493486 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1129 09:01:35.967020 493486 certs.go:69] Setting up /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154 for IP: 192.168.76.2
I1129 09:01:35.967046 493486 certs.go:195] generating shared ca certs ...
I1129 09:01:35.967066 493486 certs.go:227] acquiring lock for ca certs: {Name:mk5e6bcae0a6944966b241f3c6197a472703c991 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:01:35.967208 493486 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/22000-255825/.minikube/ca.key
I1129 09:01:35.967259 493486 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/22000-255825/.minikube/proxy-client-ca.key
I1129 09:01:35.967269 493486 certs.go:257] generating profile certs ...
I1129 09:01:35.967334 493486 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/client.key
I1129 09:01:35.967347 493486 crypto.go:68] Generating cert /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/client.crt with IP's: []
I1129 09:01:36.097254 493486 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/client.crt ...
I1129 09:01:36.097290 493486 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/client.crt: {Name:mk21cfae97f1407d02cd99fe2a74be759b699397 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:01:36.097496 493486 crypto.go:164] Writing key to /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/client.key ...
I1129 09:01:36.097514 493486 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/client.key: {Name:mk0736bb845004e9c4d4a2d8602930ec0568eec2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:01:36.097631 493486 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/apiserver.key.a040bf72
I1129 09:01:36.097693 493486 crypto.go:68] Generating cert /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/apiserver.crt.a040bf72 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.76.2]
I1129 09:01:36.144552 493486 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/apiserver.crt.a040bf72 ...
I1129 09:01:36.144579 493486 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/apiserver.crt.a040bf72: {Name:mk3fedcec97acb487835213600ee8b696c362f94 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:01:36.144774 493486 crypto.go:164] Writing key to /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/apiserver.key.a040bf72 ...
I1129 09:01:36.144793 493486 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/apiserver.key.a040bf72: {Name:mk9dc52d2daf1391895a4ee3c561f559be0e2755 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:01:36.144904 493486 certs.go:382] copying /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/apiserver.crt.a040bf72 -> /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/apiserver.crt
I1129 09:01:36.145012 493486 certs.go:386] copying /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/apiserver.key.a040bf72 -> /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/apiserver.key
I1129 09:01:36.145117 493486 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/proxy-client.key
I1129 09:01:36.145138 493486 crypto.go:68] Generating cert /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/proxy-client.crt with IP's: []
I1129 09:01:36.307914 493486 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/proxy-client.crt ...
I1129 09:01:36.307946 493486 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/proxy-client.crt: {Name:mk698ad1b9e2e29d385fd97b123d5b48273c6d5b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:01:36.308144 493486 crypto.go:164] Writing key to /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/proxy-client.key ...
I1129 09:01:36.308172 493486 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/proxy-client.key: {Name:mkcfd3db96260b6b8677060f32dcbd4dd8f838bc Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:01:36.308432 493486 certs.go:484] found cert: /home/jenkins/minikube-integration/22000-255825/.minikube/certs/259483.pem (1338 bytes)
W1129 09:01:36.308490 493486 certs.go:480] ignoring /home/jenkins/minikube-integration/22000-255825/.minikube/certs/259483_empty.pem, impossibly tiny 0 bytes
I1129 09:01:36.308506 493486 certs.go:484] found cert: /home/jenkins/minikube-integration/22000-255825/.minikube/certs/ca-key.pem (1675 bytes)
I1129 09:01:36.308543 493486 certs.go:484] found cert: /home/jenkins/minikube-integration/22000-255825/.minikube/certs/ca.pem (1078 bytes)
I1129 09:01:36.308590 493486 certs.go:484] found cert: /home/jenkins/minikube-integration/22000-255825/.minikube/certs/cert.pem (1123 bytes)
I1129 09:01:36.308633 493486 certs.go:484] found cert: /home/jenkins/minikube-integration/22000-255825/.minikube/certs/key.pem (1679 bytes)
I1129 09:01:36.308689 493486 certs.go:484] found cert: /home/jenkins/minikube-integration/22000-255825/.minikube/files/etc/ssl/certs/2594832.pem (1708 bytes)
I1129 09:01:36.309360 493486 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1129 09:01:36.328372 493486 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1129 09:01:36.345872 493486 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1129 09:01:36.363285 493486 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I1129 09:01:36.380427 493486 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1432 bytes)
I1129 09:01:36.397563 493486 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I1129 09:01:36.414929 493486 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1129 09:01:36.432334 493486 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/old-k8s-version-295154/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I1129 09:01:36.449233 493486 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/files/etc/ssl/certs/2594832.pem --> /usr/share/ca-certificates/2594832.pem (1708 bytes)
I1129 09:01:36.469085 493486 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1129 09:01:36.485869 493486 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/certs/259483.pem --> /usr/share/ca-certificates/259483.pem (1338 bytes)
I1129 09:01:36.502784 493486 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1129 09:01:36.515208 493486 ssh_runner.go:195] Run: openssl version
I1129 09:01:36.521390 493486 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1129 09:01:36.529514 493486 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1129 09:01:36.533021 493486 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 29 08:29 /usr/share/ca-certificates/minikubeCA.pem
I1129 09:01:36.533062 493486 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1129 09:01:36.567579 493486 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1129 09:01:36.576162 493486 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/259483.pem && ln -fs /usr/share/ca-certificates/259483.pem /etc/ssl/certs/259483.pem"
I1129 09:01:36.584343 493486 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/259483.pem
I1129 09:01:36.588122 493486 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 29 08:35 /usr/share/ca-certificates/259483.pem
I1129 09:01:36.588176 493486 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/259483.pem
I1129 09:01:36.626659 493486 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/259483.pem /etc/ssl/certs/51391683.0"
I1129 09:01:36.635780 493486 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/2594832.pem && ln -fs /usr/share/ca-certificates/2594832.pem /etc/ssl/certs/2594832.pem"
I1129 09:01:36.644862 493486 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/2594832.pem
I1129 09:01:36.648851 493486 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 29 08:35 /usr/share/ca-certificates/2594832.pem
I1129 09:01:36.648906 493486 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/2594832.pem
I1129 09:01:36.691340 493486 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/2594832.pem /etc/ssl/certs/3ec20f2e.0"
I1129 09:01:36.701173 493486 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1129 09:01:36.705050 493486 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1129 09:01:36.705110 493486 kubeadm.go:401] StartCluster: {Name:old-k8s-version-295154 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-295154 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1129 09:01:36.705201 493486 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I1129 09:01:36.705272 493486 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I1129 09:01:36.734535 493486 cri.go:89] found id: ""
I1129 09:01:36.734592 493486 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1129 09:01:36.743400 493486 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1129 09:01:36.751273 493486 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I1129 09:01:36.751332 493486 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1129 09:01:36.760386 493486 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1129 09:01:36.760404 493486 kubeadm.go:158] found existing configuration files:
I1129 09:01:36.760450 493486 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1129 09:01:36.768796 493486 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1129 09:01:36.768854 493486 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1129 09:01:36.776326 493486 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1129 09:01:36.784663 493486 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1129 09:01:36.784720 493486 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1129 09:01:36.793650 493486 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1129 09:01:36.801817 493486 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1129 09:01:36.801887 493486 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1129 09:01:36.811081 493486 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1129 09:01:36.819075 493486 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1129 09:01:36.819130 493486 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1129 09:01:36.827369 493486 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.28.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1129 09:01:36.885752 493486 kubeadm.go:319] [init] Using Kubernetes version: v1.28.0
I1129 09:01:36.885824 493486 kubeadm.go:319] [preflight] Running pre-flight checks
I1129 09:01:36.932588 493486 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I1129 09:01:36.932993 493486 kubeadm.go:319] KERNEL_VERSION: 6.8.0-1044-gcp
I1129 09:01:36.933139 493486 kubeadm.go:319] OS: Linux
I1129 09:01:36.933232 493486 kubeadm.go:319] CGROUPS_CPU: enabled
I1129 09:01:36.933332 493486 kubeadm.go:319] CGROUPS_CPUSET: enabled
I1129 09:01:36.933468 493486 kubeadm.go:319] CGROUPS_DEVICES: enabled
I1129 09:01:36.933539 493486 kubeadm.go:319] CGROUPS_FREEZER: enabled
I1129 09:01:36.933597 493486 kubeadm.go:319] CGROUPS_MEMORY: enabled
I1129 09:01:36.933656 493486 kubeadm.go:319] CGROUPS_PIDS: enabled
I1129 09:01:36.933717 493486 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I1129 09:01:36.933794 493486 kubeadm.go:319] CGROUPS_IO: enabled
I1129 09:01:37.018039 493486 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1129 09:01:37.018169 493486 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1129 09:01:37.018319 493486 kubeadm.go:319] [preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
I1129 09:01:37.171075 493486 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1129 09:01:37.173428 493486 out.go:252] - Generating certificates and keys ...
I1129 09:01:37.173535 493486 kubeadm.go:319] [certs] Using existing ca certificate authority
I1129 09:01:37.173613 493486 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1129 09:01:37.301964 493486 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I1129 09:01:37.410711 493486 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I1129 09:01:37.550821 493486 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1129 09:01:37.787553 493486 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1129 09:01:37.889172 493486 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1129 09:01:37.889414 493486 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [localhost old-k8s-version-295154] and IPs [192.168.76.2 127.0.0.1 ::1]
I1129 09:01:38.063017 493486 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1129 09:01:38.063214 493486 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [localhost old-k8s-version-295154] and IPs [192.168.76.2 127.0.0.1 ::1]
I1129 09:01:38.202234 493486 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1129 09:01:38.262563 493486 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1129 09:01:36.787780 494126 main.go:143] libmachine: SSH cmd err, output: <nil>: no-preload-924441
I1129 09:01:36.787807 494126 ubuntu.go:182] provisioning hostname "no-preload-924441"
I1129 09:01:36.787868 494126 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-924441
I1129 09:01:36.808836 494126 main.go:143] libmachine: Using SSH client type: native
I1129 09:01:36.809153 494126 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33063 <nil> <nil>}
I1129 09:01:36.809173 494126 main.go:143] libmachine: About to run SSH command:
sudo hostname no-preload-924441 && echo "no-preload-924441" | sudo tee /etc/hostname
I1129 09:01:36.973090 494126 main.go:143] libmachine: SSH cmd err, output: <nil>: no-preload-924441
I1129 09:01:36.973172 494126 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-924441
I1129 09:01:36.993095 494126 main.go:143] libmachine: Using SSH client type: native
I1129 09:01:36.993348 494126 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33063 <nil> <nil>}
I1129 09:01:36.993366 494126 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sno-preload-924441' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 no-preload-924441/g' /etc/hosts;
else
echo '127.0.1.1 no-preload-924441' | sudo tee -a /etc/hosts;
fi
fi
I1129 09:01:37.147252 494126 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1129 09:01:37.147286 494126 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/22000-255825/.minikube CaCertPath:/home/jenkins/minikube-integration/22000-255825/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/22000-255825/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/22000-255825/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/22000-255825/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/22000-255825/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/22000-255825/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/22000-255825/.minikube}
I1129 09:01:37.147336 494126 ubuntu.go:190] setting up certificates
I1129 09:01:37.147350 494126 provision.go:84] configureAuth start
I1129 09:01:37.147407 494126 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" no-preload-924441
I1129 09:01:37.167771 494126 provision.go:143] copyHostCerts
I1129 09:01:37.167841 494126 exec_runner.go:144] found /home/jenkins/minikube-integration/22000-255825/.minikube/ca.pem, removing ...
I1129 09:01:37.167856 494126 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22000-255825/.minikube/ca.pem
I1129 09:01:37.167941 494126 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22000-255825/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/22000-255825/.minikube/ca.pem (1078 bytes)
I1129 09:01:37.168073 494126 exec_runner.go:144] found /home/jenkins/minikube-integration/22000-255825/.minikube/cert.pem, removing ...
I1129 09:01:37.168087 494126 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22000-255825/.minikube/cert.pem
I1129 09:01:37.168135 494126 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22000-255825/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/22000-255825/.minikube/cert.pem (1123 bytes)
I1129 09:01:37.168246 494126 exec_runner.go:144] found /home/jenkins/minikube-integration/22000-255825/.minikube/key.pem, removing ...
I1129 09:01:37.168259 494126 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22000-255825/.minikube/key.pem
I1129 09:01:37.168304 494126 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22000-255825/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/22000-255825/.minikube/key.pem (1679 bytes)
I1129 09:01:37.168383 494126 provision.go:117] generating server cert: /home/jenkins/minikube-integration/22000-255825/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/22000-255825/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/22000-255825/.minikube/certs/ca-key.pem org=jenkins.no-preload-924441 san=[127.0.0.1 192.168.103.2 localhost minikube no-preload-924441]
I1129 09:01:37.302569 494126 provision.go:177] copyRemoteCerts
I1129 09:01:37.302625 494126 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1129 09:01:37.302676 494126 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-924441
I1129 09:01:37.320965 494126 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/22000-255825/.minikube/machines/no-preload-924441/id_rsa Username:docker}
I1129 09:01:37.425520 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I1129 09:01:37.446589 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/machines/server.pem --> /etc/docker/server.pem (1220 bytes)
I1129 09:01:37.463963 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I1129 09:01:37.480486 494126 provision.go:87] duration metric: took 333.119398ms to configureAuth
I1129 09:01:37.480511 494126 ubuntu.go:206] setting minikube options for container-runtime
I1129 09:01:37.480667 494126 config.go:182] Loaded profile config "no-preload-924441": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1129 09:01:37.480680 494126 machine.go:97] duration metric: took 3.880753165s to provisionDockerMachine
I1129 09:01:37.480691 494126 client.go:176] duration metric: took 7.771282469s to LocalClient.Create
I1129 09:01:37.480714 494126 start.go:167] duration metric: took 7.771346771s to libmachine.API.Create "no-preload-924441"
I1129 09:01:37.480726 494126 start.go:293] postStartSetup for "no-preload-924441" (driver="docker")
I1129 09:01:37.480750 494126 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1129 09:01:37.480814 494126 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1129 09:01:37.480883 494126 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-924441
I1129 09:01:37.498996 494126 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/22000-255825/.minikube/machines/no-preload-924441/id_rsa Username:docker}
I1129 09:01:37.602864 494126 ssh_runner.go:195] Run: cat /etc/os-release
I1129 09:01:37.606394 494126 main.go:143] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1129 09:01:37.606428 494126 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1129 09:01:37.606439 494126 filesync.go:126] Scanning /home/jenkins/minikube-integration/22000-255825/.minikube/addons for local assets ...
I1129 09:01:37.606502 494126 filesync.go:126] Scanning /home/jenkins/minikube-integration/22000-255825/.minikube/files for local assets ...
I1129 09:01:37.606593 494126 filesync.go:149] local asset: /home/jenkins/minikube-integration/22000-255825/.minikube/files/etc/ssl/certs/2594832.pem -> 2594832.pem in /etc/ssl/certs
I1129 09:01:37.606724 494126 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1129 09:01:37.614670 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/files/etc/ssl/certs/2594832.pem --> /etc/ssl/certs/2594832.pem (1708 bytes)
I1129 09:01:37.635134 494126 start.go:296] duration metric: took 154.380805ms for postStartSetup
I1129 09:01:37.635554 494126 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" no-preload-924441
I1129 09:01:37.655528 494126 profile.go:143] Saving config to /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/config.json ...
I1129 09:01:37.655850 494126 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1129 09:01:37.655900 494126 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-924441
I1129 09:01:37.677317 494126 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/22000-255825/.minikube/machines/no-preload-924441/id_rsa Username:docker}
I1129 09:01:37.781275 494126 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1129 09:01:37.786042 494126 start.go:128] duration metric: took 8.07881841s to createHost
I1129 09:01:37.786069 494126 start.go:83] releasing machines lock for "no-preload-924441", held for 8.078998368s
I1129 09:01:37.786141 494126 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" no-preload-924441
I1129 09:01:37.805459 494126 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1129 09:01:37.805494 494126 ssh_runner.go:195] Run: cat /version.json
I1129 09:01:37.805552 494126 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-924441
I1129 09:01:37.805561 494126 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-924441
I1129 09:01:37.824515 494126 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/22000-255825/.minikube/machines/no-preload-924441/id_rsa Username:docker}
I1129 09:01:37.825042 494126 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/22000-255825/.minikube/machines/no-preload-924441/id_rsa Username:docker}
I1129 09:01:37.978797 494126 ssh_runner.go:195] Run: systemctl --version
I1129 09:01:37.985561 494126 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1129 09:01:37.990121 494126 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1129 09:01:37.990198 494126 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f \( \( -name *bridge* -or -name *podman* \) -and -not -name *.mk_disabled \) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" \;
I1129 09:01:38.014806 494126 cni.go:262] disabled [/etc/cni/net.d/10-crio-bridge.conflist.disabled, /etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
I1129 09:01:38.014833 494126 start.go:496] detecting cgroup driver to use...
I1129 09:01:38.014872 494126 detect.go:190] detected "systemd" cgroup driver on host os
I1129 09:01:38.014922 494126 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1129 09:01:38.028890 494126 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1129 09:01:38.040635 494126 docker.go:218] disabling cri-docker service (if available) ...
I1129 09:01:38.040704 494126 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1129 09:01:38.059274 494126 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1129 09:01:38.079903 494126 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1129 09:01:38.160895 494126 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1129 09:01:38.248638 494126 docker.go:234] disabling docker service ...
I1129 09:01:38.248693 494126 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1129 09:01:38.270699 494126 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1129 09:01:38.283241 494126 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1129 09:01:38.364018 494126 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1129 09:01:38.451578 494126 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1129 09:01:38.464900 494126 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1129 09:01:38.478711 494126 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I1129 09:01:38.488688 494126 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1129 09:01:38.497188 494126 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I1129 09:01:38.497235 494126 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I1129 09:01:38.506143 494126 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1129 09:01:38.514500 494126 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1129 09:01:38.522578 494126 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1129 09:01:38.530605 494126 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1129 09:01:38.538074 494126 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1129 09:01:38.546395 494126 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1129 09:01:38.554633 494126 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1129 09:01:38.564192 494126 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1129 09:01:38.571328 494126 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1129 09:01:38.578488 494126 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1129 09:01:38.657072 494126 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1129 09:01:38.731899 494126 start.go:543] Will wait 60s for socket path /run/containerd/containerd.sock
I1129 09:01:38.731970 494126 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I1129 09:01:38.736165 494126 start.go:564] Will wait 60s for crictl version
I1129 09:01:38.736223 494126 ssh_runner.go:195] Run: which crictl
I1129 09:01:38.739821 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1129 09:01:38.765727 494126 start.go:580] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: v2.1.5
RuntimeApiVersion: v1
I1129 09:01:38.765799 494126 ssh_runner.go:195] Run: containerd --version
I1129 09:01:38.788554 494126 ssh_runner.go:195] Run: containerd --version
I1129 09:01:38.813801 494126 out.go:179] * Preparing Kubernetes v1.34.1 on containerd 2.1.5 ...
I1129 09:01:38.554215 493486 kubeadm.go:319] [certs] Generating "sa" key and public key
I1129 09:01:38.554337 493486 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1129 09:01:38.871587 493486 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1129 09:01:39.076048 493486 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1129 09:01:39.365556 493486 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1129 09:01:39.428949 493486 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1129 09:01:39.429579 493486 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1129 09:01:39.438444 493486 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1129 09:01:38.814940 494126 cli_runner.go:164] Run: docker network inspect no-preload-924441 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1129 09:01:38.832444 494126 ssh_runner.go:195] Run: grep 192.168.103.1 host.minikube.internal$ /etc/hosts
I1129 09:01:38.836556 494126 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.103.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1129 09:01:38.846826 494126 kubeadm.go:884] updating cluster {Name:no-preload-924441 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-924441 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.103.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1129 09:01:38.846940 494126 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
I1129 09:01:38.846988 494126 ssh_runner.go:195] Run: sudo crictl images --output json
I1129 09:01:38.875513 494126 containerd.go:623] couldn't find preloaded image for "registry.k8s.io/kube-apiserver:v1.34.1". assuming images are not preloaded.
I1129 09:01:38.875537 494126 cache_images.go:90] LoadCachedImages start: [registry.k8s.io/kube-apiserver:v1.34.1 registry.k8s.io/kube-controller-manager:v1.34.1 registry.k8s.io/kube-scheduler:v1.34.1 registry.k8s.io/kube-proxy:v1.34.1 registry.k8s.io/pause:3.10.1 registry.k8s.io/etcd:3.6.4-0 registry.k8s.io/coredns/coredns:v1.12.1 gcr.io/k8s-minikube/storage-provisioner:v5]
I1129 09:01:38.875606 494126 image.go:138] retrieving image: gcr.io/k8s-minikube/storage-provisioner:v5
I1129 09:01:38.875606 494126 image.go:138] retrieving image: registry.k8s.io/kube-apiserver:v1.34.1
I1129 09:01:38.875633 494126 image.go:138] retrieving image: registry.k8s.io/kube-proxy:v1.34.1
I1129 09:01:38.875642 494126 image.go:138] retrieving image: registry.k8s.io/kube-scheduler:v1.34.1
I1129 09:01:38.875663 494126 image.go:138] retrieving image: registry.k8s.io/etcd:3.6.4-0
I1129 09:01:38.875672 494126 image.go:138] retrieving image: registry.k8s.io/kube-controller-manager:v1.34.1
I1129 09:01:38.875613 494126 image.go:138] retrieving image: registry.k8s.io/pause:3.10.1
I1129 09:01:38.875710 494126 image.go:138] retrieving image: registry.k8s.io/coredns/coredns:v1.12.1
I1129 09:01:38.877065 494126 image.go:181] daemon lookup for registry.k8s.io/coredns/coredns:v1.12.1: Error response from daemon: No such image: registry.k8s.io/coredns/coredns:v1.12.1
I1129 09:01:38.877082 494126 image.go:181] daemon lookup for registry.k8s.io/kube-proxy:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-proxy:v1.34.1
I1129 09:01:38.877098 494126 image.go:181] daemon lookup for registry.k8s.io/kube-apiserver:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-apiserver:v1.34.1
I1129 09:01:38.877104 494126 image.go:181] daemon lookup for registry.k8s.io/etcd:3.6.4-0: Error response from daemon: No such image: registry.k8s.io/etcd:3.6.4-0
I1129 09:01:38.877132 494126 image.go:181] daemon lookup for registry.k8s.io/kube-scheduler:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-scheduler:v1.34.1
I1129 09:01:38.877185 494126 image.go:181] daemon lookup for registry.k8s.io/kube-controller-manager:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-controller-manager:v1.34.1
I1129 09:01:38.877233 494126 image.go:181] daemon lookup for gcr.io/k8s-minikube/storage-provisioner:v5: Error response from daemon: No such image: gcr.io/k8s-minikube/storage-provisioner:v5
I1129 09:01:38.877189 494126 image.go:181] daemon lookup for registry.k8s.io/pause:3.10.1: Error response from daemon: No such image: registry.k8s.io/pause:3.10.1
I1129 09:01:39.045541 494126 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-scheduler:v1.34.1" and sha "7dd6aaa1717ab7eaae4578503e4c4d9965fcf5a249e8155fe16379ee9b6cb813"
I1129 09:01:39.045605 494126 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-scheduler:v1.34.1
I1129 09:01:39.049466 494126 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-controller-manager:v1.34.1" and sha "c80c8dbafe7dd71fc21527912a6dd20ccd1b71f3e561a5c28337388d0619538f"
I1129 09:01:39.049525 494126 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-controller-manager:v1.34.1
I1129 09:01:39.055696 494126 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-apiserver:v1.34.1" and sha "c3994bc6961024917ec0aeee02e62828108c21a52d87648e30f3080d9cbadc97"
I1129 09:01:39.055787 494126 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-apiserver:v1.34.1
I1129 09:01:39.065913 494126 containerd.go:267] Checking existence of image with name "registry.k8s.io/etcd:3.6.4-0" and sha "5f1f5298c888daa46c4409ff4cefe5ca9d16e479419f94cdb5f5d5563dac0115"
I1129 09:01:39.065987 494126 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/etcd:3.6.4-0
I1129 09:01:39.071326 494126 cache_images.go:118] "registry.k8s.io/kube-scheduler:v1.34.1" needs transfer: "registry.k8s.io/kube-scheduler:v1.34.1" does not exist at hash "7dd6aaa1717ab7eaae4578503e4c4d9965fcf5a249e8155fe16379ee9b6cb813" in container runtime
I1129 09:01:39.071386 494126 cri.go:218] Removing image: registry.k8s.io/kube-scheduler:v1.34.1
I1129 09:01:39.071433 494126 ssh_runner.go:195] Run: which crictl
I1129 09:01:39.072494 494126 containerd.go:267] Checking existence of image with name "registry.k8s.io/coredns/coredns:v1.12.1" and sha "52546a367cc9e0d924aa3b190596a9167fa6e53245023b5b5baf0f07e5443969"
I1129 09:01:39.072560 494126 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/coredns/coredns:v1.12.1
I1129 09:01:39.074055 494126 cache_images.go:118] "registry.k8s.io/kube-controller-manager:v1.34.1" needs transfer: "registry.k8s.io/kube-controller-manager:v1.34.1" does not exist at hash "c80c8dbafe7dd71fc21527912a6dd20ccd1b71f3e561a5c28337388d0619538f" in container runtime
I1129 09:01:39.074103 494126 cri.go:218] Removing image: registry.k8s.io/kube-controller-manager:v1.34.1
I1129 09:01:39.074155 494126 ssh_runner.go:195] Run: which crictl
I1129 09:01:39.079805 494126 cache_images.go:118] "registry.k8s.io/kube-apiserver:v1.34.1" needs transfer: "registry.k8s.io/kube-apiserver:v1.34.1" does not exist at hash "c3994bc6961024917ec0aeee02e62828108c21a52d87648e30f3080d9cbadc97" in container runtime
I1129 09:01:39.079853 494126 cri.go:218] Removing image: registry.k8s.io/kube-apiserver:v1.34.1
I1129 09:01:39.079906 494126 ssh_runner.go:195] Run: which crictl
I1129 09:01:39.090225 494126 cache_images.go:118] "registry.k8s.io/etcd:3.6.4-0" needs transfer: "registry.k8s.io/etcd:3.6.4-0" does not exist at hash "5f1f5298c888daa46c4409ff4cefe5ca9d16e479419f94cdb5f5d5563dac0115" in container runtime
I1129 09:01:39.090271 494126 cri.go:218] Removing image: registry.k8s.io/etcd:3.6.4-0
I1129 09:01:39.090279 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.34.1
I1129 09:01:39.090318 494126 ssh_runner.go:195] Run: which crictl
I1129 09:01:39.094954 494126 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-proxy:v1.34.1" and sha "fc25172553d79197ecd840ec8dba1fba68330079355e974b04c1a441e6a4a0b7"
I1129 09:01:39.095016 494126 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-proxy:v1.34.1
I1129 09:01:39.096356 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.34.1
I1129 09:01:39.096365 494126 cache_images.go:118] "registry.k8s.io/coredns/coredns:v1.12.1" needs transfer: "registry.k8s.io/coredns/coredns:v1.12.1" does not exist at hash "52546a367cc9e0d924aa3b190596a9167fa6e53245023b5b5baf0f07e5443969" in container runtime
I1129 09:01:39.096402 494126 cri.go:218] Removing image: registry.k8s.io/coredns/coredns:v1.12.1
I1129 09:01:39.096438 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.34.1
I1129 09:01:39.096440 494126 ssh_runner.go:195] Run: which crictl
I1129 09:01:39.108053 494126 containerd.go:267] Checking existence of image with name "registry.k8s.io/pause:3.10.1" and sha "cd073f4c5f6a8e9dc6f3125ba00cf60819cae95c1ec84a1f146ee4a9cf9e803f"
I1129 09:01:39.108111 494126 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/pause:3.10.1
I1129 09:01:39.125198 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.34.1
I1129 09:01:39.125300 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.6.4-0
I1129 09:01:39.125361 494126 cache_images.go:118] "registry.k8s.io/kube-proxy:v1.34.1" needs transfer: "registry.k8s.io/kube-proxy:v1.34.1" does not exist at hash "fc25172553d79197ecd840ec8dba1fba68330079355e974b04c1a441e6a4a0b7" in container runtime
I1129 09:01:39.125408 494126 cri.go:218] Removing image: registry.k8s.io/kube-proxy:v1.34.1
I1129 09:01:39.125455 494126 ssh_runner.go:195] Run: which crictl
I1129 09:01:39.128374 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.34.1
I1129 09:01:39.132565 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.34.1
I1129 09:01:39.132640 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.12.1
I1129 09:01:39.138113 494126 cache_images.go:118] "registry.k8s.io/pause:3.10.1" needs transfer: "registry.k8s.io/pause:3.10.1" does not exist at hash "cd073f4c5f6a8e9dc6f3125ba00cf60819cae95c1ec84a1f146ee4a9cf9e803f" in container runtime
I1129 09:01:39.138163 494126 cri.go:218] Removing image: registry.k8s.io/pause:3.10.1
I1129 09:01:39.138200 494126 ssh_runner.go:195] Run: which crictl
I1129 09:01:39.167013 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.34.1
I1129 09:01:39.167128 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.34.1
I1129 09:01:39.167330 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.34.1
I1129 09:01:39.167330 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.6.4-0
I1129 09:01:39.167996 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.34.1
I1129 09:01:39.173113 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.12.1
I1129 09:01:39.173171 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1129 09:01:39.214078 494126 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1
I1129 09:01:39.214193 494126 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-scheduler_v1.34.1
I1129 09:01:39.214389 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.34.1
I1129 09:01:39.214576 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.6.4-0
I1129 09:01:39.220552 494126 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1
I1129 09:01:39.220649 494126 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-apiserver_v1.34.1
I1129 09:01:39.220857 494126 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-scheduler_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-scheduler_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-scheduler_v1.34.1': No such file or directory
I1129 09:01:39.220895 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1 --> /var/lib/minikube/images/kube-scheduler_v1.34.1 (17396736 bytes)
I1129 09:01:39.222433 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.12.1
I1129 09:01:39.222493 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1129 09:01:39.222587 494126 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1
I1129 09:01:39.222669 494126 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-controller-manager_v1.34.1
I1129 09:01:39.275608 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.34.1
I1129 09:01:39.275622 494126 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0
I1129 09:01:39.275679 494126 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-apiserver_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-apiserver_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-apiserver_v1.34.1': No such file or directory
I1129 09:01:39.275707 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1 --> /var/lib/minikube/images/kube-apiserver_v1.34.1 (27073024 bytes)
I1129 09:01:39.275716 494126 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/etcd_3.6.4-0
I1129 09:01:39.287672 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1129 09:01:39.287708 494126 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1
I1129 09:01:39.287708 494126 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-controller-manager_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-controller-manager_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-controller-manager_v1.34.1': No such file or directory
I1129 09:01:39.287808 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1 --> /var/lib/minikube/images/kube-controller-manager_v1.34.1 (22831104 bytes)
I1129 09:01:39.287825 494126 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/coredns_v1.12.1
I1129 09:01:39.339051 494126 ssh_runner.go:352] existence check for /var/lib/minikube/images/etcd_3.6.4-0: stat -c "%s %y" /var/lib/minikube/images/etcd_3.6.4-0: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/etcd_3.6.4-0': No such file or directory
I1129 09:01:39.339082 494126 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1
I1129 09:01:39.339092 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0 --> /var/lib/minikube/images/etcd_3.6.4-0 (74320896 bytes)
I1129 09:01:39.339110 494126 ssh_runner.go:352] existence check for /var/lib/minikube/images/coredns_v1.12.1: stat -c "%s %y" /var/lib/minikube/images/coredns_v1.12.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/coredns_v1.12.1': No such file or directory
I1129 09:01:39.339137 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1 --> /var/lib/minikube/images/coredns_v1.12.1 (22394368 bytes)
I1129 09:01:39.339173 494126 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-proxy_v1.34.1
I1129 09:01:39.339202 494126 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1
I1129 09:01:39.339317 494126 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/pause_3.10.1
I1129 09:01:39.424948 494126 ssh_runner.go:352] existence check for /var/lib/minikube/images/pause_3.10.1: stat -c "%s %y" /var/lib/minikube/images/pause_3.10.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/pause_3.10.1': No such file or directory
I1129 09:01:39.424997 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 --> /var/lib/minikube/images/pause_3.10.1 (321024 bytes)
I1129 09:01:39.425030 494126 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-proxy_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-proxy_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-proxy_v1.34.1': No such file or directory
I1129 09:01:39.425058 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1 --> /var/lib/minikube/images/kube-proxy_v1.34.1 (25966080 bytes)
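Each "existence check ... Process exited with status 1" above is likewise a normal cache miss: minikube stats the image archive inside the node and, on "No such file or directory", scps it in from the host cache. A minimal sketch of the same check, run inside the node (path taken from the log):

    # Status 1 here means the archive is absent and a transfer must follow;
    # on a hit, stat prints "<size> <mtime>" and the scp step is skipped.
    stat -c "%s %y" /var/lib/minikube/images/kube-proxy_v1.34.1 \
      || echo "archive missing, transfer needed"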
I1129 09:01:36.592807 460401 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
I1129 09:01:36.593240 460401 api_server.go:269] stopped: https://192.168.85.2:8443/healthz: Get "https://192.168.85.2:8443/healthz": dial tcp 192.168.85.2:8443: connect: connection refused
I1129 09:01:36.593304 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1129 09:01:36.593360 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1129 09:01:36.620981 460401 cri.go:89] found id: "5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095"
I1129 09:01:36.621002 460401 cri.go:89] found id: "1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:01:36.621008 460401 cri.go:89] found id: ""
I1129 09:01:36.621018 460401 logs.go:282] 2 containers: [5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101]
I1129 09:01:36.621079 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:36.627593 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:36.632350 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1129 09:01:36.632420 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1129 09:01:36.660070 460401 cri.go:89] found id: "f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:01:36.660091 460401 cri.go:89] found id: ""
I1129 09:01:36.660100 460401 logs.go:282] 1 containers: [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625]
I1129 09:01:36.660156 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:36.664644 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1129 09:01:36.664720 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1129 09:01:36.696935 460401 cri.go:89] found id: ""
I1129 09:01:36.696967 460401 logs.go:282] 0 containers: []
W1129 09:01:36.696977 460401 logs.go:284] No container was found matching "coredns"
I1129 09:01:36.696985 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1129 09:01:36.697045 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1129 09:01:36.726832 460401 cri.go:89] found id: "092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:01:36.726857 460401 cri.go:89] found id: "1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:01:36.726864 460401 cri.go:89] found id: ""
I1129 09:01:36.726874 460401 logs.go:282] 2 containers: [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea]
I1129 09:01:36.726928 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:36.732693 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:36.737783 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1129 09:01:36.737848 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1129 09:01:36.765201 460401 cri.go:89] found id: ""
I1129 09:01:36.765229 460401 logs.go:282] 0 containers: []
W1129 09:01:36.765238 460401 logs.go:284] No container was found matching "kube-proxy"
I1129 09:01:36.765245 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1129 09:01:36.765300 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1129 09:01:36.795203 460401 cri.go:89] found id: "2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:01:36.795231 460401 cri.go:89] found id: "f78d0d97ffa9f2d0cbf8a0cf305a7f0c4323a505bb9b3fa272405c6b22ab9f00"
I1129 09:01:36.795237 460401 cri.go:89] found id: "976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:01:36.795242 460401 cri.go:89] found id: ""
I1129 09:01:36.795251 460401 logs.go:282] 3 containers: [2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a f78d0d97ffa9f2d0cbf8a0cf305a7f0c4323a505bb9b3fa272405c6b22ab9f00 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d]
I1129 09:01:36.795316 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:36.801008 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:36.806325 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:36.811017 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1129 09:01:36.811088 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1129 09:01:36.840359 460401 cri.go:89] found id: ""
I1129 09:01:36.840386 460401 logs.go:282] 0 containers: []
W1129 09:01:36.840397 460401 logs.go:284] No container was found matching "kindnet"
I1129 09:01:36.840406 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1129 09:01:36.840469 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1129 09:01:36.874045 460401 cri.go:89] found id: ""
I1129 09:01:36.874068 460401 logs.go:282] 0 containers: []
W1129 09:01:36.874075 460401 logs.go:284] No container was found matching "storage-provisioner"
I1129 09:01:36.874085 460401 logs.go:123] Gathering logs for describe nodes ...
I1129 09:01:36.874099 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1129 09:01:36.950404 460401 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1129 09:01:36.950426 460401 logs.go:123] Gathering logs for kube-apiserver [1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101] ...
I1129 09:01:36.950442 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:01:36.994232 460401 logs.go:123] Gathering logs for kube-scheduler [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21] ...
I1129 09:01:36.994264 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:01:37.049507 460401 logs.go:123] Gathering logs for kube-scheduler [1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea] ...
I1129 09:01:37.049546 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:01:37.087133 460401 logs.go:123] Gathering logs for kube-controller-manager [2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a] ...
I1129 09:01:37.087165 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:01:37.117577 460401 logs.go:123] Gathering logs for kube-controller-manager [976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d] ...
I1129 09:01:37.117602 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:01:37.154176 460401 logs.go:123] Gathering logs for kube-apiserver [5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095] ...
I1129 09:01:37.154210 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095"
I1129 09:01:37.197090 460401 logs.go:123] Gathering logs for etcd [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625] ...
I1129 09:01:37.197121 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:01:37.240775 460401 logs.go:123] Gathering logs for kube-controller-manager [f78d0d97ffa9f2d0cbf8a0cf305a7f0c4323a505bb9b3fa272405c6b22ab9f00] ...
I1129 09:01:37.240811 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f78d0d97ffa9f2d0cbf8a0cf305a7f0c4323a505bb9b3fa272405c6b22ab9f00"
I1129 09:01:37.269234 460401 logs.go:123] Gathering logs for containerd ...
I1129 09:01:37.269260 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1129 09:01:37.312948 460401 logs.go:123] Gathering logs for container status ...
I1129 09:01:37.312979 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1129 09:01:37.348500 460401 logs.go:123] Gathering logs for kubelet ...
I1129 09:01:37.348527 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1129 09:01:37.435755 460401 logs.go:123] Gathering logs for dmesg ...
I1129 09:01:37.435786 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
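Process 460401 (a different cluster in the same parallel run) is looping here: every healthz probe to 192.168.85.2:8443 is refused, so it gathers component logs and retries. The probe is a plain HTTPS GET; a hand-run equivalent, skipping certificate verification since the cluster CA lives on the node:

    # "connection refused" while the apiserver container is down;
    # a healthy endpoint answers 200 with body "ok".
    curl -k https://192.168.85.2:8443/healthz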
I1129 09:01:39.440026 493486 out.go:252] - Booting up control plane ...
I1129 09:01:39.440161 493486 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1129 09:01:39.440285 493486 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1129 09:01:39.440970 493486 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1129 09:01:39.459308 493486 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1129 09:01:39.460971 493486 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1129 09:01:39.461057 493486 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1129 09:01:39.610284 493486 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
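kubeadm has now written the control-plane static Pod manifests and waits (up to 4m0s) for the kubelet to start them. If this stage stalls, two quick checks inside the node narrow it down, using the same tools this log already relies on:

    # Manifests kubeadm just wrote:
    ls /etc/kubernetes/manifests
    # Control-plane containers the kubelet has actually started:
    sudo crictl ps -a --name 'kube-apiserver|etcd'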
I1129 09:01:39.952440 460401 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
I1129 09:01:39.952996 460401 api_server.go:269] stopped: https://192.168.85.2:8443/healthz: Get "https://192.168.85.2:8443/healthz": dial tcp 192.168.85.2:8443: connect: connection refused
I1129 09:01:39.953076 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1129 09:01:39.953145 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1129 09:01:39.990073 460401 cri.go:89] found id: "5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095"
I1129 09:01:39.990100 460401 cri.go:89] found id: "1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:01:39.990107 460401 cri.go:89] found id: ""
I1129 09:01:39.990117 460401 logs.go:282] 2 containers: [5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101]
I1129 09:01:39.990183 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:39.996871 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:40.002374 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1129 09:01:40.002458 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1129 09:01:40.036502 460401 cri.go:89] found id: "f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:01:40.036525 460401 cri.go:89] found id: ""
I1129 09:01:40.036542 460401 logs.go:282] 1 containers: [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625]
I1129 09:01:40.036600 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:40.044171 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1129 09:01:40.044261 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1129 09:01:40.084048 460401 cri.go:89] found id: ""
I1129 09:01:40.084165 460401 logs.go:282] 0 containers: []
W1129 09:01:40.084184 460401 logs.go:284] No container was found matching "coredns"
I1129 09:01:40.084195 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1129 09:01:40.084329 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1129 09:01:40.116869 460401 cri.go:89] found id: "092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:01:40.116899 460401 cri.go:89] found id: "1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:01:40.116905 460401 cri.go:89] found id: ""
I1129 09:01:40.116916 460401 logs.go:282] 2 containers: [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea]
I1129 09:01:40.116982 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:40.123222 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:40.128079 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1129 09:01:40.128146 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1129 09:01:40.159071 460401 cri.go:89] found id: ""
I1129 09:01:40.159101 460401 logs.go:282] 0 containers: []
W1129 09:01:40.159112 460401 logs.go:284] No container was found matching "kube-proxy"
I1129 09:01:40.159120 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1129 09:01:40.159178 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1129 09:01:40.191945 460401 cri.go:89] found id: "2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:01:40.191973 460401 cri.go:89] found id: "976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:01:40.191979 460401 cri.go:89] found id: ""
I1129 09:01:40.191990 460401 logs.go:282] 2 containers: [2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d]
I1129 09:01:40.192055 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:40.197191 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:40.202276 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1129 09:01:40.202350 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1129 09:01:40.236481 460401 cri.go:89] found id: ""
I1129 09:01:40.236510 460401 logs.go:282] 0 containers: []
W1129 09:01:40.236521 460401 logs.go:284] No container was found matching "kindnet"
I1129 09:01:40.236528 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1129 09:01:40.236597 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1129 09:01:40.266476 460401 cri.go:89] found id: ""
I1129 09:01:40.266505 460401 logs.go:282] 0 containers: []
W1129 09:01:40.266516 460401 logs.go:284] No container was found matching "storage-provisioner"
I1129 09:01:40.266529 460401 logs.go:123] Gathering logs for etcd [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625] ...
I1129 09:01:40.266547 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:01:40.310670 460401 logs.go:123] Gathering logs for kube-scheduler [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21] ...
I1129 09:01:40.310713 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:01:40.362446 460401 logs.go:123] Gathering logs for kube-scheduler [1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea] ...
I1129 09:01:40.362487 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:01:40.399108 460401 logs.go:123] Gathering logs for kube-controller-manager [976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d] ...
I1129 09:01:40.399138 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:01:40.435770 460401 logs.go:123] Gathering logs for containerd ...
I1129 09:01:40.435799 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1129 09:01:40.485497 460401 logs.go:123] Gathering logs for dmesg ...
I1129 09:01:40.485541 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1129 09:01:40.502944 460401 logs.go:123] Gathering logs for describe nodes ...
I1129 09:01:40.502977 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1129 09:01:40.592582 460401 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1129 09:01:40.592610 460401 logs.go:123] Gathering logs for kube-controller-manager [2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a] ...
I1129 09:01:40.592626 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:01:40.634792 460401 logs.go:123] Gathering logs for container status ...
I1129 09:01:40.634828 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1129 09:01:40.678348 460401 logs.go:123] Gathering logs for kubelet ...
I1129 09:01:40.678382 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1129 09:01:40.797799 460401 logs.go:123] Gathering logs for kube-apiserver [5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095] ...
I1129 09:01:40.797849 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095"
I1129 09:01:40.854148 460401 logs.go:123] Gathering logs for kube-apiserver [1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101] ...
I1129 09:01:40.854196 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:01:43.404360 460401 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
I1129 09:01:43.404858 460401 api_server.go:269] stopped: https://192.168.85.2:8443/healthz: Get "https://192.168.85.2:8443/healthz": dial tcp 192.168.85.2:8443: connect: connection refused
I1129 09:01:43.404925 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1129 09:01:43.404996 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1129 09:01:43.435800 460401 cri.go:89] found id: "5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095"
I1129 09:01:43.435836 460401 cri.go:89] found id: "1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:01:43.435843 460401 cri.go:89] found id: ""
I1129 09:01:43.435854 460401 logs.go:282] 2 containers: [5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101]
I1129 09:01:43.435923 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:43.441287 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:43.445761 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1129 09:01:43.445837 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1129 09:01:43.474830 460401 cri.go:89] found id: "f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:01:43.474859 460401 cri.go:89] found id: ""
I1129 09:01:43.474870 460401 logs.go:282] 1 containers: [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625]
I1129 09:01:43.474932 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:43.481397 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1129 09:01:43.481483 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1129 09:01:43.513967 460401 cri.go:89] found id: ""
I1129 09:01:43.513995 460401 logs.go:282] 0 containers: []
W1129 09:01:43.514006 460401 logs.go:284] No container was found matching "coredns"
I1129 09:01:43.514014 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1129 09:01:43.514074 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1129 09:01:43.550388 460401 cri.go:89] found id: "092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:01:43.550416 460401 cri.go:89] found id: "1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:01:43.550421 460401 cri.go:89] found id: ""
I1129 09:01:43.550431 460401 logs.go:282] 2 containers: [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea]
I1129 09:01:43.550505 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:43.557316 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:43.563173 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1129 09:01:43.563248 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1129 09:01:43.599482 460401 cri.go:89] found id: ""
I1129 09:01:43.599524 460401 logs.go:282] 0 containers: []
W1129 09:01:43.599535 460401 logs.go:284] No container was found matching "kube-proxy"
I1129 09:01:43.599545 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1129 09:01:43.599611 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1129 09:01:43.637030 460401 cri.go:89] found id: "2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:01:43.637053 460401 cri.go:89] found id: "976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:01:43.637059 460401 cri.go:89] found id: ""
I1129 09:01:43.637069 460401 logs.go:282] 2 containers: [2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d]
I1129 09:01:43.637130 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:43.643786 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:43.650011 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1129 09:01:43.650089 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1129 09:01:43.687244 460401 cri.go:89] found id: ""
I1129 09:01:43.687273 460401 logs.go:282] 0 containers: []
W1129 09:01:43.687295 460401 logs.go:284] No container was found matching "kindnet"
I1129 09:01:43.687303 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1129 09:01:43.687372 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1129 09:01:43.726453 460401 cri.go:89] found id: ""
I1129 09:01:43.726490 460401 logs.go:282] 0 containers: []
W1129 09:01:43.726501 460401 logs.go:284] No container was found matching "storage-provisioner"
I1129 09:01:43.726515 460401 logs.go:123] Gathering logs for kube-scheduler [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21] ...
I1129 09:01:43.726533 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:01:43.795442 460401 logs.go:123] Gathering logs for kube-controller-manager [2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a] ...
I1129 09:01:43.795490 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:01:43.841417 460401 logs.go:123] Gathering logs for kube-controller-manager [976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d] ...
I1129 09:01:43.841457 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:01:43.888511 460401 logs.go:123] Gathering logs for container status ...
I1129 09:01:43.888554 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1129 09:01:43.930753 460401 logs.go:123] Gathering logs for kubelet ...
I1129 09:01:43.930789 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1129 09:01:44.043358 460401 logs.go:123] Gathering logs for dmesg ...
I1129 09:01:44.043410 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1129 09:01:44.065065 460401 logs.go:123] Gathering logs for kube-scheduler [1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea] ...
I1129 09:01:44.065107 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:01:44.112915 460401 logs.go:123] Gathering logs for containerd ...
I1129 09:01:44.112958 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1129 09:01:44.174077 460401 logs.go:123] Gathering logs for describe nodes ...
I1129 09:01:44.174120 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1129 09:01:44.247887 460401 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1129 09:01:44.247909 460401 logs.go:123] Gathering logs for kube-apiserver [5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095] ...
I1129 09:01:44.247927 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095"
I1129 09:01:44.290842 460401 logs.go:123] Gathering logs for kube-apiserver [1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101] ...
I1129 09:01:44.290882 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:01:44.335297 460401 logs.go:123] Gathering logs for etcd [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625] ...
I1129 09:01:44.335330 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:01:39.522040 494126 containerd.go:285] Loading image: /var/lib/minikube/images/pause_3.10.1
I1129 09:01:39.522116 494126 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/pause_3.10.1
I1129 09:01:39.664265 494126 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 from cache
I1129 09:01:39.664314 494126 containerd.go:285] Loading image: /var/lib/minikube/images/kube-scheduler_v1.34.1
I1129 09:01:39.664386 494126 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-scheduler_v1.34.1
I1129 09:01:40.291377 494126 containerd.go:267] Checking existence of image with name "gcr.io/k8s-minikube/storage-provisioner:v5" and sha "6e38f40d628db3002f5617342c8872c935de530d867d0f709a2fbda1a302a562"
I1129 09:01:40.291450 494126 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==gcr.io/k8s-minikube/storage-provisioner:v5
I1129 09:01:40.811289 494126 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-scheduler_v1.34.1: (1.146868238s)
I1129 09:01:40.811331 494126 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1 from cache
I1129 09:01:40.811358 494126 containerd.go:285] Loading image: /var/lib/minikube/images/kube-controller-manager_v1.34.1
I1129 09:01:40.811407 494126 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-controller-manager_v1.34.1
I1129 09:01:40.811531 494126 cache_images.go:118] "gcr.io/k8s-minikube/storage-provisioner:v5" needs transfer: "gcr.io/k8s-minikube/storage-provisioner:v5" does not exist at hash "6e38f40d628db3002f5617342c8872c935de530d867d0f709a2fbda1a302a562" in container runtime
I1129 09:01:40.811570 494126 cri.go:218] Removing image: gcr.io/k8s-minikube/storage-provisioner:v5
I1129 09:01:40.811610 494126 ssh_runner.go:195] Run: which crictl
I1129 09:01:41.858427 494126 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-controller-manager_v1.34.1: (1.046983131s)
I1129 09:01:41.858463 494126 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1 from cache
I1129 09:01:41.858488 494126 containerd.go:285] Loading image: /var/lib/minikube/images/coredns_v1.12.1
I1129 09:01:41.858484 494126 ssh_runner.go:235] Completed: which crictl: (1.046843529s)
I1129 09:01:41.858549 494126 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/coredns_v1.12.1
I1129 09:01:41.858557 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1129 09:01:43.352594 494126 ssh_runner.go:235] Completed: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5: (1.494004994s)
I1129 09:01:43.352634 494126 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/coredns_v1.12.1: (1.49406142s)
I1129 09:01:43.352657 494126 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1 from cache
I1129 09:01:43.352684 494126 containerd.go:285] Loading image: /var/lib/minikube/images/kube-apiserver_v1.34.1
I1129 09:01:43.352721 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1129 09:01:43.352741 494126 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-apiserver_v1.34.1
I1129 09:01:44.495181 494126 ssh_runner.go:235] Completed: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5: (1.142420788s)
I1129 09:01:44.495251 494126 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-apiserver_v1.34.1: (1.142485031s)
I1129 09:01:44.495274 494126 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1129 09:01:44.495280 494126 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1 from cache
I1129 09:01:44.495307 494126 containerd.go:285] Loading image: /var/lib/minikube/images/kube-proxy_v1.34.1
I1129 09:01:44.495357 494126 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-proxy_v1.34.1
I1129 09:01:44.611298 493486 kubeadm.go:319] [apiclient] All control plane components are healthy after 5.002099 seconds
I1129 09:01:44.611461 493486 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1129 09:01:44.626505 493486 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1129 09:01:45.150669 493486 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1129 09:01:45.150981 493486 kubeadm.go:319] [mark-control-plane] Marking the node old-k8s-version-295154 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1129 09:01:45.666153 493486 kubeadm.go:319] [bootstrap-token] Using token: fc3siq.brm7sjv6bjwb7j34
I1129 09:01:45.667757 493486 out.go:252] - Configuring RBAC rules ...
I1129 09:01:45.667991 493486 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1129 09:01:45.673404 493486 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1129 09:01:45.685336 493486 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1129 09:01:45.691974 493486 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1129 09:01:45.695311 493486 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1129 09:01:45.698699 493486 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1129 09:01:45.712796 493486 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1129 09:01:45.913473 493486 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1129 09:01:46.081267 493486 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1129 09:01:46.081993 493486 kubeadm.go:319]
I1129 09:01:46.082087 493486 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1129 09:01:46.082095 493486 kubeadm.go:319]
I1129 09:01:46.082160 493486 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1129 09:01:46.082179 493486 kubeadm.go:319]
I1129 09:01:46.082199 493486 kubeadm.go:319] mkdir -p $HOME/.kube
I1129 09:01:46.082251 493486 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1129 09:01:46.082302 493486 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1129 09:01:46.082308 493486 kubeadm.go:319]
I1129 09:01:46.082372 493486 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1129 09:01:46.082377 493486 kubeadm.go:319]
I1129 09:01:46.082434 493486 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1129 09:01:46.082445 493486 kubeadm.go:319]
I1129 09:01:46.082520 493486 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1129 09:01:46.082627 493486 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1129 09:01:46.082750 493486 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1129 09:01:46.082756 493486 kubeadm.go:319]
I1129 09:01:46.082891 493486 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1129 09:01:46.083019 493486 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1129 09:01:46.083030 493486 kubeadm.go:319]
I1129 09:01:46.083149 493486 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token fc3siq.brm7sjv6bjwb7j34 \
I1129 09:01:46.083319 493486 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:cfb13a4080e942b53ddf5e01885fcdd270ac918e177076400130991e2b6b7778 \
I1129 09:01:46.083366 493486 kubeadm.go:319] --control-plane
I1129 09:01:46.083383 493486 kubeadm.go:319]
I1129 09:01:46.083539 493486 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1129 09:01:46.083561 493486 kubeadm.go:319]
I1129 09:01:46.083696 493486 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token fc3siq.brm7sjv6bjwb7j34 \
I1129 09:01:46.083889 493486 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:cfb13a4080e942b53ddf5e01885fcdd270ac918e177076400130991e2b6b7778
I1129 09:01:46.087692 493486 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1044-gcp\n", err: exit status 1
I1129 09:01:46.087874 493486 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1129 09:01:46.087925 493486 cni.go:84] Creating CNI manager for ""
I1129 09:01:46.087942 493486 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1129 09:01:46.089437 493486 out.go:179] * Configuring CNI (Container Networking Interface) ...
I1129 09:01:46.093295 493486 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I1129 09:01:46.100033 493486 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.28.0/kubectl ...
I1129 09:01:46.100061 493486 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I1129 09:01:46.118046 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
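The docker driver with the containerd runtime ships no default CNI, so minikube applies its bundled kindnet manifest here. Once the apply returns, the rollout can be verified from the host (pod label assumed from minikube's kindnet manifest; adjust if it differs):

    # Hypothetical spot check: kindnet runs as a DaemonSet in kube-system.
    kubectl --context old-k8s-version-295154 -n kube-system get pods -l app=kindnet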
I1129 09:01:47.108562 493486 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1129 09:01:47.108767 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:47.108838 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes old-k8s-version-295154 minikube.k8s.io/updated_at=2025_11_29T09_01_47_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=d0eb20ec824c82ab3f24099c8b785e0a2a5789af minikube.k8s.io/name=old-k8s-version-295154 minikube.k8s.io/primary=true
I1129 09:01:47.209163 493486 ops.go:34] apiserver oom_adj: -16
I1129 09:01:47.209168 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:47.709726 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:48.209857 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:44.521775 494126 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5
I1129 09:01:44.521916 494126 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v5
I1129 09:01:45.636811 494126 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-proxy_v1.34.1: (1.141419574s)
I1129 09:01:45.636849 494126 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1 from cache
I1129 09:01:45.636857 494126 ssh_runner.go:235] Completed: stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v5: (1.114924181s)
I1129 09:01:45.636879 494126 ssh_runner.go:352] existence check for /var/lib/minikube/images/storage-provisioner_v5: stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v5: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/storage-provisioner_v5': No such file or directory
I1129 09:01:45.636882 494126 containerd.go:285] Loading image: /var/lib/minikube/images/etcd_3.6.4-0
I1129 09:01:45.636902 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 --> /var/lib/minikube/images/storage-provisioner_v5 (9060352 bytes)
I1129 09:01:45.636924 494126 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/etcd_3.6.4-0
I1129 09:01:48.452908 494126 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/etcd_3.6.4-0: (2.815950505s)
I1129 09:01:48.452936 494126 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0 from cache
I1129 09:01:48.452972 494126 containerd.go:285] Loading image: /var/lib/minikube/images/storage-provisioner_v5
I1129 09:01:48.453041 494126 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/storage-provisioner_v5
I1129 09:01:49.370622 494126 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/22000-255825/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 from cache
I1129 09:01:49.370663 494126 cache_images.go:125] Successfully loaded all cached images
I1129 09:01:49.370668 494126 cache_images.go:94] duration metric: took 10.495116704s to LoadCachedImages
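That closes the LoadCachedImages round trip for no-preload-924441 at ~10.5s: list what containerd already holds, remove stale tags, copy the cached archives in, and import them into the k8s.io namespace. Condensed to its shell essentials for a single image, with commands and paths exactly as they appear above:

    # 1. Is the image already present at the expected digest?
    sudo ctr -n=k8s.io images ls name==registry.k8s.io/etcd:3.6.4-0
    # 2. Drop any stale tag so the fresh import wins.
    sudo crictl rmi registry.k8s.io/etcd:3.6.4-0
    # 3. After copying the archive onto the node, import it.
    sudo ctr -n=k8s.io images import /var/lib/minikube/images/etcd_3.6.4-0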
I1129 09:01:49.370682 494126 kubeadm.go:935] updating node { 192.168.103.2 8443 v1.34.1 containerd true true} ...
I1129 09:01:49.370811 494126 kubeadm.go:947] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=no-preload-924441 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.103.2
[Install]
config:
{KubernetesVersion:v1.34.1 ClusterName:no-preload-924441 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1129 09:01:49.370873 494126 ssh_runner.go:195] Run: sudo crictl info
I1129 09:01:49.397690 494126 cni.go:84] Creating CNI manager for ""
I1129 09:01:49.397714 494126 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1129 09:01:49.397740 494126 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1129 09:01:49.397786 494126 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.103.2 APIServerPort:8443 KubernetesVersion:v1.34.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:no-preload-924441 NodeName:no-preload-924441 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.103.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.103.2 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1129 09:01:49.397929 494126 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.103.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///run/containerd/containerd.sock
name: "no-preload-924441"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.103.2"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.103.2"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.1
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
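The rendered kubeadm config above is produced by substituting the kubeadm options printed at kubeadm.go:190 into a template. A minimal, hypothetical text/template sketch of that substitution, covering only the InitConfiguration fields visible above (this is not minikube's actual template):

package main

import (
	"os"
	"text/template"
)

// A tiny stand-in for the real template: only the fields visible in the
// rendered InitConfiguration above (advertise address, port, CRI socket,
// node name).
const initCfg = `apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: {{.AdvertiseAddress}}
  bindPort: {{.APIServerPort}}
nodeRegistration:
  criSocket: {{.CRISocket}}
  name: "{{.NodeName}}"
`

type opts struct { // field names follow the kubeadm.go:190 dump
	AdvertiseAddress string
	APIServerPort    int
	CRISocket        string
	NodeName         string
}

func main() {
	t := template.Must(template.New("init").Parse(initCfg))
	_ = t.Execute(os.Stdout, opts{
		AdvertiseAddress: "192.168.103.2",
		APIServerPort:    8443,
		CRISocket:        "unix:///run/containerd/containerd.sock",
		NodeName:         "no-preload-924441",
	})
}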
I1129 09:01:49.397999 494126 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.1
I1129 09:01:49.407101 494126 binaries.go:54] Didn't find k8s binaries: sudo ls /var/lib/minikube/binaries/v1.34.1: Process exited with status 2
stdout:
stderr:
ls: cannot access '/var/lib/minikube/binaries/v1.34.1': No such file or directory
Initiating transfer...
I1129 09:01:49.407180 494126 ssh_runner.go:195] Run: sudo mkdir -p /var/lib/minikube/binaries/v1.34.1
I1129 09:01:49.415958 494126 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubectl?checksum=file:https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubectl.sha256
I1129 09:01:49.415978 494126 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubelet?checksum=file:https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubelet.sha256
I1129 09:01:49.416026 494126 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1129 09:01:49.416047 494126 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubectl
I1129 09:01:49.415978 494126 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubeadm?checksum=file:https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubeadm.sha256
I1129 09:01:49.416149 494126 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubeadm
I1129 09:01:49.429834 494126 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.34.1/kubectl: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubectl: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.34.1/kubectl': No such file or directory
I1129 09:01:49.429872 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/cache/linux/amd64/v1.34.1/kubectl --> /var/lib/minikube/binaries/v1.34.1/kubectl (60559544 bytes)
I1129 09:01:49.429915 494126 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.34.1/kubeadm: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubeadm: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.34.1/kubeadm': No such file or directory
I1129 09:01:49.429924 494126 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubelet
I1129 09:01:49.429943 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/cache/linux/amd64/v1.34.1/kubeadm --> /var/lib/minikube/binaries/v1.34.1/kubeadm (74027192 bytes)
I1129 09:01:49.438987 494126 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.34.1/kubelet: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubelet: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.34.1/kubelet': No such file or directory
I1129 09:01:49.439024 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/cache/linux/amd64/v1.34.1/kubelet --> /var/lib/minikube/binaries/v1.34.1/kubelet (59195684 bytes)
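The binary.go:80 lines above show the fallback URL shape used when a binary is not in the local cache: the download URL plus a ?checksum=file: parameter pointing at the published .sha256. A one-function sketch of that construction (kubeBinaryURL is an illustrative name):

package main

import "fmt"

// kubeBinaryURL builds a dl.k8s.io URL whose ?checksum=file:... parameter
// points at the published .sha256, matching the URLs in the log.
func kubeBinaryURL(version, osName, arch, binary string) string {
	base := fmt.Sprintf("https://dl.k8s.io/release/%s/bin/%s/%s/%s", version, osName, arch, binary)
	return fmt.Sprintf("%s?checksum=file:%s.sha256", base, base)
}

func main() {
	// Prints the kubelet URL seen at binary.go:80 above.
	fmt.Println(kubeBinaryURL("v1.34.1", "linux", "amd64", "kubelet"))
}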
I1129 09:01:46.884140 460401 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
I1129 09:01:48.710027 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:49.210030 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:49.709395 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:50.209866 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:50.709354 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:51.209979 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:51.710291 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:52.209895 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:52.709970 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:53.209937 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:49.969644 494126 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1129 09:01:49.978574 494126 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (322 bytes)
I1129 09:01:49.992833 494126 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1129 09:01:50.009876 494126 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2232 bytes)
I1129 09:01:50.023695 494126 ssh_runner.go:195] Run: grep 192.168.103.2 control-plane.minikube.internal$ /etc/hosts
I1129 09:01:50.027747 494126 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.103.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1129 09:01:50.038376 494126 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1129 09:01:50.121247 494126 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1129 09:01:50.149394 494126 certs.go:69] Setting up /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441 for IP: 192.168.103.2
I1129 09:01:50.149417 494126 certs.go:195] generating shared ca certs ...
I1129 09:01:50.149438 494126 certs.go:227] acquiring lock for ca certs: {Name:mk5e6bcae0a6944966b241f3c6197a472703c991 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:01:50.149602 494126 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/22000-255825/.minikube/ca.key
I1129 09:01:50.149703 494126 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/22000-255825/.minikube/proxy-client-ca.key
I1129 09:01:50.149717 494126 certs.go:257] generating profile certs ...
I1129 09:01:50.149797 494126 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/client.key
I1129 09:01:50.149812 494126 crypto.go:68] Generating cert /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/client.crt with IP's: []
I1129 09:01:50.352856 494126 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/client.crt ...
I1129 09:01:50.352896 494126 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/client.crt: {Name:mk24ad5255d5c075502606493622eaafcc9932fa Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:01:50.353102 494126 crypto.go:164] Writing key to /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/client.key ...
I1129 09:01:50.353115 494126 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/client.key: {Name:mkdb2263ef25fafc1ea0385357022f8199c8aa35 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:01:50.353223 494126 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/apiserver.key.f72e5c7b
I1129 09:01:50.353240 494126 crypto.go:68] Generating cert /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/apiserver.crt.f72e5c7b with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.103.2]
I1129 09:01:50.513341 494126 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/apiserver.crt.f72e5c7b ...
I1129 09:01:50.513379 494126 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/apiserver.crt.f72e5c7b: {Name:mk3f760c06958b6df21bcc9bde3527a0c97ad882 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:01:50.513582 494126 crypto.go:164] Writing key to /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/apiserver.key.f72e5c7b ...
I1129 09:01:50.513601 494126 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/apiserver.key.f72e5c7b: {Name:mk4c8be15a8f6eca407c52c7afdc7ecb10357a29 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:01:50.513678 494126 certs.go:382] copying /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/apiserver.crt.f72e5c7b -> /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/apiserver.crt
I1129 09:01:50.513771 494126 certs.go:386] copying /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/apiserver.key.f72e5c7b -> /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/apiserver.key
I1129 09:01:50.513831 494126 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/proxy-client.key
I1129 09:01:50.513847 494126 crypto.go:68] Generating cert /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/proxy-client.crt with IP's: []
I1129 09:01:50.651114 494126 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/proxy-client.crt ...
I1129 09:01:50.651146 494126 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/proxy-client.crt: {Name:mkbdace4e62ecdfbe11ae904155295b956ffc842 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:01:50.651330 494126 crypto.go:164] Writing key to /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/proxy-client.key ...
I1129 09:01:50.651343 494126 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/proxy-client.key: {Name:mk14d837fb2449197c689047daf9f07db1da4b8c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:01:50.651522 494126 certs.go:484] found cert: /home/jenkins/minikube-integration/22000-255825/.minikube/certs/259483.pem (1338 bytes)
W1129 09:01:50.651563 494126 certs.go:480] ignoring /home/jenkins/minikube-integration/22000-255825/.minikube/certs/259483_empty.pem, impossibly tiny 0 bytes
I1129 09:01:50.651573 494126 certs.go:484] found cert: /home/jenkins/minikube-integration/22000-255825/.minikube/certs/ca-key.pem (1675 bytes)
I1129 09:01:50.651652 494126 certs.go:484] found cert: /home/jenkins/minikube-integration/22000-255825/.minikube/certs/ca.pem (1078 bytes)
I1129 09:01:50.651691 494126 certs.go:484] found cert: /home/jenkins/minikube-integration/22000-255825/.minikube/certs/cert.pem (1123 bytes)
I1129 09:01:50.651714 494126 certs.go:484] found cert: /home/jenkins/minikube-integration/22000-255825/.minikube/certs/key.pem (1679 bytes)
I1129 09:01:50.651769 494126 certs.go:484] found cert: /home/jenkins/minikube-integration/22000-255825/.minikube/files/etc/ssl/certs/2594832.pem (1708 bytes)
I1129 09:01:50.652337 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1129 09:01:50.672071 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1129 09:01:50.691184 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1129 09:01:50.711306 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I1129 09:01:50.730860 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
I1129 09:01:50.750662 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1671 bytes)
I1129 09:01:50.771690 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1129 09:01:50.791789 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/profiles/no-preload-924441/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I1129 09:01:50.811356 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/certs/259483.pem --> /usr/share/ca-certificates/259483.pem (1338 bytes)
I1129 09:01:50.833983 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/files/etc/ssl/certs/2594832.pem --> /usr/share/ca-certificates/2594832.pem (1708 bytes)
I1129 09:01:50.853036 494126 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-255825/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1129 09:01:50.871262 494126 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1129 09:01:50.885099 494126 ssh_runner.go:195] Run: openssl version
I1129 09:01:50.892072 494126 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/259483.pem && ln -fs /usr/share/ca-certificates/259483.pem /etc/ssl/certs/259483.pem"
I1129 09:01:50.901864 494126 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/259483.pem
I1129 09:01:50.906616 494126 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 29 08:35 /usr/share/ca-certificates/259483.pem
I1129 09:01:50.906675 494126 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/259483.pem
I1129 09:01:50.943595 494126 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/259483.pem /etc/ssl/certs/51391683.0"
I1129 09:01:50.953459 494126 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/2594832.pem && ln -fs /usr/share/ca-certificates/2594832.pem /etc/ssl/certs/2594832.pem"
I1129 09:01:50.962610 494126 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/2594832.pem
I1129 09:01:50.966703 494126 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 29 08:35 /usr/share/ca-certificates/2594832.pem
I1129 09:01:50.966778 494126 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/2594832.pem
I1129 09:01:51.002253 494126 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/2594832.pem /etc/ssl/certs/3ec20f2e.0"
I1129 09:01:51.012487 494126 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1129 09:01:51.022391 494126 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1129 09:01:51.026710 494126 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 29 08:29 /usr/share/ca-certificates/minikubeCA.pem
I1129 09:01:51.026814 494126 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1129 09:01:51.063394 494126 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
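The `openssl x509 -hash -noout` runs above compute the OpenSSL subject hash that names each /etc/ssl/certs/<hash>.0 symlink (51391683.0, 3ec20f2e.0, and b5213941.0 in this run). A small sketch of the derivation, assuming the same openssl binary is on PATH (certHashLink is an illustrative helper):

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// certHashLink returns the /etc/ssl/certs/<subject-hash>.0 name that the
// ln -fs commands in the log create for a given PEM, using the same
// `openssl x509 -hash -noout -in <pem>` invocation seen above.
func certHashLink(pem string) (string, error) {
	out, err := exec.Command("openssl", "x509", "-hash", "-noout", "-in", pem).Output()
	if err != nil {
		return "", err
	}
	return "/etc/ssl/certs/" + strings.TrimSpace(string(out)) + ".0", nil
}

func main() {
	link, err := certHashLink("/usr/share/ca-certificates/minikubeCA.pem")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(link) // b5213941.0 for the minikube CA in this run
}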
I1129 09:01:51.073278 494126 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1129 09:01:51.077328 494126 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1129 09:01:51.077396 494126 kubeadm.go:401] StartCluster: {Name:no-preload-924441 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-924441 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.103.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1129 09:01:51.077489 494126 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I1129 09:01:51.077532 494126 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I1129 09:01:51.106096 494126 cri.go:89] found id: ""
I1129 09:01:51.106183 494126 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1129 09:01:51.115333 494126 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1129 09:01:51.123937 494126 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I1129 09:01:51.124003 494126 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1129 09:01:51.132534 494126 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1129 09:01:51.132560 494126 kubeadm.go:158] found existing configuration files:
I1129 09:01:51.132605 494126 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1129 09:01:51.140877 494126 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1129 09:01:51.140937 494126 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1129 09:01:51.149370 494126 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1129 09:01:51.157660 494126 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1129 09:01:51.157716 494126 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1129 09:01:51.165600 494126 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1129 09:01:51.173968 494126 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1129 09:01:51.174023 494126 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1129 09:01:51.182141 494126 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1129 09:01:51.190488 494126 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1129 09:01:51.190548 494126 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
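kubeadm.go:164 above applies the same test to each of the four config files: grep for the expected control-plane endpoint and delete the file when the grep fails, so kubeadm init starts from a clean slate. Condensed into one loop (an illustrative sketch, not minikube's code):

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	const endpoint = "https://control-plane.minikube.internal:8443"
	files := []string{
		"/etc/kubernetes/admin.conf",
		"/etc/kubernetes/kubelet.conf",
		"/etc/kubernetes/controller-manager.conf",
		"/etc/kubernetes/scheduler.conf",
	}
	for _, f := range files {
		// grep exits non-zero when the endpoint (or the file) is missing...
		if err := exec.Command("sudo", "grep", endpoint, f).Run(); err != nil {
			fmt.Printf("%q may not be in %s - will remove\n", endpoint, f)
			// ...so the possibly-stale file is deleted before kubeadm init.
			_ = exec.Command("sudo", "rm", "-f", f).Run()
		}
	}
}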
I1129 09:01:51.198568 494126 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1129 09:01:51.257848 494126 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1044-gcp\n", err: exit status 1
I1129 09:01:51.317135 494126 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1129 09:01:51.885035 460401 api_server.go:269] stopped: https://192.168.85.2:8443/healthz: Get "https://192.168.85.2:8443/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1129 09:01:51.885110 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1129 09:01:51.885188 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1129 09:01:51.917617 460401 cri.go:89] found id: "7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1"
I1129 09:01:51.917638 460401 cri.go:89] found id: "5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095"
I1129 09:01:51.917644 460401 cri.go:89] found id: "1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:01:51.917647 460401 cri.go:89] found id: ""
I1129 09:01:51.917655 460401 logs.go:282] 3 containers: [7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1 5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101]
I1129 09:01:51.917717 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:51.923877 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:51.929304 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:51.934465 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1129 09:01:51.934561 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1129 09:01:51.963685 460401 cri.go:89] found id: "f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:01:51.963708 460401 cri.go:89] found id: ""
I1129 09:01:51.963719 460401 logs.go:282] 1 containers: [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625]
I1129 09:01:51.963801 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:51.968956 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1129 09:01:51.969028 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1129 09:01:51.996971 460401 cri.go:89] found id: ""
I1129 09:01:51.997000 460401 logs.go:282] 0 containers: []
W1129 09:01:51.997007 460401 logs.go:284] No container was found matching "coredns"
I1129 09:01:51.997013 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1129 09:01:51.997078 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1129 09:01:52.028822 460401 cri.go:89] found id: "092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:01:52.028850 460401 cri.go:89] found id: "1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:01:52.028856 460401 cri.go:89] found id: ""
I1129 09:01:52.028866 460401 logs.go:282] 2 containers: [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea]
I1129 09:01:52.028936 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:52.034812 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:52.039943 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1129 09:01:52.040009 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1129 09:01:52.069835 460401 cri.go:89] found id: ""
I1129 09:01:52.069866 460401 logs.go:282] 0 containers: []
W1129 09:01:52.069878 460401 logs.go:284] No container was found matching "kube-proxy"
I1129 09:01:52.069886 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1129 09:01:52.069952 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1129 09:01:52.104321 460401 cri.go:89] found id: "2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:01:52.104340 460401 cri.go:89] found id: "976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:01:52.104344 460401 cri.go:89] found id: ""
I1129 09:01:52.104352 460401 logs.go:282] 2 containers: [2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d]
I1129 09:01:52.104402 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:52.109901 460401 ssh_runner.go:195] Run: which crictl
I1129 09:01:52.114778 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1129 09:01:52.114862 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1129 09:01:52.144981 460401 cri.go:89] found id: ""
I1129 09:01:52.145005 460401 logs.go:282] 0 containers: []
W1129 09:01:52.145013 460401 logs.go:284] No container was found matching "kindnet"
I1129 09:01:52.145019 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1129 09:01:52.145069 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1129 09:01:52.174604 460401 cri.go:89] found id: ""
I1129 09:01:52.174632 460401 logs.go:282] 0 containers: []
W1129 09:01:52.174641 460401 logs.go:284] No container was found matching "storage-provisioner"
I1129 09:01:52.174651 460401 logs.go:123] Gathering logs for kube-controller-manager [2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a] ...
I1129 09:01:52.174665 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:01:52.207427 460401 logs.go:123] Gathering logs for kube-controller-manager [976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d] ...
I1129 09:01:52.207458 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:01:52.249558 460401 logs.go:123] Gathering logs for containerd ...
I1129 09:01:52.249600 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1129 09:01:52.300742 460401 logs.go:123] Gathering logs for kubelet ...
I1129 09:01:52.300785 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1129 09:01:52.385321 460401 logs.go:123] Gathering logs for dmesg ...
I1129 09:01:52.385365 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1129 09:01:52.405491 460401 logs.go:123] Gathering logs for kube-apiserver [5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095] ...
I1129 09:01:52.405533 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095"
I1129 09:01:52.448465 460401 logs.go:123] Gathering logs for kube-apiserver [1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101] ...
I1129 09:01:52.448502 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:01:52.489466 460401 logs.go:123] Gathering logs for etcd [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625] ...
I1129 09:01:52.489506 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:01:52.534107 460401 logs.go:123] Gathering logs for kube-scheduler [1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea] ...
I1129 09:01:52.534146 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:01:52.572361 460401 logs.go:123] Gathering logs for container status ...
I1129 09:01:52.572401 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1129 09:01:52.606656 460401 logs.go:123] Gathering logs for describe nodes ...
I1129 09:01:52.606692 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
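Each "Gathering logs for ... [id]" pair above reduces to one crictl invocation per container ID discovered by the earlier `crictl ps -a --quiet --name=<component>` calls. A minimal sketch of that fan-out (gatherLogs is an illustrative helper, not minikube's API):

package main

import (
	"fmt"
	"os/exec"
)

// gatherLogs mirrors logs.go:123: for every container ID found for a
// component, tail the last 400 lines of its log via crictl.
func gatherLogs(component string, ids []string) {
	for _, id := range ids {
		fmt.Printf("Gathering logs for %s [%s] ...\n", component, id)
		out, _ := exec.Command("sudo", "crictl", "logs", "--tail", "400", id).CombinedOutput()
		_ = out // minikube folds this output into the post-mortem report
	}
}

func main() {
	gatherLogs("kube-apiserver", []string{
		"7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1",
	})
}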
I1129 09:01:53.710005 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:54.209471 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:54.709414 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:55.209967 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:55.709378 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:56.210032 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:56.709982 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:57.209266 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:57.709968 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:58.209425 493486 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:01:58.303052 493486 kubeadm.go:1114] duration metric: took 11.19438409s to wait for elevateKubeSystemPrivileges
I1129 09:01:58.303107 493486 kubeadm.go:403] duration metric: took 21.598001105s to StartCluster
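The long run of `kubectl get sa default` commands above is the elevateKubeSystemPrivileges wait: minikube polls roughly every 500ms until the default service account exists, then records the duration metric. A stripped-down sketch of that loop (waitForDefaultSA is an illustrative name):

package main

import (
	"fmt"
	"os/exec"
	"time"
)

// waitForDefaultSA polls `kubectl get sa default` until it succeeds or the
// deadline passes, mirroring the ~500ms cadence visible in the timestamps.
func waitForDefaultSA(kubectl, kubeconfig string, timeout time.Duration) error {
	start := time.Now()
	for time.Since(start) < timeout {
		cmd := exec.Command("sudo", kubectl, "get", "sa", "default", "--kubeconfig="+kubeconfig)
		if cmd.Run() == nil {
			fmt.Printf("took %s to wait for elevateKubeSystemPrivileges\n", time.Since(start))
			return nil
		}
		time.Sleep(500 * time.Millisecond)
	}
	return fmt.Errorf("default service account never appeared within %s", timeout)
}

func main() {
	_ = waitForDefaultSA("/var/lib/minikube/binaries/v1.28.0/kubectl", "/var/lib/minikube/kubeconfig", 5*time.Minute)
}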
I1129 09:01:58.303162 493486 settings.go:142] acquiring lock: {Name:mk6dbed29e5e99d89b1cbbd9e561d8f8791ae9ae Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:01:58.303278 493486 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/22000-255825/kubeconfig
I1129 09:01:58.305561 493486 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-255825/kubeconfig: {Name:mk7d91966efd00ccef892cf02f31ec14469accbd Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:01:58.305924 493486 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1129 09:01:58.306112 493486 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1129 09:01:58.306351 493486 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1129 09:01:58.306713 493486 addons.go:70] Setting storage-provisioner=true in profile "old-k8s-version-295154"
I1129 09:01:58.306776 493486 addons.go:70] Setting default-storageclass=true in profile "old-k8s-version-295154"
I1129 09:01:58.306795 493486 addons.go:239] Setting addon storage-provisioner=true in "old-k8s-version-295154"
I1129 09:01:58.306776 493486 config.go:182] Loaded profile config "old-k8s-version-295154": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1129 09:01:58.306807 493486 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "old-k8s-version-295154"
I1129 09:01:58.306834 493486 host.go:66] Checking if "old-k8s-version-295154" exists ...
I1129 09:01:58.307864 493486 out.go:179] * Verifying Kubernetes components...
I1129 09:01:58.307930 493486 cli_runner.go:164] Run: docker container inspect old-k8s-version-295154 --format={{.State.Status}}
I1129 09:01:58.308039 493486 cli_runner.go:164] Run: docker container inspect old-k8s-version-295154 --format={{.State.Status}}
I1129 09:01:58.309327 493486 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1129 09:01:58.335085 493486 addons.go:239] Setting addon default-storageclass=true in "old-k8s-version-295154"
I1129 09:01:58.335144 493486 host.go:66] Checking if "old-k8s-version-295154" exists ...
I1129 09:01:58.335642 493486 cli_runner.go:164] Run: docker container inspect old-k8s-version-295154 --format={{.State.Status}}
I1129 09:01:58.337139 493486 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1129 09:01:58.338693 493486 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1129 09:01:58.338716 493486 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1129 09:01:58.338899 493486 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-295154
I1129 09:01:58.368947 493486 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1129 09:01:58.368979 493486 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1129 09:01:58.369072 493486 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-295154
I1129 09:01:58.378680 493486 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33058 SSHKeyPath:/home/jenkins/minikube-integration/22000-255825/.minikube/machines/old-k8s-version-295154/id_rsa Username:docker}
I1129 09:01:58.399464 493486 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33058 SSHKeyPath:/home/jenkins/minikube-integration/22000-255825/.minikube/machines/old-k8s-version-295154/id_rsa Username:docker}
I1129 09:01:58.438617 493486 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.76.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I1129 09:01:58.498671 493486 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1129 09:01:58.528524 493486 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1129 09:01:58.536443 493486 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1129 09:01:58.718007 493486 start.go:977] {"host.minikube.internal": 192.168.76.1} host record injected into CoreDNS's ConfigMap
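The sed pipeline at 09:01:58.438617 above rewrites the CoreDNS ConfigMap in place: it splices a hosts block ahead of the `forward . /etc/resolv.conf` directive and pipes the result back through `kubectl replace`. Unescaped, the inserted Corefile fragment reads:

    hosts {
       192.168.76.1 host.minikube.internal
       fallthrough
    }

which is what the "host record injected into CoreDNS's ConfigMap" line then confirms.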
I1129 09:01:58.719713 493486 node_ready.go:35] waiting up to 6m0s for node "old-k8s-version-295154" to be "Ready" ...
I1129 09:01:58.976512 493486 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
I1129 09:02:01.574795 494126 kubeadm.go:319] [init] Using Kubernetes version: v1.34.1
I1129 09:02:01.574869 494126 kubeadm.go:319] [preflight] Running pre-flight checks
I1129 09:02:01.575071 494126 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I1129 09:02:01.575154 494126 kubeadm.go:319] KERNEL_VERSION: 6.8.0-1044-gcp
I1129 09:02:01.575204 494126 kubeadm.go:319] OS: Linux
I1129 09:02:01.575304 494126 kubeadm.go:319] CGROUPS_CPU: enabled
I1129 09:02:01.575403 494126 kubeadm.go:319] CGROUPS_CPUSET: enabled
I1129 09:02:01.575496 494126 kubeadm.go:319] CGROUPS_DEVICES: enabled
I1129 09:02:01.575567 494126 kubeadm.go:319] CGROUPS_FREEZER: enabled
I1129 09:02:01.575645 494126 kubeadm.go:319] CGROUPS_MEMORY: enabled
I1129 09:02:01.575713 494126 kubeadm.go:319] CGROUPS_PIDS: enabled
I1129 09:02:01.575809 494126 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I1129 09:02:01.575872 494126 kubeadm.go:319] CGROUPS_IO: enabled
I1129 09:02:01.575964 494126 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1129 09:02:01.576092 494126 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1129 09:02:01.576217 494126 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I1129 09:02:01.576325 494126 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1129 09:02:01.578171 494126 out.go:252] - Generating certificates and keys ...
I1129 09:02:01.578298 494126 kubeadm.go:319] [certs] Using existing ca certificate authority
I1129 09:02:01.578401 494126 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1129 09:02:01.578499 494126 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I1129 09:02:01.578589 494126 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I1129 09:02:01.578680 494126 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1129 09:02:01.578785 494126 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1129 09:02:01.578876 494126 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1129 09:02:01.579019 494126 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [localhost no-preload-924441] and IPs [192.168.103.2 127.0.0.1 ::1]
I1129 09:02:01.579122 494126 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1129 09:02:01.579311 494126 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [localhost no-preload-924441] and IPs [192.168.103.2 127.0.0.1 ::1]
I1129 09:02:01.579420 494126 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1129 09:02:01.579532 494126 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1129 09:02:01.579609 494126 kubeadm.go:319] [certs] Generating "sa" key and public key
I1129 09:02:01.579696 494126 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1129 09:02:01.579806 494126 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1129 09:02:01.579894 494126 kubeadm.go:319] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I1129 09:02:01.579971 494126 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1129 09:02:01.580076 494126 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1129 09:02:01.580125 494126 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1129 09:02:01.580195 494126 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1129 09:02:01.580259 494126 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1129 09:02:01.582121 494126 out.go:252] - Booting up control plane ...
I1129 09:02:01.582267 494126 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1129 09:02:01.582364 494126 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1129 09:02:01.582460 494126 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1129 09:02:01.582603 494126 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1129 09:02:01.582773 494126 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I1129 09:02:01.582902 494126 kubeadm.go:319] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I1129 09:02:01.583026 494126 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1129 09:02:01.583068 494126 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1129 09:02:01.583182 494126 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I1129 09:02:01.583325 494126 kubeadm.go:319] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I1129 09:02:01.583413 494126 kubeadm.go:319] [kubelet-check] The kubelet is healthy after 1.001845652s
I1129 09:02:01.583537 494126 kubeadm.go:319] [control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
I1129 09:02:01.583671 494126 kubeadm.go:319] [control-plane-check] Checking kube-apiserver at https://192.168.103.2:8443/livez
I1129 09:02:01.583787 494126 kubeadm.go:319] [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
I1129 09:02:01.583879 494126 kubeadm.go:319] [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
I1129 09:02:01.583985 494126 kubeadm.go:319] [control-plane-check] kube-controller-manager is healthy after 1.852889014s
I1129 09:02:01.584071 494126 kubeadm.go:319] [control-plane-check] kube-scheduler is healthy after 2.023243656s
I1129 09:02:01.584163 494126 kubeadm.go:319] [control-plane-check] kube-apiserver is healthy after 4.00195345s
I1129 09:02:01.584314 494126 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1129 09:02:01.584493 494126 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1129 09:02:01.584584 494126 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1129 09:02:01.584867 494126 kubeadm.go:319] [mark-control-plane] Marking the node no-preload-924441 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1129 09:02:01.584955 494126 kubeadm.go:319] [bootstrap-token] Using token: mvtuq7.pg2byk8o9fh5nfa2
I1129 09:02:01.587787 494126 out.go:252] - Configuring RBAC rules ...
I1129 09:02:01.587916 494126 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1129 09:02:01.588028 494126 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1129 09:02:01.588232 494126 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1129 09:02:01.588384 494126 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1129 09:02:01.588517 494126 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1129 09:02:01.588635 494126 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1129 09:02:01.588779 494126 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1129 09:02:01.588837 494126 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1129 09:02:01.588907 494126 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1129 09:02:01.588916 494126 kubeadm.go:319]
I1129 09:02:01.589016 494126 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1129 09:02:01.589032 494126 kubeadm.go:319]
I1129 09:02:01.589151 494126 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1129 09:02:01.589160 494126 kubeadm.go:319]
I1129 09:02:01.589205 494126 kubeadm.go:319] mkdir -p $HOME/.kube
I1129 09:02:01.589280 494126 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1129 09:02:01.589374 494126 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1129 09:02:01.589388 494126 kubeadm.go:319]
I1129 09:02:01.589465 494126 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1129 09:02:01.589473 494126 kubeadm.go:319]
I1129 09:02:01.589554 494126 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1129 09:02:01.589563 494126 kubeadm.go:319]
I1129 09:02:01.589607 494126 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1129 09:02:01.589671 494126 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1129 09:02:01.589782 494126 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1129 09:02:01.589795 494126 kubeadm.go:319]
I1129 09:02:01.589906 494126 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1129 09:02:01.590049 494126 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1129 09:02:01.590058 494126 kubeadm.go:319]
I1129 09:02:01.590132 494126 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token mvtuq7.pg2byk8o9fh5nfa2 \
I1129 09:02:01.590268 494126 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:cfb13a4080e942b53ddf5e01885fcdd270ac918e177076400130991e2b6b7778 \
I1129 09:02:01.590302 494126 kubeadm.go:319] --control-plane
I1129 09:02:01.590309 494126 kubeadm.go:319]
I1129 09:02:01.590425 494126 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1129 09:02:01.590434 494126 kubeadm.go:319]
I1129 09:02:01.590567 494126 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token mvtuq7.pg2byk8o9fh5nfa2 \
I1129 09:02:01.590744 494126 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:cfb13a4080e942b53ddf5e01885fcdd270ac918e177076400130991e2b6b7778
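The --discovery-token-ca-cert-hash printed in the join commands above is derivable from the cluster CA alone: kubeadm's standard derivation hashes the DER-encoded Subject Public Key Info of ca.crt with SHA-256. A small sketch of that computation (path taken from the certs section earlier; caCertHash is an illustrative helper):

package main

import (
	"crypto/sha256"
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
)

// caCertHash reproduces the --discovery-token-ca-cert-hash value: SHA-256
// over the CA certificate's DER-encoded Subject Public Key Info.
func caCertHash(path string) (string, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return "", err
	}
	block, _ := pem.Decode(data)
	if block == nil {
		return "", fmt.Errorf("no PEM block in %s", path)
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("sha256:%x", sha256.Sum256(cert.RawSubjectPublicKeyInfo)), nil
}

func main() {
	h, err := caCertHash("/var/lib/minikube/certs/ca.crt")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(h)
}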
I1129 09:02:01.590761 494126 cni.go:84] Creating CNI manager for ""
I1129 09:02:01.590770 494126 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1129 09:02:01.592367 494126 out.go:179] * Configuring CNI (Container Networking Interface) ...
I1129 09:01:58.977447 493486 addons.go:530] duration metric: took 671.096745ms for enable addons: enabled=[storage-provisioner default-storageclass]
I1129 09:01:59.226693 493486 kapi.go:214] "coredns" deployment in "kube-system" namespace and "old-k8s-version-295154" context rescaled to 1 replicas
W1129 09:02:00.723077 493486 node_ready.go:57] node "old-k8s-version-295154" has "Ready":"False" status (will retry)
W1129 09:02:02.723240 493486 node_ready.go:57] node "old-k8s-version-295154" has "Ready":"False" status (will retry)
I1129 09:02:01.593492 494126 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I1129 09:02:01.598544 494126 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.34.1/kubectl ...
I1129 09:02:01.598567 494126 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I1129 09:02:01.615144 494126 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
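Condensed, the CNI step above is: confirm /opt/cni/bin/portmap exists, write the generated kindnet manifest to /var/tmp/minikube/cni.yaml, then apply it with the cluster's own kubectl. A sketch of the final apply (paths copied from the log; error handling simplified):

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// Mirrors the cni.yaml apply at 09:02:01.615144: the scp'd manifest is
	// applied with the in-VM kubectl and kubeconfig.
	out, err := exec.Command("sudo",
		"/var/lib/minikube/binaries/v1.34.1/kubectl", "apply",
		"--kubeconfig=/var/lib/minikube/kubeconfig",
		"-f", "/var/tmp/minikube/cni.yaml",
	).CombinedOutput()
	if err != nil {
		fmt.Printf("apply CNI manifest: %v: %s\n", err, out)
	}
}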
I1129 09:02:01.883935 494126 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1129 09:02:01.884024 494126 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:02:01.884114 494126 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes no-preload-924441 minikube.k8s.io/updated_at=2025_11_29T09_02_01_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=d0eb20ec824c82ab3f24099c8b785e0a2a5789af minikube.k8s.io/name=no-preload-924441 minikube.k8s.io/primary=true
I1129 09:02:01.969638 494126 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:02:01.982178 494126 ops.go:34] apiserver oom_adj: -16
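oom_adj -16, read from /proc/$(pgrep kube-apiserver)/oom_adj at 09:02:01.883935 above, confirms kube-apiserver is deprioritized for the kernel OOM killer. On current kernels the same setting is exposed through the newer interface; a hedged equivalent read:

  cat /proc/$(pgrep kube-apiserver)/oom_score_adj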
I1129 09:02:02.470301 494126 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:02:02.969878 494126 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:02:03.470379 494126 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:02:03.970554 494126 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:02:04.469853 494126 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:02:02.669495 460401 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": (10.062771993s)
W1129 09:02:02.669547 460401 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
Unable to connect to the server: net/http: TLS handshake timeout
output:
** stderr **
Unable to connect to the server: net/http: TLS handshake timeout
** /stderr **
I1129 09:02:02.669577 460401 logs.go:123] Gathering logs for kube-apiserver [7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1] ...
I1129 09:02:02.669596 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1"
I1129 09:02:02.710559 460401 logs.go:123] Gathering logs for kube-scheduler [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21] ...
I1129 09:02:02.710605 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:02:04.970119 494126 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:02:05.470767 494126 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:02:05.969852 494126 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:02:06.052010 494126 kubeadm.go:1114] duration metric: took 4.168052566s to wait for elevateKubeSystemPrivileges
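The burst of `kubectl get sa default` calls above (09:02:01.969 through 09:02:05.969, roughly every 500ms, interleaved with another profile's logs) is elevateKubeSystemPrivileges polling until the default ServiceAccount exists, i.e. until the controller-manager is actually serving. A hedged one-shot equivalent, assuming kubectl v1.31+ for --for=create:

  kubectl -n default wait --for=create serviceaccount/default --timeout=60s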
I1129 09:02:06.052057 494126 kubeadm.go:403] duration metric: took 14.974666914s to StartCluster
I1129 09:02:06.052081 494126 settings.go:142] acquiring lock: {Name:mk6dbed29e5e99d89b1cbbd9e561d8f8791ae9ae Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:02:06.052174 494126 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/22000-255825/kubeconfig
I1129 09:02:06.054258 494126 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-255825/kubeconfig: {Name:mk7d91966efd00ccef892cf02f31ec14469accbd Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:02:06.054571 494126 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1129 09:02:06.054563 494126 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.103.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1129 09:02:06.054635 494126 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1129 09:02:06.054874 494126 config.go:182] Loaded profile config "no-preload-924441": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1129 09:02:06.054888 494126 addons.go:70] Setting storage-provisioner=true in profile "no-preload-924441"
I1129 09:02:06.054933 494126 addons.go:70] Setting default-storageclass=true in profile "no-preload-924441"
I1129 09:02:06.054947 494126 addons.go:239] Setting addon storage-provisioner=true in "no-preload-924441"
I1129 09:02:06.054963 494126 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "no-preload-924441"
I1129 09:02:06.055012 494126 host.go:66] Checking if "no-preload-924441" exists ...
I1129 09:02:06.055424 494126 cli_runner.go:164] Run: docker container inspect no-preload-924441 --format={{.State.Status}}
I1129 09:02:06.055667 494126 cli_runner.go:164] Run: docker container inspect no-preload-924441 --format={{.State.Status}}
I1129 09:02:06.056967 494126 out.go:179] * Verifying Kubernetes components...
I1129 09:02:06.060417 494126 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1129 09:02:06.083076 494126 addons.go:239] Setting addon default-storageclass=true in "no-preload-924441"
I1129 09:02:06.083127 494126 host.go:66] Checking if "no-preload-924441" exists ...
I1129 09:02:06.083615 494126 cli_runner.go:164] Run: docker container inspect no-preload-924441 --format={{.State.Status}}
I1129 09:02:06.086028 494126 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1129 09:02:06.087100 494126 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1129 09:02:06.087121 494126 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1129 09:02:06.087200 494126 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-924441
I1129 09:02:06.110337 494126 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1129 09:02:06.110366 494126 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1129 09:02:06.111183 494126 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-924441
I1129 09:02:06.116769 494126 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/22000-255825/.minikube/machines/no-preload-924441/id_rsa Username:docker}
I1129 09:02:06.140007 494126 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/22000-255825/.minikube/machines/no-preload-924441/id_rsa Username:docker}
I1129 09:02:06.151655 494126 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.103.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
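For readability, the sed pipeline above edits the CoreDNS Corefile in place: it adds a `log` directive before `errors` and, ahead of the `forward . /etc/resolv.conf` line, a hosts stanza so pods can resolve the host gateway by name. Reconstructed from the sed expressions (not captured cluster output), the inserted stanza is:

  hosts {
     192.168.103.1 host.minikube.internal
     fallthrough
  }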
I1129 09:02:06.208406 494126 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1129 09:02:06.241470 494126 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1129 09:02:06.273558 494126 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1129 09:02:06.324896 494126 start.go:977] {"host.minikube.internal": 192.168.103.1} host record injected into CoreDNS's ConfigMap
I1129 09:02:06.327889 494126 node_ready.go:35] waiting up to 6m0s for node "no-preload-924441" to be "Ready" ...
I1129 09:02:06.574594 494126 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
W1129 09:02:05.223590 493486 node_ready.go:57] node "old-k8s-version-295154" has "Ready":"False" status (will retry)
W1129 09:02:07.223929 493486 node_ready.go:57] node "old-k8s-version-295154" has "Ready":"False" status (will retry)
I1129 09:02:06.575644 494126 addons.go:530] duration metric: took 521.007476ms for enable addons: enabled=[storage-provisioner default-storageclass]
I1129 09:02:06.830448 494126 kapi.go:214] "coredns" deployment in "kube-system" namespace and "no-preload-924441" context rescaled to 1 replicas
W1129 09:02:08.331406 494126 node_ready.go:57] node "no-preload-924441" has "Ready":"False" status (will retry)
I1129 09:02:05.259668 460401 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
I1129 09:02:07.201576 460401 api_server.go:269] stopped: https://192.168.85.2:8443/healthz: Get "https://192.168.85.2:8443/healthz": read tcp 192.168.85.1:43246->192.168.85.2:8443: read: connection reset by peer
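"connection reset by peer" here, like the TLS handshake timeout at 09:02:02.669, points at an apiserver that is cycling rather than a wrong endpoint, which is why the harness re-enumerates CRI containers below. A hedged manual probe of the same endpoint (-k because the serving cert is signed by the cluster's own CA):

  curl -sk https://192.168.85.2:8443/healthz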
I1129 09:02:07.201690 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1129 09:02:07.201778 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1129 09:02:07.234753 460401 cri.go:89] found id: "7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1"
I1129 09:02:07.234781 460401 cri.go:89] found id: "5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095"
I1129 09:02:07.234788 460401 cri.go:89] found id: "1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:02:07.234793 460401 cri.go:89] found id: ""
I1129 09:02:07.234804 460401 logs.go:282] 3 containers: [7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1 5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101]
I1129 09:02:07.234869 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:07.240257 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:07.245641 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:07.251131 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1129 09:02:07.251196 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1129 09:02:07.280579 460401 cri.go:89] found id: "f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:02:07.280608 460401 cri.go:89] found id: ""
I1129 09:02:07.280621 460401 logs.go:282] 1 containers: [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625]
I1129 09:02:07.280682 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:07.286123 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1129 09:02:07.286213 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1129 09:02:07.317491 460401 cri.go:89] found id: ""
I1129 09:02:07.317519 460401 logs.go:282] 0 containers: []
W1129 09:02:07.317528 460401 logs.go:284] No container was found matching "coredns"
I1129 09:02:07.317534 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1129 09:02:07.317586 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1129 09:02:07.347513 460401 cri.go:89] found id: "092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:02:07.347534 460401 cri.go:89] found id: "1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:02:07.347538 460401 cri.go:89] found id: ""
I1129 09:02:07.347546 460401 logs.go:282] 2 containers: [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea]
I1129 09:02:07.347610 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:07.353144 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:07.358223 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1129 09:02:07.358303 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1129 09:02:07.387488 460401 cri.go:89] found id: ""
I1129 09:02:07.387516 460401 logs.go:282] 0 containers: []
W1129 09:02:07.387525 460401 logs.go:284] No container was found matching "kube-proxy"
I1129 09:02:07.387532 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1129 09:02:07.387595 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1129 09:02:07.418490 460401 cri.go:89] found id: "c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6"
I1129 09:02:07.418512 460401 cri.go:89] found id: "2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:02:07.418516 460401 cri.go:89] found id: "976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:02:07.418519 460401 cri.go:89] found id: ""
I1129 09:02:07.418527 460401 logs.go:282] 3 containers: [c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6 2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d]
I1129 09:02:07.418587 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:07.423956 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:07.429140 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:07.434196 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1129 09:02:07.434281 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1129 09:02:07.463114 460401 cri.go:89] found id: ""
I1129 09:02:07.463138 460401 logs.go:282] 0 containers: []
W1129 09:02:07.463148 460401 logs.go:284] No container was found matching "kindnet"
I1129 09:02:07.463156 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1129 09:02:07.463222 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1129 09:02:07.494533 460401 cri.go:89] found id: ""
I1129 09:02:07.494567 460401 logs.go:282] 0 containers: []
W1129 09:02:07.494579 460401 logs.go:284] No container was found matching "storage-provisioner"
I1129 09:02:07.494592 460401 logs.go:123] Gathering logs for containerd ...
I1129 09:02:07.494604 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1129 09:02:07.546238 460401 logs.go:123] Gathering logs for kubelet ...
I1129 09:02:07.546282 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1129 09:02:07.634664 460401 logs.go:123] Gathering logs for describe nodes ...
I1129 09:02:07.634702 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1129 09:02:07.696753 460401 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1129 09:02:07.696779 460401 logs.go:123] Gathering logs for kube-apiserver [7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1] ...
I1129 09:02:07.696796 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1"
I1129 09:02:07.733303 460401 logs.go:123] Gathering logs for kube-scheduler [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21] ...
I1129 09:02:07.733343 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:02:07.786770 460401 logs.go:123] Gathering logs for kube-scheduler [1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea] ...
I1129 09:02:07.786809 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:02:07.824791 460401 logs.go:123] Gathering logs for kube-controller-manager [c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6] ...
I1129 09:02:07.824831 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6"
I1129 09:02:07.857029 460401 logs.go:123] Gathering logs for container status ...
I1129 09:02:07.857058 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
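The container-status command above is a portability idiom: prefer crictl if installed, fall back to the literal name so the failure is legible, and finally try Docker. Spelled out under the same assumptions:

  CRICTL=$(which crictl || echo crictl)        # bare name if crictl is not on PATH
  sudo "$CRICTL" ps -a || sudo docker ps -a    # CRI runtime first, then Docker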
I1129 09:02:07.892009 460401 logs.go:123] Gathering logs for dmesg ...
I1129 09:02:07.892046 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1129 09:02:07.907552 460401 logs.go:123] Gathering logs for kube-apiserver [5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095] ...
I1129 09:02:07.907596 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095"
W1129 09:02:07.937558 460401 logs.go:130] failed kube-apiserver [5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095]: command: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095" /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095": Process exited with status 1
stdout:
stderr:
E1129 09:02:07.934436 4413 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = an error occurred when try to find container \"5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095\": not found" containerID="5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095"
time="2025-11-29T09:02:07Z" level=fatal msg="rpc error: code = NotFound desc = an error occurred when try to find container \"5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095\": not found"
output:
** stderr **
E1129 09:02:07.934436 4413 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = an error occurred when try to find container \"5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095\": not found" containerID="5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095"
time="2025-11-29T09:02:07Z" level=fatal msg="rpc error: code = NotFound desc = an error occurred when try to find container \"5e7b60288765099d1aa5333e90b5c31c9314dff5f9864968413148621de30095\": not found"
** /stderr **
I1129 09:02:07.937577 460401 logs.go:123] Gathering logs for kube-apiserver [1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101] ...
I1129 09:02:07.937591 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:02:07.976501 460401 logs.go:123] Gathering logs for etcd [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625] ...
I1129 09:02:07.976553 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:02:08.017968 460401 logs.go:123] Gathering logs for kube-controller-manager [2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a] ...
I1129 09:02:08.018008 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:02:08.049057 460401 logs.go:123] Gathering logs for kube-controller-manager [976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d] ...
I1129 09:02:08.049090 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
W1129 09:02:09.723662 493486 node_ready.go:57] node "old-k8s-version-295154" has "Ready":"False" status (will retry)
W1129 09:02:12.223024 493486 node_ready.go:57] node "old-k8s-version-295154" has "Ready":"False" status (will retry)
I1129 09:02:13.224090 493486 node_ready.go:49] node "old-k8s-version-295154" is "Ready"
I1129 09:02:13.224128 493486 node_ready.go:38] duration metric: took 14.504358398s for node "old-k8s-version-295154" to be "Ready" ...
I1129 09:02:13.224148 493486 api_server.go:52] waiting for apiserver process to appear ...
I1129 09:02:13.224211 493486 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1129 09:02:13.243313 493486 api_server.go:72] duration metric: took 14.93733902s to wait for apiserver process to appear ...
I1129 09:02:13.243343 493486 api_server.go:88] waiting for apiserver healthz status ...
I1129 09:02:13.243370 493486 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1129 09:02:13.250694 493486 api_server.go:279] https://192.168.76.2:8443/healthz returned 200:
ok
I1129 09:02:13.251984 493486 api_server.go:141] control plane version: v1.28.0
I1129 09:02:13.252015 493486 api_server.go:131] duration metric: took 8.663278ms to wait for apiserver health ...
I1129 09:02:13.252026 493486 system_pods.go:43] waiting for kube-system pods to appear ...
I1129 09:02:13.255767 493486 system_pods.go:59] 8 kube-system pods found
I1129 09:02:13.255813 493486 system_pods.go:61] "coredns-5dd5756b68-phw28" [7fc2b8dd-43dd-43df-8887-9ffa6de36fb4] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1129 09:02:13.255822 493486 system_pods.go:61] "etcd-old-k8s-version-295154" [b49cf7c8-8d72-4db9-a96f-d796fd8d9e08] Running
I1129 09:02:13.255829 493486 system_pods.go:61] "kindnet-k4n9l" [74cdf2cd-3f3a-4be5-9a9f-6d0b67090fb8] Running
I1129 09:02:13.255835 493486 system_pods.go:61] "kube-apiserver-old-k8s-version-295154" [e4ca0771-197f-4d77-97f0-7a7778e227de] Running
I1129 09:02:13.255841 493486 system_pods.go:61] "kube-controller-manager-old-k8s-version-295154" [6825ac68-da0d-474d-ac97-53398adffd73] Running
I1129 09:02:13.255847 493486 system_pods.go:61] "kube-proxy-4rfb4" [05ef67c3-0d6e-453d-a0e5-81c649c3e033] Running
I1129 09:02:13.255853 493486 system_pods.go:61] "kube-scheduler-old-k8s-version-295154" [97d5e6fb-5cb8-4a03-a8df-3f76df5b2671] Running
I1129 09:02:13.255860 493486 system_pods.go:61] "storage-provisioner" [359871fd-a77c-430a-87c1-b313992718e2] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1129 09:02:13.255869 493486 system_pods.go:74] duration metric: took 3.834915ms to wait for pod list to return data ...
I1129 09:02:13.255879 493486 default_sa.go:34] waiting for default service account to be created ...
I1129 09:02:13.259936 493486 default_sa.go:45] found service account: "default"
I1129 09:02:13.259965 493486 default_sa.go:55] duration metric: took 4.078247ms for default service account to be created ...
I1129 09:02:13.259977 493486 system_pods.go:116] waiting for k8s-apps to be running ...
I1129 09:02:13.264489 493486 system_pods.go:86] 8 kube-system pods found
I1129 09:02:13.264528 493486 system_pods.go:89] "coredns-5dd5756b68-phw28" [7fc2b8dd-43dd-43df-8887-9ffa6de36fb4] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1129 09:02:13.264536 493486 system_pods.go:89] "etcd-old-k8s-version-295154" [b49cf7c8-8d72-4db9-a96f-d796fd8d9e08] Running
I1129 09:02:13.264545 493486 system_pods.go:89] "kindnet-k4n9l" [74cdf2cd-3f3a-4be5-9a9f-6d0b67090fb8] Running
I1129 09:02:13.264554 493486 system_pods.go:89] "kube-apiserver-old-k8s-version-295154" [e4ca0771-197f-4d77-97f0-7a7778e227de] Running
I1129 09:02:13.264562 493486 system_pods.go:89] "kube-controller-manager-old-k8s-version-295154" [6825ac68-da0d-474d-ac97-53398adffd73] Running
I1129 09:02:13.264567 493486 system_pods.go:89] "kube-proxy-4rfb4" [05ef67c3-0d6e-453d-a0e5-81c649c3e033] Running
I1129 09:02:13.264572 493486 system_pods.go:89] "kube-scheduler-old-k8s-version-295154" [97d5e6fb-5cb8-4a03-a8df-3f76df5b2671] Running
I1129 09:02:13.264586 493486 system_pods.go:89] "storage-provisioner" [359871fd-a77c-430a-87c1-b313992718e2] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1129 09:02:13.264615 493486 retry.go:31] will retry after 309.906184ms: missing components: kube-dns
W1129 09:02:10.832100 494126 node_ready.go:57] node "no-preload-924441" has "Ready":"False" status (will retry)
W1129 09:02:13.330706 494126 node_ready.go:57] node "no-preload-924441" has "Ready":"False" status (will retry)
I1129 09:02:10.584596 460401 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
I1129 09:02:10.585082 460401 api_server.go:269] stopped: https://192.168.85.2:8443/healthz: Get "https://192.168.85.2:8443/healthz": dial tcp 192.168.85.2:8443: connect: connection refused
I1129 09:02:10.585139 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1129 09:02:10.585192 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1129 09:02:10.615813 460401 cri.go:89] found id: "7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1"
I1129 09:02:10.615833 460401 cri.go:89] found id: "1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:02:10.615837 460401 cri.go:89] found id: ""
I1129 09:02:10.615846 460401 logs.go:282] 2 containers: [7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101]
I1129 09:02:10.615910 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:10.621079 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:10.625927 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1129 09:02:10.626017 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1129 09:02:10.655780 460401 cri.go:89] found id: "f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:02:10.655808 460401 cri.go:89] found id: ""
I1129 09:02:10.655817 460401 logs.go:282] 1 containers: [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625]
I1129 09:02:10.655877 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:10.661197 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1129 09:02:10.661278 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1129 09:02:10.692401 460401 cri.go:89] found id: ""
I1129 09:02:10.692423 460401 logs.go:282] 0 containers: []
W1129 09:02:10.692431 460401 logs.go:284] No container was found matching "coredns"
I1129 09:02:10.692436 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1129 09:02:10.692496 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1129 09:02:10.721278 460401 cri.go:89] found id: "092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:02:10.721303 460401 cri.go:89] found id: "1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:02:10.721309 460401 cri.go:89] found id: ""
I1129 09:02:10.721320 460401 logs.go:282] 2 containers: [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea]
I1129 09:02:10.721387 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:10.726913 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:10.731556 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1129 09:02:10.731637 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1129 09:02:10.759345 460401 cri.go:89] found id: ""
I1129 09:02:10.759373 460401 logs.go:282] 0 containers: []
W1129 09:02:10.759381 460401 logs.go:284] No container was found matching "kube-proxy"
I1129 09:02:10.759386 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1129 09:02:10.759446 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1129 09:02:10.790190 460401 cri.go:89] found id: "c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6"
I1129 09:02:10.790215 460401 cri.go:89] found id: "2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:02:10.790221 460401 cri.go:89] found id: "976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:02:10.790226 460401 cri.go:89] found id: ""
I1129 09:02:10.790236 460401 logs.go:282] 3 containers: [c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6 2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d]
I1129 09:02:10.790305 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:10.795588 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:10.800622 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:10.805263 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1129 09:02:10.805338 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1129 09:02:10.834942 460401 cri.go:89] found id: ""
I1129 09:02:10.834973 460401 logs.go:282] 0 containers: []
W1129 09:02:10.834991 460401 logs.go:284] No container was found matching "kindnet"
I1129 09:02:10.834999 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1129 09:02:10.835065 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1129 09:02:10.872503 460401 cri.go:89] found id: ""
I1129 09:02:10.872536 460401 logs.go:282] 0 containers: []
W1129 09:02:10.872547 460401 logs.go:284] No container was found matching "storage-provisioner"
I1129 09:02:10.872562 460401 logs.go:123] Gathering logs for kube-scheduler [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21] ...
I1129 09:02:10.872586 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:02:10.926644 460401 logs.go:123] Gathering logs for kube-scheduler [1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea] ...
I1129 09:02:10.926681 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:02:10.965025 460401 logs.go:123] Gathering logs for kube-controller-manager [2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a] ...
I1129 09:02:10.965069 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:02:10.998068 460401 logs.go:123] Gathering logs for containerd ...
I1129 09:02:10.998102 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1129 09:02:11.043686 460401 logs.go:123] Gathering logs for kubelet ...
I1129 09:02:11.043743 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1129 09:02:11.134380 460401 logs.go:123] Gathering logs for dmesg ...
I1129 09:02:11.134422 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1129 09:02:11.150475 460401 logs.go:123] Gathering logs for describe nodes ...
I1129 09:02:11.150510 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1129 09:02:11.210329 460401 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1129 09:02:11.210348 460401 logs.go:123] Gathering logs for etcd [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625] ...
I1129 09:02:11.210364 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:02:11.250422 460401 logs.go:123] Gathering logs for kube-controller-manager [c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6] ...
I1129 09:02:11.250457 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6"
I1129 09:02:11.280219 460401 logs.go:123] Gathering logs for kube-controller-manager [976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d] ...
I1129 09:02:11.280255 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:02:11.315565 460401 logs.go:123] Gathering logs for container status ...
I1129 09:02:11.315596 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1129 09:02:11.349327 460401 logs.go:123] Gathering logs for kube-apiserver [7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1] ...
I1129 09:02:11.349358 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1"
I1129 09:02:11.384696 460401 logs.go:123] Gathering logs for kube-apiserver [1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101] ...
I1129 09:02:11.384729 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:02:13.923850 460401 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
I1129 09:02:13.924341 460401 api_server.go:269] stopped: https://192.168.85.2:8443/healthz: Get "https://192.168.85.2:8443/healthz": dial tcp 192.168.85.2:8443: connect: connection refused
I1129 09:02:13.924398 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1129 09:02:13.924461 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1129 09:02:13.954410 460401 cri.go:89] found id: "7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1"
I1129 09:02:13.954430 460401 cri.go:89] found id: "1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:02:13.954434 460401 cri.go:89] found id: ""
I1129 09:02:13.954442 460401 logs.go:282] 2 containers: [7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101]
I1129 09:02:13.954501 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:13.959624 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:13.964312 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1129 09:02:13.964377 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1129 09:02:13.992596 460401 cri.go:89] found id: "f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:02:13.992625 460401 cri.go:89] found id: ""
I1129 09:02:13.992636 460401 logs.go:282] 1 containers: [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625]
I1129 09:02:13.992703 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:13.998893 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1129 09:02:13.998972 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1129 09:02:14.028106 460401 cri.go:89] found id: ""
I1129 09:02:14.028140 460401 logs.go:282] 0 containers: []
W1129 09:02:14.028152 460401 logs.go:284] No container was found matching "coredns"
I1129 09:02:14.028161 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1129 09:02:14.028230 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1129 09:02:14.057393 460401 cri.go:89] found id: "092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:02:14.057414 460401 cri.go:89] found id: "1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:02:14.057418 460401 cri.go:89] found id: ""
I1129 09:02:14.057427 460401 logs.go:282] 2 containers: [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea]
I1129 09:02:14.057482 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:14.062623 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:14.067579 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1129 09:02:14.067654 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1129 09:02:14.102801 460401 cri.go:89] found id: ""
I1129 09:02:14.102840 460401 logs.go:282] 0 containers: []
W1129 09:02:14.102853 460401 logs.go:284] No container was found matching "kube-proxy"
I1129 09:02:14.102860 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1129 09:02:14.102925 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1129 09:02:14.135951 460401 cri.go:89] found id: "c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6"
I1129 09:02:14.135979 460401 cri.go:89] found id: "2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:02:14.135985 460401 cri.go:89] found id: "976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:02:14.135988 460401 cri.go:89] found id: ""
I1129 09:02:14.135998 460401 logs.go:282] 3 containers: [c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6 2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d]
I1129 09:02:14.136064 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:14.141983 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:14.147316 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:14.152463 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1129 09:02:14.152555 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1129 09:02:14.181365 460401 cri.go:89] found id: ""
I1129 09:02:14.181398 460401 logs.go:282] 0 containers: []
W1129 09:02:14.181409 460401 logs.go:284] No container was found matching "kindnet"
I1129 09:02:14.181417 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1129 09:02:14.181477 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1129 09:02:14.210267 460401 cri.go:89] found id: ""
I1129 09:02:14.210292 460401 logs.go:282] 0 containers: []
W1129 09:02:14.210300 460401 logs.go:284] No container was found matching "storage-provisioner"
I1129 09:02:14.210310 460401 logs.go:123] Gathering logs for kubelet ...
I1129 09:02:14.210323 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1129 09:02:14.298625 460401 logs.go:123] Gathering logs for dmesg ...
I1129 09:02:14.298662 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1129 09:02:14.315504 460401 logs.go:123] Gathering logs for etcd [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625] ...
I1129 09:02:14.315529 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:02:14.357098 460401 logs.go:123] Gathering logs for kube-scheduler [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21] ...
I1129 09:02:14.357134 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:02:14.407082 460401 logs.go:123] Gathering logs for kube-scheduler [1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea] ...
I1129 09:02:14.407133 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:02:14.441442 460401 logs.go:123] Gathering logs for kube-controller-manager [976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d] ...
I1129 09:02:14.441482 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:02:14.476419 460401 logs.go:123] Gathering logs for container status ...
I1129 09:02:14.476452 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1129 09:02:13.579150 493486 system_pods.go:86] 8 kube-system pods found
I1129 09:02:13.579183 493486 system_pods.go:89] "coredns-5dd5756b68-phw28" [7fc2b8dd-43dd-43df-8887-9ffa6de36fb4] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1129 09:02:13.579189 493486 system_pods.go:89] "etcd-old-k8s-version-295154" [b49cf7c8-8d72-4db9-a96f-d796fd8d9e08] Running
I1129 09:02:13.579195 493486 system_pods.go:89] "kindnet-k4n9l" [74cdf2cd-3f3a-4be5-9a9f-6d0b67090fb8] Running
I1129 09:02:13.579199 493486 system_pods.go:89] "kube-apiserver-old-k8s-version-295154" [e4ca0771-197f-4d77-97f0-7a7778e227de] Running
I1129 09:02:13.579203 493486 system_pods.go:89] "kube-controller-manager-old-k8s-version-295154" [6825ac68-da0d-474d-ac97-53398adffd73] Running
I1129 09:02:13.579206 493486 system_pods.go:89] "kube-proxy-4rfb4" [05ef67c3-0d6e-453d-a0e5-81c649c3e033] Running
I1129 09:02:13.579210 493486 system_pods.go:89] "kube-scheduler-old-k8s-version-295154" [97d5e6fb-5cb8-4a03-a8df-3f76df5b2671] Running
I1129 09:02:13.579220 493486 system_pods.go:89] "storage-provisioner" [359871fd-a77c-430a-87c1-b313992718e2] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1129 09:02:13.579237 493486 retry.go:31] will retry after 360.039109ms: missing components: kube-dns
I1129 09:02:13.944039 493486 system_pods.go:86] 8 kube-system pods found
I1129 09:02:13.944084 493486 system_pods.go:89] "coredns-5dd5756b68-phw28" [7fc2b8dd-43dd-43df-8887-9ffa6de36fb4] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1129 09:02:13.944094 493486 system_pods.go:89] "etcd-old-k8s-version-295154" [b49cf7c8-8d72-4db9-a96f-d796fd8d9e08] Running
I1129 09:02:13.944104 493486 system_pods.go:89] "kindnet-k4n9l" [74cdf2cd-3f3a-4be5-9a9f-6d0b67090fb8] Running
I1129 09:02:13.944110 493486 system_pods.go:89] "kube-apiserver-old-k8s-version-295154" [e4ca0771-197f-4d77-97f0-7a7778e227de] Running
I1129 09:02:13.944116 493486 system_pods.go:89] "kube-controller-manager-old-k8s-version-295154" [6825ac68-da0d-474d-ac97-53398adffd73] Running
I1129 09:02:13.944121 493486 system_pods.go:89] "kube-proxy-4rfb4" [05ef67c3-0d6e-453d-a0e5-81c649c3e033] Running
I1129 09:02:13.944127 493486 system_pods.go:89] "kube-scheduler-old-k8s-version-295154" [97d5e6fb-5cb8-4a03-a8df-3f76df5b2671] Running
I1129 09:02:13.944133 493486 system_pods.go:89] "storage-provisioner" [359871fd-a77c-430a-87c1-b313992718e2] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1129 09:02:13.944166 493486 retry.go:31] will retry after 339.658127ms: missing components: kube-dns
I1129 09:02:14.288499 493486 system_pods.go:86] 8 kube-system pods found
I1129 09:02:14.288533 493486 system_pods.go:89] "coredns-5dd5756b68-phw28" [7fc2b8dd-43dd-43df-8887-9ffa6de36fb4] Running
I1129 09:02:14.288543 493486 system_pods.go:89] "etcd-old-k8s-version-295154" [b49cf7c8-8d72-4db9-a96f-d796fd8d9e08] Running
I1129 09:02:14.288548 493486 system_pods.go:89] "kindnet-k4n9l" [74cdf2cd-3f3a-4be5-9a9f-6d0b67090fb8] Running
I1129 09:02:14.288553 493486 system_pods.go:89] "kube-apiserver-old-k8s-version-295154" [e4ca0771-197f-4d77-97f0-7a7778e227de] Running
I1129 09:02:14.288563 493486 system_pods.go:89] "kube-controller-manager-old-k8s-version-295154" [6825ac68-da0d-474d-ac97-53398adffd73] Running
I1129 09:02:14.288568 493486 system_pods.go:89] "kube-proxy-4rfb4" [05ef67c3-0d6e-453d-a0e5-81c649c3e033] Running
I1129 09:02:14.288573 493486 system_pods.go:89] "kube-scheduler-old-k8s-version-295154" [97d5e6fb-5cb8-4a03-a8df-3f76df5b2671] Running
I1129 09:02:14.288578 493486 system_pods.go:89] "storage-provisioner" [359871fd-a77c-430a-87c1-b313992718e2] Running
I1129 09:02:14.288588 493486 system_pods.go:126] duration metric: took 1.028603527s to wait for k8s-apps to be running ...
I1129 09:02:14.288601 493486 system_svc.go:44] waiting for kubelet service to be running ....
I1129 09:02:14.288662 493486 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1129 09:02:14.302535 493486 system_svc.go:56] duration metric: took 13.922382ms WaitForService to wait for kubelet
I1129 09:02:14.302570 493486 kubeadm.go:587] duration metric: took 15.996603485s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1129 09:02:14.302594 493486 node_conditions.go:102] verifying NodePressure condition ...
I1129 09:02:14.305508 493486 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I1129 09:02:14.305535 493486 node_conditions.go:123] node cpu capacity is 8
I1129 09:02:14.305552 493486 node_conditions.go:105] duration metric: took 2.953214ms to run NodePressure ...
I1129 09:02:14.305564 493486 start.go:242] waiting for startup goroutines ...
I1129 09:02:14.305570 493486 start.go:247] waiting for cluster config update ...
I1129 09:02:14.305583 493486 start.go:256] writing updated cluster config ...
I1129 09:02:14.305887 493486 ssh_runner.go:195] Run: rm -f paused
I1129 09:02:14.309803 493486 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1129 09:02:14.314558 493486 pod_ready.go:83] waiting for pod "coredns-5dd5756b68-phw28" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:14.319446 493486 pod_ready.go:94] pod "coredns-5dd5756b68-phw28" is "Ready"
I1129 09:02:14.319479 493486 pod_ready.go:86] duration metric: took 4.889509ms for pod "coredns-5dd5756b68-phw28" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:14.322499 493486 pod_ready.go:83] waiting for pod "etcd-old-k8s-version-295154" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:14.326608 493486 pod_ready.go:94] pod "etcd-old-k8s-version-295154" is "Ready"
I1129 09:02:14.326631 493486 pod_ready.go:86] duration metric: took 4.109693ms for pod "etcd-old-k8s-version-295154" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:14.329352 493486 pod_ready.go:83] waiting for pod "kube-apiserver-old-k8s-version-295154" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:14.333844 493486 pod_ready.go:94] pod "kube-apiserver-old-k8s-version-295154" is "Ready"
I1129 09:02:14.333867 493486 pod_ready.go:86] duration metric: took 4.49688ms for pod "kube-apiserver-old-k8s-version-295154" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:14.336686 493486 pod_ready.go:83] waiting for pod "kube-controller-manager-old-k8s-version-295154" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:14.714439 493486 pod_ready.go:94] pod "kube-controller-manager-old-k8s-version-295154" is "Ready"
I1129 09:02:14.714472 493486 pod_ready.go:86] duration metric: took 377.765984ms for pod "kube-controller-manager-old-k8s-version-295154" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:14.915822 493486 pod_ready.go:83] waiting for pod "kube-proxy-4rfb4" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:15.314552 493486 pod_ready.go:94] pod "kube-proxy-4rfb4" is "Ready"
I1129 09:02:15.314586 493486 pod_ready.go:86] duration metric: took 398.736001ms for pod "kube-proxy-4rfb4" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:15.515989 493486 pod_ready.go:83] waiting for pod "kube-scheduler-old-k8s-version-295154" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:15.913869 493486 pod_ready.go:94] pod "kube-scheduler-old-k8s-version-295154" is "Ready"
I1129 09:02:15.913896 493486 pod_ready.go:86] duration metric: took 397.877691ms for pod "kube-scheduler-old-k8s-version-295154" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:15.913908 493486 pod_ready.go:40] duration metric: took 1.604073956s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1129 09:02:15.959941 493486 start.go:625] kubectl: 1.34.2, cluster: 1.28.0 (minor skew: 6)
I1129 09:02:15.961883 493486 out.go:203]
W1129 09:02:15.963183 493486 out.go:285] ! /usr/local/bin/kubectl is version 1.34.2, which may have incompatibilities with Kubernetes 1.28.0.
I1129 09:02:15.964449 493486 out.go:179] - Want kubectl v1.28.0? Try 'minikube kubectl -- get pods -A'
I1129 09:02:15.966035 493486 out.go:179] * Done! kubectl is now configured to use "old-k8s-version-295154" cluster and "default" namespace by default
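Given the skew warning above (client v1.34.2 against cluster v1.28.0, six minor versions apart), a hedged way to run a matching client for this profile, following the hint printed in the output:

  minikube -p old-k8s-version-295154 kubectl -- get pods -A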
W1129 09:02:15.330798 494126 node_ready.go:57] node "no-preload-924441" has "Ready":"False" status (will retry)
W1129 09:02:17.331851 494126 node_ready.go:57] node "no-preload-924441" has "Ready":"False" status (will retry)
I1129 09:02:14.509454 460401 logs.go:123] Gathering logs for describe nodes ...
I1129 09:02:14.509484 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1129 09:02:14.571273 460401 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1129 09:02:14.571298 460401 logs.go:123] Gathering logs for kube-apiserver [7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1] ...
I1129 09:02:14.571312 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1"
I1129 09:02:14.605440 460401 logs.go:123] Gathering logs for kube-apiserver [1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101] ...
I1129 09:02:14.605476 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:02:14.642678 460401 logs.go:123] Gathering logs for kube-controller-manager [c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6] ...
I1129 09:02:14.642712 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6"
I1129 09:02:14.671483 460401 logs.go:123] Gathering logs for kube-controller-manager [2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a] ...
I1129 09:02:14.671514 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:02:14.701619 460401 logs.go:123] Gathering logs for containerd ...
I1129 09:02:14.701647 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1129 09:02:17.246912 460401 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
I1129 09:02:17.247337 460401 api_server.go:269] stopped: https://192.168.85.2:8443/healthz: Get "https://192.168.85.2:8443/healthz": dial tcp 192.168.85.2:8443: connect: connection refused
I1129 09:02:17.247422 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1129 09:02:17.247479 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1129 09:02:17.277610 460401 cri.go:89] found id: "7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1"
I1129 09:02:17.277632 460401 cri.go:89] found id: "1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:02:17.277637 460401 cri.go:89] found id: ""
I1129 09:02:17.277647 460401 logs.go:282] 2 containers: [7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101]
I1129 09:02:17.277711 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:17.283531 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:17.288554 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1129 09:02:17.288644 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1129 09:02:17.316819 460401 cri.go:89] found id: "f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:02:17.316847 460401 cri.go:89] found id: ""
I1129 09:02:17.316857 460401 logs.go:282] 1 containers: [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625]
I1129 09:02:17.316921 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:17.322640 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1129 09:02:17.322770 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1129 09:02:17.353531 460401 cri.go:89] found id: ""
I1129 09:02:17.353563 460401 logs.go:282] 0 containers: []
W1129 09:02:17.353575 460401 logs.go:284] No container was found matching "coredns"
I1129 09:02:17.353585 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1129 09:02:17.353651 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1129 09:02:17.384830 460401 cri.go:89] found id: "092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:02:17.384854 460401 cri.go:89] found id: "1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:02:17.384858 460401 cri.go:89] found id: ""
I1129 09:02:17.384867 460401 logs.go:282] 2 containers: [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea]
I1129 09:02:17.384932 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:17.390132 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:17.395096 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1129 09:02:17.395177 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1129 09:02:17.425643 460401 cri.go:89] found id: ""
I1129 09:02:17.425681 460401 logs.go:282] 0 containers: []
W1129 09:02:17.425692 460401 logs.go:284] No container was found matching "kube-proxy"
I1129 09:02:17.425704 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1129 09:02:17.425788 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1129 09:02:17.456077 460401 cri.go:89] found id: "c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6"
I1129 09:02:17.456105 460401 cri.go:89] found id: "2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:02:17.456113 460401 cri.go:89] found id: "976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:02:17.456136 460401 cri.go:89] found id: ""
I1129 09:02:17.456148 460401 logs.go:282] 3 containers: [c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6 2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d]
I1129 09:02:17.456213 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:17.461610 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:17.466727 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:17.471762 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1129 09:02:17.471849 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1129 09:02:17.501750 460401 cri.go:89] found id: ""
I1129 09:02:17.501782 460401 logs.go:282] 0 containers: []
W1129 09:02:17.501793 460401 logs.go:284] No container was found matching "kindnet"
I1129 09:02:17.501801 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1129 09:02:17.501868 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1129 09:02:17.531903 460401 cri.go:89] found id: ""
I1129 09:02:17.531932 460401 logs.go:282] 0 containers: []
W1129 09:02:17.531942 460401 logs.go:284] No container was found matching "storage-provisioner"
I1129 09:02:17.531956 460401 logs.go:123] Gathering logs for kubelet ...
I1129 09:02:17.531972 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1129 09:02:17.630517 460401 logs.go:123] Gathering logs for kube-apiserver [7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1] ...
I1129 09:02:17.630566 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1"
I1129 09:02:17.667169 460401 logs.go:123] Gathering logs for kube-apiserver [1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101] ...
I1129 09:02:17.667205 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:02:17.707311 460401 logs.go:123] Gathering logs for etcd [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625] ...
I1129 09:02:17.707360 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:02:17.746580 460401 logs.go:123] Gathering logs for kube-scheduler [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21] ...
I1129 09:02:17.746621 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:02:17.799162 460401 logs.go:123] Gathering logs for kube-scheduler [1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea] ...
I1129 09:02:17.799207 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:02:17.839313 460401 logs.go:123] Gathering logs for kube-controller-manager [c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6] ...
I1129 09:02:17.839355 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6"
I1129 09:02:17.872700 460401 logs.go:123] Gathering logs for kube-controller-manager [2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a] ...
I1129 09:02:17.872742 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 2f891797b465edfa86c2546293600d895d1c61c2f2a00d85b8482ff1b20cb71a"
I1129 09:02:17.904806 460401 logs.go:123] Gathering logs for dmesg ...
I1129 09:02:17.904838 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1129 09:02:17.920866 460401 logs.go:123] Gathering logs for describe nodes ...
I1129 09:02:17.920904 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1129 09:02:17.983002 460401 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1129 09:02:17.983027 460401 logs.go:123] Gathering logs for kube-controller-manager [976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d] ...
I1129 09:02:17.983040 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:02:18.019203 460401 logs.go:123] Gathering logs for containerd ...
I1129 09:02:18.019241 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1129 09:02:18.070893 460401 logs.go:123] Gathering logs for container status ...
I1129 09:02:18.070936 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
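The repeated api_server.go:253/269 lines above are minikube polling the apiserver's /healthz endpoint and falling back to diagnostics gathering while the dial is refused. As a rough sketch of that poll-and-retry pattern (not minikube's actual implementation; the URL, interval, and TLS handling are assumptions for illustration):

```go
package main

import (
	"crypto/tls"
	"fmt"
	"io"
	"net/http"
	"time"
)

// pollHealthz hits an apiserver /healthz endpoint until it answers "ok"
// or the deadline passes. TLS verification is skipped purely for
// illustration, mirroring a local test cluster with self-signed certs.
func pollHealthz(url string, timeout time.Duration) error {
	client := &http.Client{
		Timeout: 2 * time.Second,
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // assumption: self-signed cert
		},
	}
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		resp, err := client.Get(url)
		if err == nil {
			body, _ := io.ReadAll(resp.Body)
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				fmt.Printf("%s returned 200: %s\n", url, body)
				return nil
			}
		} else {
			fmt.Printf("stopped: %v (will retry)\n", err) // e.g. connection refused, as in the log
		}
		time.Sleep(3 * time.Second) // assumed retry interval
	}
	return fmt.Errorf("apiserver at %s not healthy within %s", url, timeout)
}

func main() {
	// https://192.168.85.2:8443/healthz is the address polled in the log above.
	if err := pollHealthz("https://192.168.85.2:8443/healthz", time.Minute); err != nil {
		fmt.Println(err)
	}
}
```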
W1129 09:02:19.830479 494126 node_ready.go:57] node "no-preload-924441" has "Ready":"False" status (will retry)
I1129 09:02:20.833313 494126 node_ready.go:49] node "no-preload-924441" is "Ready"
I1129 09:02:20.833355 494126 node_ready.go:38] duration metric: took 14.505431475s for node "no-preload-924441" to be "Ready" ...
I1129 09:02:20.833377 494126 api_server.go:52] waiting for apiserver process to appear ...
I1129 09:02:20.833445 494126 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1129 09:02:20.850134 494126 api_server.go:72] duration metric: took 14.795523765s to wait for apiserver process to appear ...
I1129 09:02:20.850165 494126 api_server.go:88] waiting for apiserver healthz status ...
I1129 09:02:20.850190 494126 api_server.go:253] Checking apiserver healthz at https://192.168.103.2:8443/healthz ...
I1129 09:02:20.856514 494126 api_server.go:279] https://192.168.103.2:8443/healthz returned 200:
ok
I1129 09:02:20.857900 494126 api_server.go:141] control plane version: v1.34.1
I1129 09:02:20.857933 494126 api_server.go:131] duration metric: took 7.759312ms to wait for apiserver health ...
I1129 09:02:20.857945 494126 system_pods.go:43] waiting for kube-system pods to appear ...
I1129 09:02:20.861811 494126 system_pods.go:59] 8 kube-system pods found
I1129 09:02:20.861851 494126 system_pods.go:61] "coredns-66bc5c9577-nsh8w" [bf2a8ab9-aaca-4ee6-a390-a02099f693d9] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1129 09:02:20.861863 494126 system_pods.go:61] "etcd-no-preload-924441" [e3cda1b0-1ca8-4ded-a506-f728fc050781] Running
I1129 09:02:20.861871 494126 system_pods.go:61] "kindnet-nscfk" [052c2152-0369-4121-b2fe-25b79a00145a] Running
I1129 09:02:20.861877 494126 system_pods.go:61] "kube-apiserver-no-preload-924441" [08168b39-5d95-4d6b-ac99-3c6ee50a2530] Running
I1129 09:02:20.861892 494126 system_pods.go:61] "kube-controller-manager-no-preload-924441" [9e84b562-ff11-40c1-a7ab-3682dbbae4be] Running
I1129 09:02:20.861897 494126 system_pods.go:61] "kube-proxy-96fcg" [c9fd8592-2ec4-4da3-a800-b136c118d379] Running
I1129 09:02:20.861902 494126 system_pods.go:61] "kube-scheduler-no-preload-924441" [91fa5a87-81d7-4b1c-8334-9c5c4fcf8997] Running
I1129 09:02:20.861912 494126 system_pods.go:61] "storage-provisioner" [88b64cf8-3233-47bb-be31-6f367a8a1433] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1129 09:02:20.861920 494126 system_pods.go:74] duration metric: took 3.967151ms to wait for pod list to return data ...
I1129 09:02:20.861931 494126 default_sa.go:34] waiting for default service account to be created ...
I1129 09:02:20.864542 494126 default_sa.go:45] found service account: "default"
I1129 09:02:20.864569 494126 default_sa.go:55] duration metric: took 2.631761ms for default service account to be created ...
I1129 09:02:20.864581 494126 system_pods.go:116] waiting for k8s-apps to be running ...
I1129 09:02:20.867876 494126 system_pods.go:86] 8 kube-system pods found
I1129 09:02:20.867913 494126 system_pods.go:89] "coredns-66bc5c9577-nsh8w" [bf2a8ab9-aaca-4ee6-a390-a02099f693d9] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1129 09:02:20.867924 494126 system_pods.go:89] "etcd-no-preload-924441" [e3cda1b0-1ca8-4ded-a506-f728fc050781] Running
I1129 09:02:20.867932 494126 system_pods.go:89] "kindnet-nscfk" [052c2152-0369-4121-b2fe-25b79a00145a] Running
I1129 09:02:20.867938 494126 system_pods.go:89] "kube-apiserver-no-preload-924441" [08168b39-5d95-4d6b-ac99-3c6ee50a2530] Running
I1129 09:02:20.867999 494126 system_pods.go:89] "kube-controller-manager-no-preload-924441" [9e84b562-ff11-40c1-a7ab-3682dbbae4be] Running
I1129 09:02:20.868005 494126 system_pods.go:89] "kube-proxy-96fcg" [c9fd8592-2ec4-4da3-a800-b136c118d379] Running
I1129 09:02:20.868011 494126 system_pods.go:89] "kube-scheduler-no-preload-924441" [91fa5a87-81d7-4b1c-8334-9c5c4fcf8997] Running
I1129 09:02:20.868027 494126 system_pods.go:89] "storage-provisioner" [88b64cf8-3233-47bb-be31-6f367a8a1433] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1129 09:02:20.868077 494126 retry.go:31] will retry after 292.54579ms: missing components: kube-dns
I1129 09:02:21.165357 494126 system_pods.go:86] 8 kube-system pods found
I1129 09:02:21.165399 494126 system_pods.go:89] "coredns-66bc5c9577-nsh8w" [bf2a8ab9-aaca-4ee6-a390-a02099f693d9] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1129 09:02:21.165408 494126 system_pods.go:89] "etcd-no-preload-924441" [e3cda1b0-1ca8-4ded-a506-f728fc050781] Running
I1129 09:02:21.165416 494126 system_pods.go:89] "kindnet-nscfk" [052c2152-0369-4121-b2fe-25b79a00145a] Running
I1129 09:02:21.165422 494126 system_pods.go:89] "kube-apiserver-no-preload-924441" [08168b39-5d95-4d6b-ac99-3c6ee50a2530] Running
I1129 09:02:21.165428 494126 system_pods.go:89] "kube-controller-manager-no-preload-924441" [9e84b562-ff11-40c1-a7ab-3682dbbae4be] Running
I1129 09:02:21.165434 494126 system_pods.go:89] "kube-proxy-96fcg" [c9fd8592-2ec4-4da3-a800-b136c118d379] Running
I1129 09:02:21.165439 494126 system_pods.go:89] "kube-scheduler-no-preload-924441" [91fa5a87-81d7-4b1c-8334-9c5c4fcf8997] Running
I1129 09:02:21.165449 494126 system_pods.go:89] "storage-provisioner" [88b64cf8-3233-47bb-be31-6f367a8a1433] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1129 09:02:21.165470 494126 retry.go:31] will retry after 336.406198ms: missing components: kube-dns
I1129 09:02:21.505471 494126 system_pods.go:86] 8 kube-system pods found
I1129 09:02:21.505510 494126 system_pods.go:89] "coredns-66bc5c9577-nsh8w" [bf2a8ab9-aaca-4ee6-a390-a02099f693d9] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1129 09:02:21.505516 494126 system_pods.go:89] "etcd-no-preload-924441" [e3cda1b0-1ca8-4ded-a506-f728fc050781] Running
I1129 09:02:21.505524 494126 system_pods.go:89] "kindnet-nscfk" [052c2152-0369-4121-b2fe-25b79a00145a] Running
I1129 09:02:21.505528 494126 system_pods.go:89] "kube-apiserver-no-preload-924441" [08168b39-5d95-4d6b-ac99-3c6ee50a2530] Running
I1129 09:02:21.505531 494126 system_pods.go:89] "kube-controller-manager-no-preload-924441" [9e84b562-ff11-40c1-a7ab-3682dbbae4be] Running
I1129 09:02:21.505534 494126 system_pods.go:89] "kube-proxy-96fcg" [c9fd8592-2ec4-4da3-a800-b136c118d379] Running
I1129 09:02:21.505538 494126 system_pods.go:89] "kube-scheduler-no-preload-924441" [91fa5a87-81d7-4b1c-8334-9c5c4fcf8997] Running
I1129 09:02:21.505542 494126 system_pods.go:89] "storage-provisioner" [88b64cf8-3233-47bb-be31-6f367a8a1433] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1129 09:02:21.505560 494126 retry.go:31] will retry after 447.535618ms: missing components: kube-dns
I1129 09:02:21.957409 494126 system_pods.go:86] 8 kube-system pods found
I1129 09:02:21.957439 494126 system_pods.go:89] "coredns-66bc5c9577-nsh8w" [bf2a8ab9-aaca-4ee6-a390-a02099f693d9] Running
I1129 09:02:21.957444 494126 system_pods.go:89] "etcd-no-preload-924441" [e3cda1b0-1ca8-4ded-a506-f728fc050781] Running
I1129 09:02:21.957448 494126 system_pods.go:89] "kindnet-nscfk" [052c2152-0369-4121-b2fe-25b79a00145a] Running
I1129 09:02:21.957451 494126 system_pods.go:89] "kube-apiserver-no-preload-924441" [08168b39-5d95-4d6b-ac99-3c6ee50a2530] Running
I1129 09:02:21.957456 494126 system_pods.go:89] "kube-controller-manager-no-preload-924441" [9e84b562-ff11-40c1-a7ab-3682dbbae4be] Running
I1129 09:02:21.957459 494126 system_pods.go:89] "kube-proxy-96fcg" [c9fd8592-2ec4-4da3-a800-b136c118d379] Running
I1129 09:02:21.957464 494126 system_pods.go:89] "kube-scheduler-no-preload-924441" [91fa5a87-81d7-4b1c-8334-9c5c4fcf8997] Running
I1129 09:02:21.957467 494126 system_pods.go:89] "storage-provisioner" [88b64cf8-3233-47bb-be31-6f367a8a1433] Running
I1129 09:02:21.957476 494126 system_pods.go:126] duration metric: took 1.092887723s to wait for k8s-apps to be running ...
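The retry.go:31 lines above show the wait loop for kube-system pods: list the pods, report what is missing (kube-dns), sleep a growing interval, try again. A minimal generic sketch of such a wait-with-backoff (the check function, starting interval, and growth factor are placeholders, not minikube's code):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// waitFor retries check with a growing interval until it succeeds or the
// timeout elapses, echoing the "will retry after ..." lines in the log.
func waitFor(check func() error, timeout time.Duration) error {
	start := time.Now()
	interval := 300 * time.Millisecond // assumed starting interval
	for {
		err := check()
		if err == nil {
			fmt.Printf("ready after %s\n", time.Since(start))
			return nil
		}
		if time.Since(start) > timeout {
			return fmt.Errorf("timed out: last error: %w", err)
		}
		fmt.Printf("will retry after %s: %v\n", interval, err)
		time.Sleep(interval)
		interval = interval * 3 / 2 // rough growth, loosely mirroring 292ms -> 336ms -> 447ms above
	}
}

func main() {
	attempts := 0
	_ = waitFor(func() error {
		attempts++
		if attempts < 4 {
			return errors.New("missing components: kube-dns") // stand-in for the real pod check
		}
		return nil
	}, 30*time.Second)
}
```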
I1129 09:02:21.957498 494126 system_svc.go:44] waiting for kubelet service to be running ...
I1129 09:02:21.957549 494126 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1129 09:02:21.971582 494126 system_svc.go:56] duration metric: took 14.071974ms WaitForService to wait for kubelet
I1129 09:02:21.971613 494126 kubeadm.go:587] duration metric: took 15.917009838s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1129 09:02:21.971632 494126 node_conditions.go:102] verifying NodePressure condition ...
I1129 09:02:21.974426 494126 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I1129 09:02:21.974453 494126 node_conditions.go:123] node cpu capacity is 8
I1129 09:02:21.974471 494126 node_conditions.go:105] duration metric: took 2.83418ms to run NodePressure ...
I1129 09:02:21.974485 494126 start.go:242] waiting for startup goroutines ...
I1129 09:02:21.974492 494126 start.go:247] waiting for cluster config update ...
I1129 09:02:21.974502 494126 start.go:256] writing updated cluster config ...
I1129 09:02:21.974780 494126 ssh_runner.go:195] Run: rm -f paused
I1129 09:02:21.978967 494126 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1129 09:02:21.982434 494126 pod_ready.go:83] waiting for pod "coredns-66bc5c9577-nsh8w" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:21.986370 494126 pod_ready.go:94] pod "coredns-66bc5c9577-nsh8w" is "Ready"
I1129 09:02:21.986395 494126 pod_ready.go:86] duration metric: took 3.939701ms for pod "coredns-66bc5c9577-nsh8w" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:21.988365 494126 pod_ready.go:83] waiting for pod "etcd-no-preload-924441" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:21.991850 494126 pod_ready.go:94] pod "etcd-no-preload-924441" is "Ready"
I1129 09:02:21.991874 494126 pod_ready.go:86] duration metric: took 3.486388ms for pod "etcd-no-preload-924441" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:21.993587 494126 pod_ready.go:83] waiting for pod "kube-apiserver-no-preload-924441" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:21.997072 494126 pod_ready.go:94] pod "kube-apiserver-no-preload-924441" is "Ready"
I1129 09:02:21.997092 494126 pod_ready.go:86] duration metric: took 3.484304ms for pod "kube-apiserver-no-preload-924441" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:21.998698 494126 pod_ready.go:83] waiting for pod "kube-controller-manager-no-preload-924441" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:22.382918 494126 pod_ready.go:94] pod "kube-controller-manager-no-preload-924441" is "Ready"
I1129 09:02:22.382948 494126 pod_ready.go:86] duration metric: took 384.232783ms for pod "kube-controller-manager-no-preload-924441" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:22.583125 494126 pod_ready.go:83] waiting for pod "kube-proxy-96fcg" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:22.982608 494126 pod_ready.go:94] pod "kube-proxy-96fcg" is "Ready"
I1129 09:02:22.982639 494126 pod_ready.go:86] duration metric: took 399.48383ms for pod "kube-proxy-96fcg" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:23.184031 494126 pod_ready.go:83] waiting for pod "kube-scheduler-no-preload-924441" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:23.583027 494126 pod_ready.go:94] pod "kube-scheduler-no-preload-924441" is "Ready"
I1129 09:02:23.583058 494126 pod_ready.go:86] duration metric: took 399.00134ms for pod "kube-scheduler-no-preload-924441" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:02:23.583071 494126 pod_ready.go:40] duration metric: took 1.604064431s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
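The pod_ready lines above decide "Ready" from the pod's status conditions. A minimal sketch of that check using the upstream k8s.io/api types (assuming that module is on the import path; this is the standard PodReady condition test, not necessarily minikube's own helper):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// isPodReady reports whether a pod's PodReady condition is True,
// which is what `pod "..." is "Ready"` means in the log above.
func isPodReady(pod *corev1.Pod) bool {
	for _, cond := range pod.Status.Conditions {
		if cond.Type == corev1.PodReady {
			return cond.Status == corev1.ConditionTrue
		}
	}
	return false
}

func main() {
	pod := &corev1.Pod{
		Status: corev1.PodStatus{
			Conditions: []corev1.PodCondition{
				{Type: corev1.PodReady, Status: corev1.ConditionTrue},
			},
		},
	}
	fmt.Println(isPodReady(pod)) // true
}
```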
I1129 09:02:23.632822 494126 start.go:625] kubectl: 1.34.2, cluster: 1.34.1 (minor skew: 0)
I1129 09:02:23.634677 494126 out.go:179] * Done! kubectl is now configured to use "no-preload-924441" cluster and "default" namespace by default
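The final start.go:625 line reports the kubectl/cluster version skew. A hedged sketch of how a minor-version skew can be computed from two "major.minor.patch" strings (plain string parsing for illustration; minikube's real check may differ):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// minorSkew returns the absolute difference between the minor components
// of two version strings, e.g. 1.34.2 vs 1.34.1 -> 0.
func minorSkew(client, server string) (int, error) {
	minor := func(v string) (int, error) {
		parts := strings.Split(strings.TrimPrefix(v, "v"), ".")
		if len(parts) < 2 {
			return 0, fmt.Errorf("bad version %q", v)
		}
		return strconv.Atoi(parts[1])
	}
	c, err := minor(client)
	if err != nil {
		return 0, err
	}
	s, err := minor(server)
	if err != nil {
		return 0, err
	}
	if c > s {
		return c - s, nil
	}
	return s - c, nil
}

func main() {
	skew, _ := minorSkew("1.34.2", "1.34.1")
	fmt.Printf("kubectl: 1.34.2, cluster: 1.34.1 (minor skew: %d)\n", skew) // minor skew: 0
}
```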
I1129 09:02:20.607959 460401 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
I1129 09:02:20.608406 460401 api_server.go:269] stopped: https://192.168.85.2:8443/healthz: Get "https://192.168.85.2:8443/healthz": dial tcp 192.168.85.2:8443: connect: connection refused
I1129 09:02:20.608469 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1129 09:02:20.608531 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1129 09:02:20.639116 460401 cri.go:89] found id: "7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1"
I1129 09:02:20.639148 460401 cri.go:89] found id: "1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:02:20.639155 460401 cri.go:89] found id: ""
I1129 09:02:20.639168 460401 logs.go:282] 2 containers: [7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101]
I1129 09:02:20.639240 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:20.644749 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:20.649347 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1129 09:02:20.649411 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1129 09:02:20.677383 460401 cri.go:89] found id: "f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:02:20.677404 460401 cri.go:89] found id: ""
I1129 09:02:20.677413 460401 logs.go:282] 1 container: [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625]
I1129 09:02:20.677466 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:20.682625 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1129 09:02:20.682708 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1129 09:02:20.711021 460401 cri.go:89] found id: ""
I1129 09:02:20.711050 460401 logs.go:282] 0 containers: []
W1129 09:02:20.711060 460401 logs.go:284] No container was found matching "coredns"
I1129 09:02:20.711070 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1129 09:02:20.711138 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1129 09:02:20.745598 460401 cri.go:89] found id: "092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:02:20.745626 460401 cri.go:89] found id: "1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:02:20.745632 460401 cri.go:89] found id: ""
I1129 09:02:20.745643 460401 logs.go:282] 2 containers: [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea]
I1129 09:02:20.745716 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:20.751838 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:20.757804 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1129 09:02:20.757881 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1129 09:02:20.793640 460401 cri.go:89] found id: ""
I1129 09:02:20.793671 460401 logs.go:282] 0 containers: []
W1129 09:02:20.793683 460401 logs.go:284] No container was found matching "kube-proxy"
I1129 09:02:20.793691 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1129 09:02:20.793792 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1129 09:02:20.830071 460401 cri.go:89] found id: "c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6"
I1129 09:02:20.830099 460401 cri.go:89] found id: "976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:02:20.830104 460401 cri.go:89] found id: ""
I1129 09:02:20.830114 460401 logs.go:282] 2 containers: [c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d]
I1129 09:02:20.830179 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:20.837576 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:20.843146 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1129 09:02:20.843225 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1129 09:02:20.883480 460401 cri.go:89] found id: ""
I1129 09:02:20.883525 460401 logs.go:282] 0 containers: []
W1129 09:02:20.883536 460401 logs.go:284] No container was found matching "kindnet"
I1129 09:02:20.883543 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1129 09:02:20.883598 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1129 09:02:20.923499 460401 cri.go:89] found id: ""
I1129 09:02:20.923532 460401 logs.go:282] 0 containers: []
W1129 09:02:20.923543 460401 logs.go:284] No container was found matching "storage-provisioner"
I1129 09:02:20.923557 460401 logs.go:123] Gathering logs for kube-controller-manager [c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6] ...
I1129 09:02:20.923574 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6"
I1129 09:02:20.961675 460401 logs.go:123] Gathering logs for kube-controller-manager [976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d] ...
I1129 09:02:20.961713 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:02:20.996489 460401 logs.go:123] Gathering logs for containerd ...
I1129 09:02:20.996524 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1129 09:02:21.046535 460401 logs.go:123] Gathering logs for kubelet ...
I1129 09:02:21.046596 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1129 09:02:21.131239 460401 logs.go:123] Gathering logs for describe nodes ...
I1129 09:02:21.131286 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1129 09:02:21.192537 460401 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1129 09:02:21.192557 460401 logs.go:123] Gathering logs for kube-apiserver [7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1] ...
I1129 09:02:21.192573 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1"
I1129 09:02:21.227894 460401 logs.go:123] Gathering logs for kube-apiserver [1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101] ...
I1129 09:02:21.227932 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:02:21.262592 460401 logs.go:123] Gathering logs for container status ...
I1129 09:02:21.262632 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1129 09:02:21.298034 460401 logs.go:123] Gathering logs for dmesg ...
I1129 09:02:21.298076 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1129 09:02:21.313593 460401 logs.go:123] Gathering logs for etcd [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625] ...
I1129 09:02:21.313626 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:02:21.355840 460401 logs.go:123] Gathering logs for kube-scheduler [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21] ...
I1129 09:02:21.355878 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:02:21.409528 460401 logs.go:123] Gathering logs for kube-scheduler [1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea] ...
I1129 09:02:21.409570 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:02:23.946261 460401 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
I1129 09:02:23.946794 460401 api_server.go:269] stopped: https://192.168.85.2:8443/healthz: Get "https://192.168.85.2:8443/healthz": dial tcp 192.168.85.2:8443: connect: connection refused
I1129 09:02:23.946872 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1129 09:02:23.946940 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1129 09:02:23.978496 460401 cri.go:89] found id: "7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1"
I1129 09:02:23.978521 460401 cri.go:89] found id: "1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101"
I1129 09:02:23.978525 460401 cri.go:89] found id: ""
I1129 09:02:23.978533 460401 logs.go:282] 2 containers: [7a1e63397d000aac401e89cd0868663c584fe870c2ff14eb45f8a4367d4486b1 1fd4280706d61cbcf9886889f6bf4ab1611870c991359ef9ab1d4e394ae55101]
I1129 09:02:23.978585 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:23.983820 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:23.988502 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1129 09:02:23.988563 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1129 09:02:24.017479 460401 cri.go:89] found id: "f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625"
I1129 09:02:24.017505 460401 cri.go:89] found id: ""
I1129 09:02:24.017516 460401 logs.go:282] 1 container: [f8848e5e1655cac3456277b8b0b7d18c4bad91fab69e433e92c22e3c33ff4625]
I1129 09:02:24.017581 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:24.022978 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1129 09:02:24.023049 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1129 09:02:24.054017 460401 cri.go:89] found id: ""
I1129 09:02:24.054042 460401 logs.go:282] 0 containers: []
W1129 09:02:24.054049 460401 logs.go:284] No container was found matching "coredns"
I1129 09:02:24.054055 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1129 09:02:24.054104 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1129 09:02:24.083682 460401 cri.go:89] found id: "092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:02:24.083704 460401 cri.go:89] found id: "1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea"
I1129 09:02:24.083710 460401 cri.go:89] found id: ""
I1129 09:02:24.083720 460401 logs.go:282] 2 containers: [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21 1ca70c9760bb31036b5cb191fa8757681cc4ff82a6ef53e7d820ae39d6a325ea]
I1129 09:02:24.083797 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:24.089191 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:24.094144 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1129 09:02:24.094223 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1129 09:02:24.123931 460401 cri.go:89] found id: ""
I1129 09:02:24.123956 460401 logs.go:282] 0 containers: []
W1129 09:02:24.123964 460401 logs.go:284] No container was found matching "kube-proxy"
I1129 09:02:24.123972 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1129 09:02:24.124032 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1129 09:02:24.158678 460401 cri.go:89] found id: "c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6"
I1129 09:02:24.158704 460401 cri.go:89] found id: "976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:02:24.158710 460401 cri.go:89] found id: ""
I1129 09:02:24.158721 460401 logs.go:282] 2 containers: [c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d]
I1129 09:02:24.158824 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:24.164380 460401 ssh_runner.go:195] Run: which crictl
I1129 09:02:24.170117 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1129 09:02:24.170196 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1129 09:02:24.202016 460401 cri.go:89] found id: ""
I1129 09:02:24.202057 460401 logs.go:282] 0 containers: []
W1129 09:02:24.202066 460401 logs.go:284] No container was found matching "kindnet"
I1129 09:02:24.202072 460401 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1129 09:02:24.202123 460401 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1129 09:02:24.235359 460401 cri.go:89] found id: ""
I1129 09:02:24.235388 460401 logs.go:282] 0 containers: []
W1129 09:02:24.235399 460401 logs.go:284] No container was found matching "storage-provisioner"
I1129 09:02:24.235412 460401 logs.go:123] Gathering logs for kubelet ...
I1129 09:02:24.235427 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1129 09:02:24.327121 460401 logs.go:123] Gathering logs for kube-scheduler [092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21] ...
I1129 09:02:24.327167 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 092aaf3b340b8d1d8f232e35f0798e461727e7b5609738356ddf194405de6b21"
I1129 09:02:24.380608 460401 logs.go:123] Gathering logs for kube-controller-manager [c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6] ...
I1129 09:02:24.380651 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 c02fde109f33b7f8e531c53bdd46d0f4d0aa69316a6ccb36f76e8398cb60afd6"
I1129 09:02:24.411895 460401 logs.go:123] Gathering logs for kube-controller-manager [976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d] ...
I1129 09:02:24.411923 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 976f1364ed9a03f83e63e1f687ffb6855e93bbdb516287b2e6c38f7984f7f39d"
I1129 09:02:24.450543 460401 logs.go:123] Gathering logs for containerd ...
I1129 09:02:24.450575 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1129 09:02:24.500105 460401 logs.go:123] Gathering logs for container status ...
I1129 09:02:24.500146 460401 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
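Every ssh_runner.go:195 "Run:" line above is a shell command executed on the node over SSH. A rough local analogue using os/exec (the crictl/docker fallback chain is copied verbatim from the log; actually running it requires those tools and sudo):

```go
package main

import (
	"fmt"
	"os/exec"
)

// run executes a diagnostic command through bash -c and returns its
// combined output, loosely mirroring the ssh_runner lines in the log
// (which run over SSH on the minikube node rather than locally).
func run(script string) (string, error) {
	out, err := exec.Command("/bin/bash", "-c", script).CombinedOutput()
	return string(out), err
}

func main() {
	// Same fallback chain as the "container status" gathering step above.
	out, err := run("sudo `which crictl || echo crictl` ps -a || sudo docker ps -a")
	if err != nil {
		fmt.Println("command failed:", err)
	}
	fmt.Print(out)
}
```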
==> container status <==
CONTAINER       IMAGE           CREATED          STATE    NAME                      ATTEMPT  POD ID          POD                                              NAMESPACE
64dcae39f0e63   56cc512116c8f   9 seconds ago    Running  busybox                   0        c3b03930e2672   busybox                                          default
84eb7f692c990   ead0a4a53df89   15 seconds ago   Running  coredns                   0        46a4885d817e8   coredns-5dd5756b68-phw28                         kube-system
c2b64aca34f8b   6e38f40d628db   15 seconds ago   Running  storage-provisioner       0        f0e9f57ece0e7   storage-provisioner                              kube-system
c556471fd7ebd   409467f978b4a   26 seconds ago   Running  kindnet-cni               0        c9cb87dbe2bae   kindnet-k4n9l                                    kube-system
c3eb6059b5593   ea1030da44aa1   29 seconds ago   Running  kube-proxy                0        d9056ddc2e968   kube-proxy-4rfb4                                 kube-system
ec1e8ae808249   f6f496300a2ae   47 seconds ago   Running  kube-scheduler            0        7caf413f5769e   kube-scheduler-old-k8s-version-295154            kube-system
b3d9ef849b109   4be79c38a4bab   47 seconds ago   Running  kube-controller-manager   0        f845d639a6e89   kube-controller-manager-old-k8s-version-295154   kube-system
e534f6de34cb5   73deb9a3f7025   47 seconds ago   Running  etcd                      0        83b4224fe982d   etcd-old-k8s-version-295154                      kube-system
c912b0431f5b9   bb5e0dde9054c   47 seconds ago   Running  kube-apiserver            0        c5ef1020ba416   kube-apiserver-old-k8s-version-295154            kube-system
==> containerd <==
Nov 29 09:02:13 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:13.171284629Z" level=info msg="CreateContainer within sandbox \"f0e9f57ece0e7298ea8ff52e824c152b0a198734fa271e11f9da85ab94980def\" for &ContainerMetadata{Name:storage-provisioner,Attempt:0,} returns container id \"c2b64aca34f8b72337fd1dd9bda969ab607f739b3b5bd64a9962706bb51f1368\""
Nov 29 09:02:13 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:13.171952045Z" level=info msg="StartContainer for \"c2b64aca34f8b72337fd1dd9bda969ab607f739b3b5bd64a9962706bb51f1368\""
Nov 29 09:02:13 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:13.173213037Z" level=info msg="connecting to shim c2b64aca34f8b72337fd1dd9bda969ab607f739b3b5bd64a9962706bb51f1368" address="unix:///run/containerd/s/dc122ba824fb2ecb94628ad2391429e4d2b98c17ac396814c4a25b4d93b141fe" protocol=ttrpc version=3
Nov 29 09:02:13 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:13.175196491Z" level=info msg="CreateContainer within sandbox \"46a4885d817e84fab45e9ad70e7c335ccc0f307e19f484641f3f563e19a3b305\" for &ContainerMetadata{Name:coredns,Attempt:0,} returns container id \"84eb7f692c99059489020b59b47c169ecc9d4286a2bf7a532dae7f5d13e68795\""
Nov 29 09:02:13 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:13.175823701Z" level=info msg="StartContainer for \"84eb7f692c99059489020b59b47c169ecc9d4286a2bf7a532dae7f5d13e68795\""
Nov 29 09:02:13 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:13.176634429Z" level=info msg="connecting to shim 84eb7f692c99059489020b59b47c169ecc9d4286a2bf7a532dae7f5d13e68795" address="unix:///run/containerd/s/950489f09bce35a172bb4082bad530c176c650052c0ffe9dab18daf70ee3f021" protocol=ttrpc version=3
Nov 29 09:02:13 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:13.230846483Z" level=info msg="StartContainer for \"c2b64aca34f8b72337fd1dd9bda969ab607f739b3b5bd64a9962706bb51f1368\" returns successfully"
Nov 29 09:02:13 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:13.234243145Z" level=info msg="StartContainer for \"84eb7f692c99059489020b59b47c169ecc9d4286a2bf7a532dae7f5d13e68795\" returns successfully"
Nov 29 09:02:16 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:16.439586027Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:54baf2f4-8de5-4f66-92ac-f5315174d940,Namespace:default,Attempt:0,}"
Nov 29 09:02:16 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:16.482219935Z" level=info msg="connecting to shim c3b03930e26728c610c785b965715fd3b553dfa8fa71b6e35bcc2370b534d413" address="unix:///run/containerd/s/705109ebb456d589bcc59459487d5f036c6a54c53bc3e7a7b9f9e1b41d8f56cc" namespace=k8s.io protocol=ttrpc version=3
Nov 29 09:02:16 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:16.554186463Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:54baf2f4-8de5-4f66-92ac-f5315174d940,Namespace:default,Attempt:0,} returns sandbox id \"c3b03930e26728c610c785b965715fd3b553dfa8fa71b6e35bcc2370b534d413\""
Nov 29 09:02:16 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:16.556162494Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\""
Nov 29 09:02:19 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:19.188092236Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 29 09:02:19 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:19.188755127Z" level=info msg="stop pulling image gcr.io/k8s-minikube/busybox:1.28.4-glibc: active requests=0, bytes read=2396643"
Nov 29 09:02:19 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:19.190108938Z" level=info msg="ImageCreate event name:\"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 29 09:02:19 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:19.192089044Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 29 09:02:19 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:19.192508223Z" level=info msg="Pulled image \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" with image id \"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\", repo tag \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\", repo digest \"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\", size \"2395207\" in 2.636298875s"
Nov 29 09:02:19 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:19.192553605Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" returns image reference \"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\""
Nov 29 09:02:19 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:19.194479178Z" level=info msg="CreateContainer within sandbox \"c3b03930e26728c610c785b965715fd3b553dfa8fa71b6e35bcc2370b534d413\" for container &ContainerMetadata{Name:busybox,Attempt:0,}"
Nov 29 09:02:19 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:19.201487714Z" level=info msg="Container 64dcae39f0e638d4b6c6e188a3cb9da7d32231fa3ff9ad25ba54b2c00601f705: CDI devices from CRI Config.CDIDevices: []"
Nov 29 09:02:19 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:19.207643963Z" level=info msg="CreateContainer within sandbox \"c3b03930e26728c610c785b965715fd3b553dfa8fa71b6e35bcc2370b534d413\" for &ContainerMetadata{Name:busybox,Attempt:0,} returns container id \"64dcae39f0e638d4b6c6e188a3cb9da7d32231fa3ff9ad25ba54b2c00601f705\""
Nov 29 09:02:19 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:19.208357251Z" level=info msg="StartContainer for \"64dcae39f0e638d4b6c6e188a3cb9da7d32231fa3ff9ad25ba54b2c00601f705\""
Nov 29 09:02:19 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:19.209198742Z" level=info msg="connecting to shim 64dcae39f0e638d4b6c6e188a3cb9da7d32231fa3ff9ad25ba54b2c00601f705" address="unix:///run/containerd/s/705109ebb456d589bcc59459487d5f036c6a54c53bc3e7a7b9f9e1b41d8f56cc" protocol=ttrpc version=3
Nov 29 09:02:19 old-k8s-version-295154 containerd[663]: time="2025-11-29T09:02:19.268677673Z" level=info msg="StartContainer for \"64dcae39f0e638d4b6c6e188a3cb9da7d32231fa3ff9ad25ba54b2c00601f705\" returns successfully"
Nov 29 09:02:25 old-k8s-version-295154 containerd[663]: E1129 09:02:25.213853 663 websocket.go:100] "Unhandled Error" err="unable to upgrade websocket connection: websocket server finished before becoming ready" logger="UnhandledError"
==> coredns [84eb7f692c99059489020b59b47c169ecc9d4286a2bf7a532dae7f5d13e68795] <==
.:53
[INFO] plugin/reload: Running configuration SHA512 = b7aacdf6a6aa730aafe4d018cac9b7b5ecfb346cba84a99f64521f87aef8b4958639c1cf97967716465791d05bd38f372615327b7cb1d93c850bae532744d54d
CoreDNS-1.10.1
linux/amd64, go1.20, 055b2c3
[INFO] 127.0.0.1:46306 - 2219 "HINFO IN 2134159150006616805.6033665223682648056. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.036424572s
==> describe nodes <==
Name: old-k8s-version-295154
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=old-k8s-version-295154
kubernetes.io/os=linux
minikube.k8s.io/commit=d0eb20ec824c82ab3f24099c8b785e0a2a5789af
minikube.k8s.io/name=old-k8s-version-295154
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_11_29T09_01_47_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Sat, 29 Nov 2025 09:01:42 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: old-k8s-version-295154
AcquireTime: <unset>
RenewTime: Sat, 29 Nov 2025 09:02:26 +0000
Conditions:
Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                      Message
----             ------  -----------------                 ------------------                ------                      -------
MemoryPressure   False   Sat, 29 Nov 2025 09:02:16 +0000   Sat, 29 Nov 2025 09:01:41 +0000   KubeletHasSufficientMemory  kubelet has sufficient memory available
DiskPressure     False   Sat, 29 Nov 2025 09:02:16 +0000   Sat, 29 Nov 2025 09:01:41 +0000   KubeletHasNoDiskPressure    kubelet has no disk pressure
PIDPressure      False   Sat, 29 Nov 2025 09:02:16 +0000   Sat, 29 Nov 2025 09:01:41 +0000   KubeletHasSufficientPID     kubelet has sufficient PID available
Ready            True    Sat, 29 Nov 2025 09:02:16 +0000   Sat, 29 Nov 2025 09:02:12 +0000   KubeletReady                kubelet is posting ready status
Addresses:
InternalIP: 192.168.76.2
Hostname: old-k8s-version-295154
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863348Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863348Ki
pods: 110
System Info:
Machine ID: 9629f1d5bc1ed524a56ce23c69214c09
System UUID: 22b437c1-66e6-4b41-85ab-28edf17772d8
Boot ID: b81dce2f-73d5-4349-b473-aa1210058cb8
Kernel Version: 6.8.0-1044-gcp
OS Image: Debian GNU/Linux 12 (bookworm)
Operating System: linux
Architecture: amd64
Container Runtime Version: containerd://2.1.5
Kubelet Version: v1.28.0
Kube-Proxy Version: v1.28.0
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (9 in total)
Namespace    Name                                             CPU Requests  CPU Limits  Memory Requests  Memory Limits  Age
---------    ----                                             ------------  ----------  ---------------  -------------  ---
default      busybox                                          0 (0%)        0 (0%)      0 (0%)           0 (0%)         12s
kube-system  coredns-5dd5756b68-phw28                         100m (1%)     0 (0%)      70Mi (0%)        170Mi (0%)     30s
kube-system  etcd-old-k8s-version-295154                      100m (1%)     0 (0%)      100Mi (0%)       0 (0%)         42s
kube-system  kindnet-k4n9l                                    100m (1%)     100m (1%)   50Mi (0%)        50Mi (0%)      30s
kube-system  kube-apiserver-old-k8s-version-295154            250m (3%)     0 (0%)      0 (0%)           0 (0%)         42s
kube-system  kube-controller-manager-old-k8s-version-295154   200m (2%)     0 (0%)      0 (0%)           0 (0%)         43s
kube-system  kube-proxy-4rfb4                                 0 (0%)        0 (0%)      0 (0%)           0 (0%)         30s
kube-system  kube-scheduler-old-k8s-version-295154            100m (1%)     0 (0%)      0 (0%)           0 (0%)         42s
kube-system  storage-provisioner                              0 (0%)        0 (0%)      0 (0%)           0 (0%)         30s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource           Requests    Limits
--------           --------    ------
cpu                850m (10%)  100m (1%)
memory             220Mi (0%)  220Mi (0%)
ephemeral-storage  0 (0%)      0 (0%)
hugepages-1Gi      0 (0%)      0 (0%)
hugepages-2Mi      0 (0%)      0 (0%)
Events:
Type    Reason                   Age  From             Message
----    ------                   ---- ----             -------
Normal  Starting                 29s  kube-proxy
Normal  Starting                 43s  kubelet          Starting kubelet.
Normal  NodeAllocatableEnforced  42s  kubelet          Updated Node Allocatable limit across pods
Normal  NodeHasSufficientMemory  42s  kubelet          Node old-k8s-version-295154 status is now: NodeHasSufficientMemory
Normal  NodeHasNoDiskPressure    42s  kubelet          Node old-k8s-version-295154 status is now: NodeHasNoDiskPressure
Normal  NodeHasSufficientPID     42s  kubelet          Node old-k8s-version-295154 status is now: NodeHasSufficientPID
Normal  RegisteredNode           31s  node-controller  Node old-k8s-version-295154 event: Registered Node old-k8s-version-295154 in Controller
Normal  NodeReady                16s  kubelet          Node old-k8s-version-295154 status is now: NodeReady
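The Allocated resources table above expresses requests and limits as integer percentages of the node's allocatable capacity. A tiny sketch of that arithmetic (kubectl's exact rounding is an assumption, but integer division reproduces the table's 10% CPU and 0% memory figures):

```go
package main

import "fmt"

func main() {
	// CPU: 850m requested of 8 CPUs (8000m) allocatable -> 10% by integer
	// division, matching "cpu 850m (10%)" in the table above.
	fmt.Printf("cpu %dm (%d%%)\n", 850, 850*100/8000)
	// Memory: 220Mi (225280Ki) of 32863348Ki allocatable -> 0%,
	// matching "memory 220Mi (0%)".
	fmt.Printf("memory %dMi (%d%%)\n", 220, 225280*100/32863348)
}
```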
==> dmesg <==
[Nov29 07:17] TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.
[ +0.001881] MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.
[ +0.084003] acpi PNP0A03:00: fail to add MMCONFIG information, can't access extended configuration space under this bridge
[ +0.378167] i8042: Warning: Keylock active
[ +0.012106] device-mapper: core: CONFIG_IMA_DISABLE_HTABLE is disabled. Duplicate IMA measurements will not be recorded in the IMA log.
[ +0.460417] block sda: the capability attribute has been deprecated.
[ +0.079627] systemd[1]: Configuration file /run/systemd/system/netplan-ovs-cleanup.service is marked world-inaccessible. This has no effect as configuration data is accessible via APIs without restrictions. Proceeding anyway.
[ +0.021012] systemd[1]: /lib/systemd/system/snapd.service:23: Unknown key name 'RestartMode' in section 'Service', ignoring.
[ +5.285522] kauditd_printk_skb: 47 callbacks suppressed
==> etcd [e534f6de34cb59a48842df5c90bc3db11dfa608b2f5ab4df9fd455d5a0bc5f86] <==
{"level":"info","ts":"2025-11-29T09:01:40.832264Z","caller":"etcdserver/server.go:738","msg":"started as single-node; fast-forwarding election ticks","local-member-id":"ea7e25599daad906","forward-ticks":9,"forward-duration":"900ms","election-ticks":10,"election-timeout":"1s"}
{"level":"info","ts":"2025-11-29T09:01:40.833809Z","caller":"membership/cluster.go:421","msg":"added member","cluster-id":"6f20f2c4b2fb5f8a","local-member-id":"ea7e25599daad906","added-peer-id":"ea7e25599daad906","added-peer-peer-urls":["https://192.168.76.2:2380"]}
{"level":"info","ts":"2025-11-29T09:01:40.834831Z","caller":"embed/etcd.go:726","msg":"starting with client TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/server.crt, key = /var/lib/minikube/certs/etcd/server.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
{"level":"info","ts":"2025-11-29T09:01:40.835134Z","caller":"embed/etcd.go:278","msg":"now serving peer/client/metrics","local-member-id":"ea7e25599daad906","initial-advertise-peer-urls":["https://192.168.76.2:2380"],"listen-peer-urls":["https://192.168.76.2:2380"],"advertise-client-urls":["https://192.168.76.2:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.76.2:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
{"level":"info","ts":"2025-11-29T09:01:40.835187Z","caller":"embed/etcd.go:855","msg":"serving metrics","address":"http://127.0.0.1:2381"}
{"level":"info","ts":"2025-11-29T09:01:40.835365Z","caller":"embed/etcd.go:597","msg":"serving peer traffic","address":"192.168.76.2:2380"}
{"level":"info","ts":"2025-11-29T09:01:40.835454Z","caller":"embed/etcd.go:569","msg":"cmux::serve","address":"192.168.76.2:2380"}
{"level":"info","ts":"2025-11-29T09:01:41.123873Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 is starting a new election at term 1"}
{"level":"info","ts":"2025-11-29T09:01:41.123935Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 became pre-candidate at term 1"}
{"level":"info","ts":"2025-11-29T09:01:41.123975Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 received MsgPreVoteResp from ea7e25599daad906 at term 1"}
{"level":"info","ts":"2025-11-29T09:01:41.123993Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 became candidate at term 2"}
{"level":"info","ts":"2025-11-29T09:01:41.124004Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 received MsgVoteResp from ea7e25599daad906 at term 2"}
{"level":"info","ts":"2025-11-29T09:01:41.124048Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 became leader at term 2"}
{"level":"info","ts":"2025-11-29T09:01:41.124063Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: ea7e25599daad906 elected leader ea7e25599daad906 at term 2"}
{"level":"info","ts":"2025-11-29T09:01:41.125302Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-29T09:01:41.125326Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-29T09:01:41.125372Z","caller":"etcdserver/server.go:2571","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-29T09:01:41.125276Z","caller":"etcdserver/server.go:2062","msg":"published local member to cluster through raft","local-member-id":"ea7e25599daad906","local-member-attributes":"{Name:old-k8s-version-295154 ClientURLs:[https://192.168.76.2:2379]}","request-path":"/0/members/ea7e25599daad906/attributes","cluster-id":"6f20f2c4b2fb5f8a","publish-timeout":"7s"}
{"level":"info","ts":"2025-11-29T09:01:41.126456Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"6f20f2c4b2fb5f8a","local-member-id":"ea7e25599daad906","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-29T09:01:41.126541Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-29T09:01:41.126567Z","caller":"etcdserver/server.go:2595","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-29T09:01:41.126779Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.76.2:2379"}
{"level":"info","ts":"2025-11-29T09:01:41.127083Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2025-11-29T09:01:41.127112Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2025-11-29T09:01:41.126728Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
==> kernel <==
09:02:28 up 1:44, 0 user, load average: 2.64, 2.82, 12.39
Linux old-k8s-version-295154 6.8.0-1044-gcp #47~22.04.1-Ubuntu SMP Thu Oct 23 21:07:54 UTC 2025 x86_64 GNU/Linux
PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"
==> kindnet [c556471fd7ebd161ba2d7b8d6bae271ee70e193598e07a1f28e7e4edb21ff0ac] <==
I1129 09:02:02.479657 1 main.go:109] connected to apiserver: https://10.96.0.1:443
I1129 09:02:02.479993 1 main.go:139] hostIP = 192.168.76.2
podIP = 192.168.76.2
I1129 09:02:02.480115 1 main.go:148] setting mtu 1500 for CNI
I1129 09:02:02.480129 1 main.go:178] kindnetd IP family: "ipv4"
I1129 09:02:02.480148 1 main.go:182] noMask IPv4 subnets: [10.244.0.0/16]
time="2025-11-29T09:02:02Z" level=info msg="Created plugin 10-kube-network-policies (kindnetd, handles RunPodSandbox,RemovePodSandbox)"
I1129 09:02:02.682312 1 controller.go:377] "Starting controller" name="kube-network-policies"
I1129 09:02:02.682392 1 controller.go:381] "Waiting for informer caches to sync"
I1129 09:02:02.682406 1 shared_informer.go:350] "Waiting for caches to sync" controller="kube-network-policies"
I1129 09:02:02.682562 1 controller.go:390] nri plugin exited: failed to connect to NRI service: dial unix /var/run/nri/nri.sock: connect: no such file or directory
I1129 09:02:03.155518 1 shared_informer.go:357] "Caches are synced" controller="kube-network-policies"
I1129 09:02:03.155556 1 metrics.go:72] Registering metrics
I1129 09:02:03.155642 1 controller.go:711] "Syncing nftables rules"
I1129 09:02:12.691133 1 main.go:297] Handling node with IPs: map[192.168.76.2:{}]
I1129 09:02:12.691191 1 main.go:301] handling current node
I1129 09:02:22.684230 1 main.go:297] Handling node with IPs: map[192.168.76.2:{}]
I1129 09:02:22.684264 1 main.go:301] handling current node
==> kube-apiserver [c912b0431f5b96b6ae8d3df9e39af5a731f5b6f4a3128fbae403427258cd4010] <==
I1129 09:01:42.628432 1 shared_informer.go:318] Caches are synced for crd-autoregister
I1129 09:01:42.628473 1 aggregator.go:166] initial CRD sync complete...
I1129 09:01:42.628487 1 autoregister_controller.go:141] Starting autoregister controller
I1129 09:01:42.628498 1 cache.go:32] Waiting for caches to sync for autoregister controller
I1129 09:01:42.628507 1 cache.go:39] Caches are synced for autoregister controller
I1129 09:01:42.630276 1 controller.go:624] quota admission added evaluator for: namespaces
I1129 09:01:42.631842 1 shared_informer.go:318] Caches are synced for cluster_authentication_trust_controller
I1129 09:01:42.632653 1 cache.go:39] Caches are synced for APIServiceRegistrationController controller
I1129 09:01:42.633160 1 cache.go:39] Caches are synced for AvailableConditionController controller
I1129 09:01:42.675946 1 controller.go:624] quota admission added evaluator for: leases.coordination.k8s.io
I1129 09:01:43.534299 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I1129 09:01:43.538893 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I1129 09:01:43.538914 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I1129 09:01:44.048669 1 controller.go:624] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1129 09:01:44.089332 1 controller.go:624] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1129 09:01:44.139778 1 alloc.go:330] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W1129 09:01:44.147964 1 lease.go:263] Resetting endpoints for master service "kubernetes" to [192.168.76.2]
I1129 09:01:44.149152 1 controller.go:624] quota admission added evaluator for: endpoints
I1129 09:01:44.153475 1 controller.go:624] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1129 09:01:44.583851 1 controller.go:624] quota admission added evaluator for: serviceaccounts
I1129 09:01:45.899683 1 controller.go:624] quota admission added evaluator for: deployments.apps
I1129 09:01:45.911834 1 alloc.go:330] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I1129 09:01:45.923913 1 controller.go:624] quota admission added evaluator for: daemonsets.apps
I1129 09:01:58.190396 1 controller.go:624] quota admission added evaluator for: controllerrevisions.apps
I1129 09:01:58.345309 1 controller.go:624] quota admission added evaluator for: replicasets.apps
==> kube-controller-manager [b3d9ef849b10991879886d480043efb13728841f71afc04d4c57f7bef3ceffc8] <==
I1129 09:01:57.601489 1 shared_informer.go:318] Caches are synced for HPA
I1129 09:01:57.641964 1 shared_informer.go:318] Caches are synced for resource quota
I1129 09:01:57.693466 1 shared_informer.go:318] Caches are synced for resource quota
I1129 09:01:58.013319 1 shared_informer.go:318] Caches are synced for garbage collector
I1129 09:01:58.081463 1 shared_informer.go:318] Caches are synced for garbage collector
I1129 09:01:58.081502 1 garbagecollector.go:166] "All resource monitors have synced. Proceeding to collect garbage"
I1129 09:01:58.201293 1 event.go:307] "Event occurred" object="kube-system/kindnet" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kindnet-k4n9l"
I1129 09:01:58.203642 1 event.go:307] "Event occurred" object="kube-system/kube-proxy" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-4rfb4"
I1129 09:01:58.351467 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set coredns-5dd5756b68 to 2"
I1129 09:01:58.446469 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-rjd8l"
I1129 09:01:58.457821 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-phw28"
I1129 09:01:58.472248 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="121.660505ms"
I1129 09:01:58.490138 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="17.818584ms"
I1129 09:01:58.490294 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="98.203µs"
I1129 09:01:58.749707 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set coredns-5dd5756b68 to 1 from 2"
I1129 09:01:58.764048 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: coredns-5dd5756b68-rjd8l"
I1129 09:01:58.771830 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="24.493664ms"
I1129 09:01:58.778438 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="6.545401ms"
I1129 09:01:58.778711 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="56.414µs"
I1129 09:02:12.741856 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="137.043µs"
I1129 09:02:12.755154 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="122.723µs"
I1129 09:02:14.089302 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="163.286µs"
I1129 09:02:14.110178 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="8.287126ms"
I1129 09:02:14.110300 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="75.729µs"
I1129 09:02:17.447692 1 node_lifecycle_controller.go:1048] "Controller detected that some Nodes are Ready. Exiting master disruption mode"
==> kube-proxy [c3eb6059b5593e42d8e9ac6b43ac8b87e944eac5747f993c6bbca2acc16f180b] <==
I1129 09:01:58.837203 1 server_others.go:69] "Using iptables proxy"
I1129 09:01:58.847060 1 node.go:141] Successfully retrieved node IP: 192.168.76.2
I1129 09:01:58.872286 1 server.go:632] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I1129 09:01:58.874956 1 server_others.go:152] "Using iptables Proxier"
I1129 09:01:58.875022 1 server_others.go:421] "Detect-local-mode set to ClusterCIDR, but no cluster CIDR for family" ipFamily="IPv6"
I1129 09:01:58.875038 1 server_others.go:438] "Defaulting to no-op detect-local"
I1129 09:01:58.875085 1 proxier.go:251] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
I1129 09:01:58.875423 1 server.go:846] "Version info" version="v1.28.0"
I1129 09:01:58.875446 1 server.go:848] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1129 09:01:58.877361 1 config.go:188] "Starting service config controller"
I1129 09:01:58.877426 1 shared_informer.go:311] Waiting for caches to sync for service config
I1129 09:01:58.878055 1 config.go:97] "Starting endpoint slice config controller"
I1129 09:01:58.878080 1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
I1129 09:01:58.878567 1 config.go:315] "Starting node config controller"
I1129 09:01:58.878812 1 shared_informer.go:311] Waiting for caches to sync for node config
I1129 09:01:58.977719 1 shared_informer.go:318] Caches are synced for service config
I1129 09:01:58.978897 1 shared_informer.go:318] Caches are synced for endpoint slice config
I1129 09:01:58.979002 1 shared_informer.go:318] Caches are synced for node config
==> kube-scheduler [ec1e8ae808249468b5a57a4c1aa02a0700a8af9e46e3b394b96fda393ef3531b] <==
E1129 09:01:42.591266 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E1129 09:01:42.591281 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
W1129 09:01:43.438322 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E1129 09:01:43.438354 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
W1129 09:01:43.459244 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E1129 09:01:43.459274 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
W1129 09:01:43.466076 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E1129 09:01:43.466111 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
W1129 09:01:43.467104 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E1129 09:01:43.467131 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
W1129 09:01:43.496506 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E1129 09:01:43.496554 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
W1129 09:01:43.745308 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E1129 09:01:43.745358 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
W1129 09:01:43.782232 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E1129 09:01:43.782279 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
W1129 09:01:43.784711 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E1129 09:01:43.784785 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
W1129 09:01:43.822287 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E1129 09:01:43.822413 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
W1129 09:01:43.831935 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E1129 09:01:43.831979 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
W1129 09:01:44.009190 1 reflector.go:535] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E1129 09:01:44.009227 1 reflector.go:147] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
I1129 09:01:46.586725 1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Nov 29 09:01:57 old-k8s-version-295154 kubelet[1505]: I1129 09:01:57.557701 1505 kubelet_network.go:61] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24"
Nov 29 09:01:58 old-k8s-version-295154 kubelet[1505]: I1129 09:01:58.211770 1505 topology_manager.go:215] "Topology Admit Handler" podUID="74cdf2cd-3f3a-4be5-9a9f-6d0b67090fb8" podNamespace="kube-system" podName="kindnet-k4n9l"
Nov 29 09:01:58 old-k8s-version-295154 kubelet[1505]: I1129 09:01:58.211977 1505 topology_manager.go:215] "Topology Admit Handler" podUID="05ef67c3-0d6e-453d-a0e5-81c649c3e033" podNamespace="kube-system" podName="kube-proxy-4rfb4"
Nov 29 09:01:58 old-k8s-version-295154 kubelet[1505]: I1129 09:01:58.245664 1505 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kvjhl\" (UniqueName: \"kubernetes.io/projected/74cdf2cd-3f3a-4be5-9a9f-6d0b67090fb8-kube-api-access-kvjhl\") pod \"kindnet-k4n9l\" (UID: \"74cdf2cd-3f3a-4be5-9a9f-6d0b67090fb8\") " pod="kube-system/kindnet-k4n9l"
Nov 29 09:01:58 old-k8s-version-295154 kubelet[1505]: I1129 09:01:58.245757 1505 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-cfg\" (UniqueName: \"kubernetes.io/host-path/74cdf2cd-3f3a-4be5-9a9f-6d0b67090fb8-cni-cfg\") pod \"kindnet-k4n9l\" (UID: \"74cdf2cd-3f3a-4be5-9a9f-6d0b67090fb8\") " pod="kube-system/kindnet-k4n9l"
Nov 29 09:01:58 old-k8s-version-295154 kubelet[1505]: I1129 09:01:58.245804 1505 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/74cdf2cd-3f3a-4be5-9a9f-6d0b67090fb8-lib-modules\") pod \"kindnet-k4n9l\" (UID: \"74cdf2cd-3f3a-4be5-9a9f-6d0b67090fb8\") " pod="kube-system/kindnet-k4n9l"
Nov 29 09:01:58 old-k8s-version-295154 kubelet[1505]: I1129 09:01:58.245867 1505 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/05ef67c3-0d6e-453d-a0e5-81c649c3e033-xtables-lock\") pod \"kube-proxy-4rfb4\" (UID: \"05ef67c3-0d6e-453d-a0e5-81c649c3e033\") " pod="kube-system/kube-proxy-4rfb4"
Nov 29 09:01:58 old-k8s-version-295154 kubelet[1505]: I1129 09:01:58.245918 1505 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/05ef67c3-0d6e-453d-a0e5-81c649c3e033-lib-modules\") pod \"kube-proxy-4rfb4\" (UID: \"05ef67c3-0d6e-453d-a0e5-81c649c3e033\") " pod="kube-system/kube-proxy-4rfb4"
Nov 29 09:01:58 old-k8s-version-295154 kubelet[1505]: I1129 09:01:58.245964 1505 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/05ef67c3-0d6e-453d-a0e5-81c649c3e033-kube-proxy\") pod \"kube-proxy-4rfb4\" (UID: \"05ef67c3-0d6e-453d-a0e5-81c649c3e033\") " pod="kube-system/kube-proxy-4rfb4"
Nov 29 09:01:58 old-k8s-version-295154 kubelet[1505]: I1129 09:01:58.245999 1505 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/74cdf2cd-3f3a-4be5-9a9f-6d0b67090fb8-xtables-lock\") pod \"kindnet-k4n9l\" (UID: \"74cdf2cd-3f3a-4be5-9a9f-6d0b67090fb8\") " pod="kube-system/kindnet-k4n9l"
Nov 29 09:01:58 old-k8s-version-295154 kubelet[1505]: I1129 09:01:58.246031 1505 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l6tpd\" (UniqueName: \"kubernetes.io/projected/05ef67c3-0d6e-453d-a0e5-81c649c3e033-kube-api-access-l6tpd\") pod \"kube-proxy-4rfb4\" (UID: \"05ef67c3-0d6e-453d-a0e5-81c649c3e033\") " pod="kube-system/kube-proxy-4rfb4"
Nov 29 09:01:59 old-k8s-version-295154 kubelet[1505]: I1129 09:01:59.051481 1505 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kube-proxy-4rfb4" podStartSLOduration=1.051403893 podCreationTimestamp="2025-11-29 09:01:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 09:01:59.051034434 +0000 UTC m=+13.185091147" watchObservedRunningTime="2025-11-29 09:01:59.051403893 +0000 UTC m=+13.185460607"
Nov 29 09:02:03 old-k8s-version-295154 kubelet[1505]: I1129 09:02:03.075069 1505 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kindnet-k4n9l" podStartSLOduration=1.8021440370000001 podCreationTimestamp="2025-11-29 09:01:58 +0000 UTC" firstStartedPulling="2025-11-29 09:01:58.884230342 +0000 UTC m=+13.018287046" lastFinishedPulling="2025-11-29 09:02:02.157002868 +0000 UTC m=+16.291059564" observedRunningTime="2025-11-29 09:02:03.074620988 +0000 UTC m=+17.208677701" watchObservedRunningTime="2025-11-29 09:02:03.074916555 +0000 UTC m=+17.208973271"
Nov 29 09:02:12 old-k8s-version-295154 kubelet[1505]: I1129 09:02:12.718189 1505 kubelet_node_status.go:493] "Fast updating node status as it just became ready"
Nov 29 09:02:12 old-k8s-version-295154 kubelet[1505]: I1129 09:02:12.741770 1505 topology_manager.go:215] "Topology Admit Handler" podUID="7fc2b8dd-43dd-43df-8887-9ffa6de36fb4" podNamespace="kube-system" podName="coredns-5dd5756b68-phw28"
Nov 29 09:02:12 old-k8s-version-295154 kubelet[1505]: I1129 09:02:12.742156 1505 topology_manager.go:215] "Topology Admit Handler" podUID="359871fd-a77c-430a-87c1-b313992718e2" podNamespace="kube-system" podName="storage-provisioner"
Nov 29 09:02:12 old-k8s-version-295154 kubelet[1505]: I1129 09:02:12.838446 1505 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sztkn\" (UniqueName: \"kubernetes.io/projected/7fc2b8dd-43dd-43df-8887-9ffa6de36fb4-kube-api-access-sztkn\") pod \"coredns-5dd5756b68-phw28\" (UID: \"7fc2b8dd-43dd-43df-8887-9ffa6de36fb4\") " pod="kube-system/coredns-5dd5756b68-phw28"
Nov 29 09:02:12 old-k8s-version-295154 kubelet[1505]: I1129 09:02:12.838527 1505 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2ghrm\" (UniqueName: \"kubernetes.io/projected/359871fd-a77c-430a-87c1-b313992718e2-kube-api-access-2ghrm\") pod \"storage-provisioner\" (UID: \"359871fd-a77c-430a-87c1-b313992718e2\") " pod="kube-system/storage-provisioner"
Nov 29 09:02:12 old-k8s-version-295154 kubelet[1505]: I1129 09:02:12.838708 1505 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7fc2b8dd-43dd-43df-8887-9ffa6de36fb4-config-volume\") pod \"coredns-5dd5756b68-phw28\" (UID: \"7fc2b8dd-43dd-43df-8887-9ffa6de36fb4\") " pod="kube-system/coredns-5dd5756b68-phw28"
Nov 29 09:02:12 old-k8s-version-295154 kubelet[1505]: I1129 09:02:12.838811 1505 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/359871fd-a77c-430a-87c1-b313992718e2-tmp\") pod \"storage-provisioner\" (UID: \"359871fd-a77c-430a-87c1-b313992718e2\") " pod="kube-system/storage-provisioner"
Nov 29 09:02:14 old-k8s-version-295154 kubelet[1505]: I1129 09:02:14.089000 1505 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/coredns-5dd5756b68-phw28" podStartSLOduration=16.088943107 podCreationTimestamp="2025-11-29 09:01:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 09:02:14.088869179 +0000 UTC m=+28.222925894" watchObservedRunningTime="2025-11-29 09:02:14.088943107 +0000 UTC m=+28.222999821"
Nov 29 09:02:14 old-k8s-version-295154 kubelet[1505]: I1129 09:02:14.111723 1505 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=16.111665904 podCreationTimestamp="2025-11-29 09:01:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 09:02:14.111613929 +0000 UTC m=+28.245670654" watchObservedRunningTime="2025-11-29 09:02:14.111665904 +0000 UTC m=+28.245722610"
Nov 29 09:02:16 old-k8s-version-295154 kubelet[1505]: I1129 09:02:16.130277 1505 topology_manager.go:215] "Topology Admit Handler" podUID="54baf2f4-8de5-4f66-92ac-f5315174d940" podNamespace="default" podName="busybox"
Nov 29 09:02:16 old-k8s-version-295154 kubelet[1505]: I1129 09:02:16.160532 1505 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wj46k\" (UniqueName: \"kubernetes.io/projected/54baf2f4-8de5-4f66-92ac-f5315174d940-kube-api-access-wj46k\") pod \"busybox\" (UID: \"54baf2f4-8de5-4f66-92ac-f5315174d940\") " pod="default/busybox"
Nov 29 09:02:20 old-k8s-version-295154 kubelet[1505]: I1129 09:02:20.102644 1505 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="default/busybox" podStartSLOduration=1.465512975 podCreationTimestamp="2025-11-29 09:02:16 +0000 UTC" firstStartedPulling="2025-11-29 09:02:16.555803596 +0000 UTC m=+30.689860305" lastFinishedPulling="2025-11-29 09:02:19.192874383 +0000 UTC m=+33.326931083" observedRunningTime="2025-11-29 09:02:20.102453338 +0000 UTC m=+34.236510058" watchObservedRunningTime="2025-11-29 09:02:20.102583753 +0000 UTC m=+34.236640469"
==> storage-provisioner [c2b64aca34f8b72337fd1dd9bda969ab607f739b3b5bd64a9962706bb51f1368] <==
I1129 09:02:13.242146 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1129 09:02:13.250320 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1129 09:02:13.250375 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I1129 09:02:13.260646 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I1129 09:02:13.260835 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"3d38b917-49d9-4ce8-b6d4-33e78e4354a6", APIVersion:"v1", ResourceVersion:"393", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' old-k8s-version-295154_6170b45d-8612-41e5-bb3d-e5fe156c196d became leader
I1129 09:02:13.260885 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_old-k8s-version-295154_6170b45d-8612-41e5-bb3d-e5fe156c196d!
I1129 09:02:13.362157 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_old-k8s-version-295154_6170b45d-8612-41e5-bb3d-e5fe156c196d!
-- /stdout --
helpers_test.go:262: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p old-k8s-version-295154 -n old-k8s-version-295154
helpers_test.go:269: (dbg) Run: kubectl --context old-k8s-version-295154 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:293: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: end of post-mortem logs <<<
helpers_test.go:294: ---------------------/post-mortem---------------------------------
--- FAIL: TestStartStop/group/old-k8s-version/serial/DeployApp (13.54s)