=== RUN TestMultiControlPlane/serial/DeployApp
ha_test.go:128: (dbg) Run: out/minikube-linux-amd64 -p ha-326307 kubectl -- apply -f ./testdata/ha/ha-pod-dns-test.yaml
ha_test.go:133: (dbg) Run: out/minikube-linux-amd64 -p ha-326307 kubectl -- rollout status deployment/busybox
E0919 22:27:11.705454 18210 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/addons-019551/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0919 22:27:25.077360 18210 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/functional-541880/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0919 22:27:25.083919 18210 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/functional-541880/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0919 22:27:25.095404 18210 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/functional-541880/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0919 22:27:25.116872 18210 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/functional-541880/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0919 22:27:25.158293 18210 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/functional-541880/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0919 22:27:25.239850 18210 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/functional-541880/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0919 22:27:25.401535 18210 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/functional-541880/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0919 22:27:25.723258 18210 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/functional-541880/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0919 22:27:26.365324 18210 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/functional-541880/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0919 22:27:27.647387 18210 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/functional-541880/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0919 22:27:30.209035 18210 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/functional-541880/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0919 22:27:35.331291 18210 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/functional-541880/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0919 22:27:39.414736 18210 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/addons-019551/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0919 22:27:45.572748 18210 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/functional-541880/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0919 22:28:06.054322 18210 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/functional-541880/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0919 22:28:47.016195 18210 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/functional-541880/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0919 22:30:08.941452 18210 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/functional-541880/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0919 22:32:11.705971 18210 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/addons-019551/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0919 22:32:25.079362 18210 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/functional-541880/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0919 22:32:52.783294 18210 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/functional-541880/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
ha_test.go:133: (dbg) Non-zero exit: out/minikube-linux-amd64 -p ha-326307 kubectl -- rollout status deployment/busybox: exit status 1 (10m4.235325594s)
-- stdout --
Waiting for deployment "busybox" rollout to finish: 0 out of 3 new replicas have been updated...
Waiting for deployment "busybox" rollout to finish: 0 of 3 updated replicas are available...
Waiting for deployment "busybox" rollout to finish: 0 of 8 updated replicas are available...
Waiting for deployment "busybox" rollout to finish: 0 of 3 updated replicas are available...
Waiting for deployment "busybox" rollout to finish: 1 of 3 updated replicas are available...
Waiting for deployment "busybox" rollout to finish: 2 of 3 updated replicas are available...
-- /stdout --
** stderr **
error: deployment "busybox" exceeded its progress deadline
** /stderr **
ha_test.go:135: failed to deploy busybox to ha (multi-control plane) cluster
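The rollout never completed: one replica stayed unavailable until the progress deadline expired. As a rough follow-up (not part of the test; the profile and deployment names are taken from the log above, the subcommands are standard kubectl and only a suggestion), the stalled replica could be inspected with:
# hedged diagnostics for the stalled rollout, run against the same profile
out/minikube-linux-amd64 -p ha-326307 kubectl -- describe deployment busybox          # rollout conditions and the unavailable replica
out/minikube-linux-amd64 -p ha-326307 kubectl -- get pods -o wide                     # node placement and phase of each busybox pod
out/minikube-linux-amd64 -p ha-326307 kubectl -- get events --sort-by=.lastTimestamp  # scheduling / image-pull / CNI events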
ha_test.go:140: (dbg) Run: out/minikube-linux-amd64 -p ha-326307 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.4 10.244.1.2'\n\n-- /stdout --"
I0919 22:35:23.143141 18210 retry.go:31] will retry after 622.646629ms: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.4 10.244.1.2'\n\n-- /stdout --"
ha_test.go:140: (dbg) Run: out/minikube-linux-amd64 -p ha-326307 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.4 10.244.1.2'\n\n-- /stdout --"
I0919 22:35:23.895894 18210 retry.go:31] will retry after 1.274079667s: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.4 10.244.1.2'\n\n-- /stdout --"
ha_test.go:140: (dbg) Run: out/minikube-linux-amd64 -p ha-326307 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.4 10.244.1.2'\n\n-- /stdout --"
I0919 22:35:25.294863 18210 retry.go:31] will retry after 2.357002104s: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.4 10.244.1.2'\n\n-- /stdout --"
ha_test.go:140: (dbg) Run: out/minikube-linux-amd64 -p ha-326307 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.4 10.244.1.2'\n\n-- /stdout --"
I0919 22:35:27.774780 18210 retry.go:31] will retry after 3.365068968s: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.4 10.244.1.2'\n\n-- /stdout --"
ha_test.go:140: (dbg) Run: out/minikube-linux-amd64 -p ha-326307 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.4 10.244.1.2'\n\n-- /stdout --"
I0919 22:35:31.263515 18210 retry.go:31] will retry after 5.283067733s: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.4 10.244.1.2'\n\n-- /stdout --"
ha_test.go:140: (dbg) Run: out/minikube-linux-amd64 -p ha-326307 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.4 10.244.1.2'\n\n-- /stdout --"
I0919 22:35:36.667794 18210 retry.go:31] will retry after 10.062930097s: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.4 10.244.1.2'\n\n-- /stdout --"
ha_test.go:140: (dbg) Run: out/minikube-linux-amd64 -p ha-326307 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.4 10.244.1.2'\n\n-- /stdout --"
I0919 22:35:46.857777 18210 retry.go:31] will retry after 7.223020536s: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.4 10.244.1.2'\n\n-- /stdout --"
ha_test.go:140: (dbg) Run: out/minikube-linux-amd64 -p ha-326307 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.4 10.244.1.2'\n\n-- /stdout --"
I0919 22:35:54.200871 18210 retry.go:31] will retry after 18.199948632s: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.4 10.244.1.2'\n\n-- /stdout --"
ha_test.go:140: (dbg) Run: out/minikube-linux-amd64 -p ha-326307 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.4 10.244.1.2'\n\n-- /stdout --"
I0919 22:36:12.521583 18210 retry.go:31] will retry after 15.567553254s: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.4 10.244.1.2'\n\n-- /stdout --"
ha_test.go:140: (dbg) Run: out/minikube-linux-amd64 -p ha-326307 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.4 10.244.1.2'\n\n-- /stdout --"
I0919 22:36:28.220548 18210 retry.go:31] will retry after 53.864648201s: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.4 10.244.1.2'\n\n-- /stdout --"
E0919 22:37:11.705879 18210 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/addons-019551/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
ha_test.go:140: (dbg) Run: out/minikube-linux-amd64 -p ha-326307 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.4 10.244.1.2'\n\n-- /stdout --"
ha_test.go:159: failed to resolve pod IPs: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.0.4 10.244.1.2'\n\n-- /stdout --"
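Every poll returned only two pod IPs (10.244.0.4 and 10.244.1.2), so the third busybox replica apparently never reached a state in which it was assigned an address. A hedged way to see name, IP and phase side by side, reusing the same kubectl passthrough as ha_test.go:140 (the jsonpath range expression below is a suggestion, not what the test runs):
# list each pod with its IP and phase, one per line
out/minikube-linux-amd64 -p ha-326307 kubectl -- get pods -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.status.podIP}{"\t"}{.status.phase}{"\n"}{end}'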
ha_test.go:163: (dbg) Run: out/minikube-linux-amd64 -p ha-326307 kubectl -- get pods -o jsonpath='{.items[*].metadata.name}'
ha_test.go:171: (dbg) Run: out/minikube-linux-amd64 -p ha-326307 kubectl -- exec busybox-7b57f96db7-jdczt -- nslookup kubernetes.io
ha_test.go:171: (dbg) Non-zero exit: out/minikube-linux-amd64 -p ha-326307 kubectl -- exec busybox-7b57f96db7-jdczt -- nslookup kubernetes.io: exit status 1 (142.860919ms)
** stderr **
error: Internal error occurred: unable to upgrade connection: container not found ("busybox")
** /stderr **
ha_test.go:173: Pod busybox-7b57f96db7-jdczt could not resolve 'kubernetes.io': exit status 1
ha_test.go:171: (dbg) Run: out/minikube-linux-amd64 -p ha-326307 kubectl -- exec busybox-7b57f96db7-m8swj -- nslookup kubernetes.io
ha_test.go:171: (dbg) Run: out/minikube-linux-amd64 -p ha-326307 kubectl -- exec busybox-7b57f96db7-tfpvf -- nslookup kubernetes.io
ha_test.go:181: (dbg) Run: out/minikube-linux-amd64 -p ha-326307 kubectl -- exec busybox-7b57f96db7-jdczt -- nslookup kubernetes.default
ha_test.go:181: (dbg) Non-zero exit: out/minikube-linux-amd64 -p ha-326307 kubectl -- exec busybox-7b57f96db7-jdczt -- nslookup kubernetes.default: exit status 1 (134.43079ms)
** stderr **
error: Internal error occurred: unable to upgrade connection: container not found ("busybox")
** /stderr **
ha_test.go:183: Pod busybox-7b57f96db7-jdczt could not resolve 'kubernetes.default': exit status 1
ha_test.go:181: (dbg) Run: out/minikube-linux-amd64 -p ha-326307 kubectl -- exec busybox-7b57f96db7-m8swj -- nslookup kubernetes.default
ha_test.go:181: (dbg) Run: out/minikube-linux-amd64 -p ha-326307 kubectl -- exec busybox-7b57f96db7-tfpvf -- nslookup kubernetes.default
ha_test.go:189: (dbg) Run: out/minikube-linux-amd64 -p ha-326307 kubectl -- exec busybox-7b57f96db7-jdczt -- nslookup kubernetes.default.svc.cluster.local
ha_test.go:189: (dbg) Non-zero exit: out/minikube-linux-amd64 -p ha-326307 kubectl -- exec busybox-7b57f96db7-jdczt -- nslookup kubernetes.default.svc.cluster.local: exit status 1 (134.80458ms)
** stderr **
error: Internal error occurred: unable to upgrade connection: container not found ("busybox")
** /stderr **
ha_test.go:191: Pod busybox-7b57f96db7-jdczt could not resolve local service (kubernetes.default.svc.cluster.local): exit status 1
ha_test.go:189: (dbg) Run: out/minikube-linux-amd64 -p ha-326307 kubectl -- exec busybox-7b57f96db7-m8swj -- nslookup kubernetes.default.svc.cluster.local
ha_test.go:189: (dbg) Run: out/minikube-linux-amd64 -p ha-326307 kubectl -- exec busybox-7b57f96db7-tfpvf -- nslookup kubernetes.default.svc.cluster.local
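The exec failures above all hit the same pod: "unable to upgrade connection: container not found" typically means the busybox container in busybox-7b57f96db7-jdczt was not running when kubectl tried to attach, which is consistent with the missing third pod IP earlier. A hedged check of that pod's container state (the pod name comes from the log; the jsonpath and describe invocations are suggestions):
# container state and recent events for the pod that could not be exec'd into
out/minikube-linux-amd64 -p ha-326307 kubectl -- get pod busybox-7b57f96db7-jdczt -o jsonpath='{.status.containerStatuses[*].state}'
out/minikube-linux-amd64 -p ha-326307 kubectl -- describe pod busybox-7b57f96db7-jdczt   # the events section usually shows why the container is not running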
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestMultiControlPlane/serial/DeployApp]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======> post-mortem[TestMultiControlPlane/serial/DeployApp]: docker inspect <======
helpers_test.go:239: (dbg) Run: docker inspect ha-326307
helpers_test.go:243: (dbg) docker inspect ha-326307:
-- stdout --
[
{
"Id": "5e0f1fe86b0818450b29e917af8d9dda81e310353b20f615454f610bda0c56f3",
"Created": "2025-09-19T22:23:18.619000062Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 69921,
"ExitCode": 0,
"Error": "",
"StartedAt": "2025-09-19T22:23:18.670514121Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:c6b5532e987b5b4f5fc9cb0336e378ed49c0542bad8cbfc564b71e977a6269de",
"ResolvConfPath": "/var/lib/docker/containers/5e0f1fe86b0818450b29e917af8d9dda81e310353b20f615454f610bda0c56f3/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/5e0f1fe86b0818450b29e917af8d9dda81e310353b20f615454f610bda0c56f3/hostname",
"HostsPath": "/var/lib/docker/containers/5e0f1fe86b0818450b29e917af8d9dda81e310353b20f615454f610bda0c56f3/hosts",
"LogPath": "/var/lib/docker/containers/5e0f1fe86b0818450b29e917af8d9dda81e310353b20f615454f610bda0c56f3/5e0f1fe86b0818450b29e917af8d9dda81e310353b20f615454f610bda0c56f3-json.log",
"Name": "/ha-326307",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"ha-326307:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {
"max-size": "100m"
}
},
"NetworkMode": "ha-326307",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "private",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 3221225472,
"NanoCpus": 0,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 6442450944,
"MemorySwappiness": null,
"OomKillDisable": null,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"ID": "5e0f1fe86b0818450b29e917af8d9dda81e310353b20f615454f610bda0c56f3",
"LowerDir": "/var/lib/docker/overlay2/30bd57649629d477287235001469bfd41a7805ca0a999738e74eba07285cd630-init/diff:/var/lib/docker/overlay2/a03f655342f0080430c48b45e821bb7f49cd991d97a882d9cb55b520de280887/diff",
"MergedDir": "/var/lib/docker/overlay2/30bd57649629d477287235001469bfd41a7805ca0a999738e74eba07285cd630/merged",
"UpperDir": "/var/lib/docker/overlay2/30bd57649629d477287235001469bfd41a7805ca0a999738e74eba07285cd630/diff",
"WorkDir": "/var/lib/docker/overlay2/30bd57649629d477287235001469bfd41a7805ca0a999738e74eba07285cd630/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "ha-326307",
"Source": "/var/lib/docker/volumes/ha-326307/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "ha-326307",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "ha-326307",
"name.minikube.sigs.k8s.io": "ha-326307",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "8b9c61cd0152986e2b265b3cf0a7628b1c049e495ce30493b8e54f6b9446115f",
"SandboxKey": "/var/run/docker/netns/8b9c61cd0152",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32788"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32789"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32792"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32790"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32791"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"ha-326307": {
"IPAMConfig": {
"IPv4Address": "192.168.49.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "aa:80:09:d2:65:c4",
"DriverOpts": null,
"GwPriority": 0,
"NetworkID": "465af21e2d8d112a34f14f7c3ab89eac4e6e57f582ff8e32b514381f55dd085e",
"EndpointID": "f35735061c65841c2c1ba7f2859db25885582588fa8f2d14e3a015320f6c3fc4",
"Gateway": "192.168.49.1",
"IPAddress": "192.168.49.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"ha-326307",
"5e0f1fe86b08"
]
}
}
}
}
]
-- /stdout --
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p ha-326307 -n ha-326307
helpers_test.go:252: <<< TestMultiControlPlane/serial/DeployApp FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestMultiControlPlane/serial/DeployApp]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-amd64 -p ha-326307 logs -n 25
E0919 22:37:25.077815 18210 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/functional-541880/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
helpers_test.go:255: (dbg) Done: out/minikube-linux-amd64 -p ha-326307 logs -n 25: (1.330116508s)
helpers_test.go:260: TestMultiControlPlane/serial/DeployApp logs:
-- stdout --
==> Audit <==
┌─────────┬───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬───────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼───────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ delete │ -p functional-541880 │ functional-541880 │ jenkins │ v1.37.0 │ 19 Sep 25 22:23 UTC │ 19 Sep 25 22:23 UTC │
│ start │ ha-326307 start --ha --memory 3072 --wait true --alsologtostderr -v 5 --driver=docker --container-runtime=containerd │ ha-326307 │ jenkins │ v1.37.0 │ 19 Sep 25 22:23 UTC │ 19 Sep 25 22:25 UTC │
│ kubectl │ ha-326307 kubectl -- apply -f ./testdata/ha/ha-pod-dns-test.yaml │ ha-326307 │ jenkins │ v1.37.0 │ 19 Sep 25 22:25 UTC │ 19 Sep 25 22:25 UTC │
│ kubectl │ ha-326307 kubectl -- rollout status deployment/busybox │ ha-326307 │ jenkins │ v1.37.0 │ 19 Sep 25 22:25 UTC │ │
│ kubectl │ ha-326307 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}' │ ha-326307 │ jenkins │ v1.37.0 │ 19 Sep 25 22:35 UTC │ 19 Sep 25 22:35 UTC │
│ kubectl │ ha-326307 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}' │ ha-326307 │ jenkins │ v1.37.0 │ 19 Sep 25 22:35 UTC │ 19 Sep 25 22:35 UTC │
│ kubectl │ ha-326307 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}' │ ha-326307 │ jenkins │ v1.37.0 │ 19 Sep 25 22:35 UTC │ 19 Sep 25 22:35 UTC │
│ kubectl │ ha-326307 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}' │ ha-326307 │ jenkins │ v1.37.0 │ 19 Sep 25 22:35 UTC │ 19 Sep 25 22:35 UTC │
│ kubectl │ ha-326307 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}' │ ha-326307 │ jenkins │ v1.37.0 │ 19 Sep 25 22:35 UTC │ 19 Sep 25 22:35 UTC │
│ kubectl │ ha-326307 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}' │ ha-326307 │ jenkins │ v1.37.0 │ 19 Sep 25 22:35 UTC │ 19 Sep 25 22:35 UTC │
│ kubectl │ ha-326307 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}' │ ha-326307 │ jenkins │ v1.37.0 │ 19 Sep 25 22:35 UTC │ 19 Sep 25 22:35 UTC │
│ kubectl │ ha-326307 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}' │ ha-326307 │ jenkins │ v1.37.0 │ 19 Sep 25 22:35 UTC │ 19 Sep 25 22:35 UTC │
│ kubectl │ ha-326307 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}' │ ha-326307 │ jenkins │ v1.37.0 │ 19 Sep 25 22:36 UTC │ 19 Sep 25 22:36 UTC │
│ kubectl │ ha-326307 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}' │ ha-326307 │ jenkins │ v1.37.0 │ 19 Sep 25 22:36 UTC │ 19 Sep 25 22:36 UTC │
│ kubectl │ ha-326307 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}' │ ha-326307 │ jenkins │ v1.37.0 │ 19 Sep 25 22:37 UTC │ 19 Sep 25 22:37 UTC │
│ kubectl │ ha-326307 kubectl -- get pods -o jsonpath='{.items[*].metadata.name}' │ ha-326307 │ jenkins │ v1.37.0 │ 19 Sep 25 22:37 UTC │ 19 Sep 25 22:37 UTC │
│ kubectl │ ha-326307 kubectl -- exec busybox-7b57f96db7-jdczt -- nslookup kubernetes.io │ ha-326307 │ jenkins │ v1.37.0 │ 19 Sep 25 22:37 UTC │ │
│ kubectl │ ha-326307 kubectl -- exec busybox-7b57f96db7-m8swj -- nslookup kubernetes.io │ ha-326307 │ jenkins │ v1.37.0 │ 19 Sep 25 22:37 UTC │ 19 Sep 25 22:37 UTC │
│ kubectl │ ha-326307 kubectl -- exec busybox-7b57f96db7-tfpvf -- nslookup kubernetes.io │ ha-326307 │ jenkins │ v1.37.0 │ 19 Sep 25 22:37 UTC │ 19 Sep 25 22:37 UTC │
│ kubectl │ ha-326307 kubectl -- exec busybox-7b57f96db7-jdczt -- nslookup kubernetes.default │ ha-326307 │ jenkins │ v1.37.0 │ 19 Sep 25 22:37 UTC │ │
│ kubectl │ ha-326307 kubectl -- exec busybox-7b57f96db7-m8swj -- nslookup kubernetes.default │ ha-326307 │ jenkins │ v1.37.0 │ 19 Sep 25 22:37 UTC │ 19 Sep 25 22:37 UTC │
│ kubectl │ ha-326307 kubectl -- exec busybox-7b57f96db7-tfpvf -- nslookup kubernetes.default │ ha-326307 │ jenkins │ v1.37.0 │ 19 Sep 25 22:37 UTC │ 19 Sep 25 22:37 UTC │
│ kubectl │ ha-326307 kubectl -- exec busybox-7b57f96db7-jdczt -- nslookup kubernetes.default.svc.cluster.local │ ha-326307 │ jenkins │ v1.37.0 │ 19 Sep 25 22:37 UTC │ │
│ kubectl │ ha-326307 kubectl -- exec busybox-7b57f96db7-m8swj -- nslookup kubernetes.default.svc.cluster.local │ ha-326307 │ jenkins │ v1.37.0 │ 19 Sep 25 22:37 UTC │ 19 Sep 25 22:37 UTC │
│ kubectl │ ha-326307 kubectl -- exec busybox-7b57f96db7-tfpvf -- nslookup kubernetes.default.svc.cluster.local │ ha-326307 │ jenkins │ v1.37.0 │ 19 Sep 25 22:37 UTC │ 19 Sep 25 22:37 UTC │
└─────────┴───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴───────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/09/19 22:23:13
Running on machine: ubuntu-20-agent-8
Binary: Built with gc go1.24.6 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0919 22:23:13.527478 69358 out.go:360] Setting OutFile to fd 1 ...
I0919 22:23:13.527574 69358 out.go:408] TERM=,COLORTERM=, which probably does not support color
I0919 22:23:13.527579 69358 out.go:374] Setting ErrFile to fd 2...
I0919 22:23:13.527586 69358 out.go:408] TERM=,COLORTERM=, which probably does not support color
I0919 22:23:13.527823 69358 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21594-14678/.minikube/bin
I0919 22:23:13.528355 69358 out.go:368] Setting JSON to false
I0919 22:23:13.529260 69358 start.go:130] hostinfo: {"hostname":"ubuntu-20-agent-8","uptime":3938,"bootTime":1758316656,"procs":193,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1037-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I0919 22:23:13.529345 69358 start.go:140] virtualization: kvm guest
I0919 22:23:13.531661 69358 out.go:179] * [ha-326307] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
I0919 22:23:13.533198 69358 notify.go:220] Checking for updates...
I0919 22:23:13.533231 69358 out.go:179] - MINIKUBE_LOCATION=21594
I0919 22:23:13.534827 69358 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0919 22:23:13.536340 69358 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21594-14678/kubeconfig
I0919 22:23:13.537773 69358 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21594-14678/.minikube
I0919 22:23:13.539372 69358 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64
I0919 22:23:13.541189 69358 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I0919 22:23:13.542697 69358 driver.go:421] Setting default libvirt URI to qemu:///system
I0919 22:23:13.568228 69358 docker.go:123] docker version: linux-28.4.0:Docker Engine - Community
I0919 22:23:13.568380 69358 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0919 22:23:13.622546 69358 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:24 OomKillDisable:false NGoroutines:46 SystemTime:2025-09-19 22:23:13.612893654 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:6.8.0-1037-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652178944 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-8 Labels:[] ExperimentalBuild:false ServerVersion:28.4.0 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.28.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.39.4] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner (EXPERIMENTAL) Vendor:Docker Inc. Version:v0.1.39] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0919 22:23:13.622646 69358 docker.go:318] overlay module found
I0919 22:23:13.624668 69358 out.go:179] * Using the docker driver based on user configuration
I0919 22:23:13.626116 69358 start.go:304] selected driver: docker
I0919 22:23:13.626134 69358 start.go:918] validating driver "docker" against <nil>
I0919 22:23:13.626147 69358 start.go:929] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0919 22:23:13.626725 69358 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0919 22:23:13.684385 69358 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:24 OomKillDisable:false NGoroutines:46 SystemTime:2025-09-19 22:23:13.672811393 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:6.8.0-1037-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652178944 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-8 Labels:[] ExperimentalBuild:false ServerVersion:28.4.0 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.28.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.39.4] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner (EXPERIMENTAL) Vendor:Docker Inc. Version:v0.1.39] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0919 22:23:13.684569 69358 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I0919 22:23:13.684775 69358 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0919 22:23:13.686618 69358 out.go:179] * Using Docker driver with root privileges
I0919 22:23:13.687924 69358 cni.go:84] Creating CNI manager for ""
I0919 22:23:13.688000 69358 cni.go:136] multinode detected (0 nodes found), recommending kindnet
I0919 22:23:13.688014 69358 start_flags.go:336] Found "CNI" CNI - setting NetworkPlugin=cni
I0919 22:23:13.688089 69358 start.go:348] cluster config:
{Name:ha-326307 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:ha-326307 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0919 22:23:13.689601 69358 out.go:179] * Starting "ha-326307" primary control-plane node in "ha-326307" cluster
I0919 22:23:13.691305 69358 cache.go:123] Beginning downloading kic base image for docker with containerd
I0919 22:23:13.692823 69358 out.go:179] * Pulling base image v0.0.48 ...
I0919 22:23:13.694304 69358 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime containerd
I0919 22:23:13.694378 69358 preload.go:146] Found local preload: /home/jenkins/minikube-integration/21594-14678/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-containerd-overlay2-amd64.tar.lz4
I0919 22:23:13.694398 69358 cache.go:58] Caching tarball of preloaded images
I0919 22:23:13.694426 69358 image.go:81] Checking for gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon
I0919 22:23:13.694515 69358 preload.go:172] Found /home/jenkins/minikube-integration/21594-14678/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-containerd-overlay2-amd64.tar.lz4 in cache, skipping download
I0919 22:23:13.694533 69358 cache.go:61] Finished verifying existence of preloaded tar for v1.34.0 on containerd
I0919 22:23:13.694981 69358 profile.go:143] Saving config to /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/config.json ...
I0919 22:23:13.695014 69358 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/config.json: {Name:mk9e3af266bcfbabd18624d7d22535c6f1841e44 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:23:13.716737 69358 image.go:100] Found gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon, skipping pull
I0919 22:23:13.716759 69358 cache.go:147] gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 exists in daemon, skipping load
I0919 22:23:13.716776 69358 cache.go:232] Successfully downloaded all kic artifacts
I0919 22:23:13.716797 69358 start.go:360] acquireMachinesLock for ha-326307: {Name:mk42b79b90944aab63c8b37c2f94e04ca1ebec1c Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0919 22:23:13.716893 69358 start.go:364] duration metric: took 80.537µs to acquireMachinesLock for "ha-326307"
I0919 22:23:13.716915 69358 start.go:93] Provisioning new machine with config: &{Name:ha-326307 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:ha-326307 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I0919 22:23:13.716974 69358 start.go:125] createHost starting for "" (driver="docker")
I0919 22:23:13.719062 69358 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I0919 22:23:13.719317 69358 start.go:159] libmachine.API.Create for "ha-326307" (driver="docker")
I0919 22:23:13.719352 69358 client.go:168] LocalClient.Create starting
I0919 22:23:13.719447 69358 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21594-14678/.minikube/certs/ca.pem
I0919 22:23:13.719502 69358 main.go:141] libmachine: Decoding PEM data...
I0919 22:23:13.719517 69358 main.go:141] libmachine: Parsing certificate...
I0919 22:23:13.719580 69358 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21594-14678/.minikube/certs/cert.pem
I0919 22:23:13.719600 69358 main.go:141] libmachine: Decoding PEM data...
I0919 22:23:13.719610 69358 main.go:141] libmachine: Parsing certificate...
I0919 22:23:13.719933 69358 cli_runner.go:164] Run: docker network inspect ha-326307 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W0919 22:23:13.737609 69358 cli_runner.go:211] docker network inspect ha-326307 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I0919 22:23:13.737699 69358 network_create.go:284] running [docker network inspect ha-326307] to gather additional debugging logs...
I0919 22:23:13.737725 69358 cli_runner.go:164] Run: docker network inspect ha-326307
W0919 22:23:13.755400 69358 cli_runner.go:211] docker network inspect ha-326307 returned with exit code 1
I0919 22:23:13.755437 69358 network_create.go:287] error running [docker network inspect ha-326307]: docker network inspect ha-326307: exit status 1
stdout:
[]
stderr:
Error response from daemon: network ha-326307 not found
I0919 22:23:13.755455 69358 network_create.go:289] output of [docker network inspect ha-326307]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network ha-326307 not found
** /stderr **
I0919 22:23:13.755563 69358 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0919 22:23:13.774541 69358 network.go:206] using free private subnet 192.168.49.0/24: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc0018eb270}
I0919 22:23:13.774578 69358 network_create.go:124] attempt to create docker network ha-326307 192.168.49.0/24 with gateway 192.168.49.1 and MTU of 1500 ...
I0919 22:23:13.774619 69358 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.49.0/24 --gateway=192.168.49.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=ha-326307 ha-326307
I0919 22:23:13.834699 69358 network_create.go:108] docker network ha-326307 192.168.49.0/24 created
I0919 22:23:13.834730 69358 kic.go:121] calculated static IP "192.168.49.2" for the "ha-326307" container
I0919 22:23:13.834799 69358 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0919 22:23:13.852316 69358 cli_runner.go:164] Run: docker volume create ha-326307 --label name.minikube.sigs.k8s.io=ha-326307 --label created_by.minikube.sigs.k8s.io=true
I0919 22:23:13.872969 69358 oci.go:103] Successfully created a docker volume ha-326307
I0919 22:23:13.873115 69358 cli_runner.go:164] Run: docker run --rm --name ha-326307-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=ha-326307 --entrypoint /usr/bin/test -v ha-326307:/var gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -d /var/lib
I0919 22:23:14.277718 69358 oci.go:107] Successfully prepared a docker volume ha-326307
I0919 22:23:14.277762 69358 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime containerd
I0919 22:23:14.277789 69358 kic.go:194] Starting extracting preloaded images to volume ...
I0919 22:23:14.277852 69358 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21594-14678/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v ha-326307:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -I lz4 -xf /preloaded.tar -C /extractDir
I0919 22:23:18.547851 69358 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21594-14678/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v ha-326307:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -I lz4 -xf /preloaded.tar -C /extractDir: (4.269954037s)
I0919 22:23:18.547886 69358 kic.go:203] duration metric: took 4.270092787s to extract preloaded images to volume ...
W0919 22:23:18.548002 69358 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
W0919 22:23:18.548044 69358 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.
I0919 22:23:18.548091 69358 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0919 22:23:18.602395 69358 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname ha-326307 --name ha-326307 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=ha-326307 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=ha-326307 --network ha-326307 --ip 192.168.49.2 --volume ha-326307:/var --security-opt apparmor=unconfined --memory=3072mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1
I0919 22:23:18.902433 69358 cli_runner.go:164] Run: docker container inspect ha-326307 --format={{.State.Running}}
I0919 22:23:18.923488 69358 cli_runner.go:164] Run: docker container inspect ha-326307 --format={{.State.Status}}
I0919 22:23:18.945324 69358 cli_runner.go:164] Run: docker exec ha-326307 stat /var/lib/dpkg/alternatives/iptables
I0919 22:23:18.998198 69358 oci.go:144] the created container "ha-326307" has a running status.
I0919 22:23:18.998254 69358 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21594-14678/.minikube/machines/ha-326307/id_rsa...
I0919 22:23:19.305578 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/machines/ha-326307/id_rsa.pub -> /home/docker/.ssh/authorized_keys
I0919 22:23:19.305639 69358 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21594-14678/.minikube/machines/ha-326307/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0919 22:23:19.338987 69358 cli_runner.go:164] Run: docker container inspect ha-326307 --format={{.State.Status}}
I0919 22:23:19.361057 69358 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0919 22:23:19.361077 69358 kic_runner.go:114] Args: [docker exec --privileged ha-326307 chown docker:docker /home/docker/.ssh/authorized_keys]
I0919 22:23:19.423644 69358 cli_runner.go:164] Run: docker container inspect ha-326307 --format={{.State.Status}}
I0919 22:23:19.446710 69358 machine.go:93] provisionDockerMachine start ...
I0919 22:23:19.446815 69358 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-326307
I0919 22:23:19.468914 69358 main.go:141] libmachine: Using SSH client type: native
I0919 22:23:19.469178 69358 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32788 <nil> <nil>}
I0919 22:23:19.469194 69358 main.go:141] libmachine: About to run SSH command:
hostname
I0919 22:23:19.609654 69358 main.go:141] libmachine: SSH cmd err, output: <nil>: ha-326307
I0919 22:23:19.609685 69358 ubuntu.go:182] provisioning hostname "ha-326307"
I0919 22:23:19.609806 69358 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-326307
I0919 22:23:19.631352 69358 main.go:141] libmachine: Using SSH client type: native
I0919 22:23:19.631769 69358 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32788 <nil> <nil>}
I0919 22:23:19.631790 69358 main.go:141] libmachine: About to run SSH command:
sudo hostname ha-326307 && echo "ha-326307" | sudo tee /etc/hostname
I0919 22:23:19.783770 69358 main.go:141] libmachine: SSH cmd err, output: <nil>: ha-326307
I0919 22:23:19.783868 69358 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-326307
I0919 22:23:19.802757 69358 main.go:141] libmachine: Using SSH client type: native
I0919 22:23:19.802967 69358 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32788 <nil> <nil>}
I0919 22:23:19.802990 69358 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\sha-326307' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 ha-326307/g' /etc/hosts;
else
echo '127.0.1.1 ha-326307' | sudo tee -a /etc/hosts;
fi
fi
I0919 22:23:19.942778 69358 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0919 22:23:19.942811 69358 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21594-14678/.minikube CaCertPath:/home/jenkins/minikube-integration/21594-14678/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21594-14678/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21594-14678/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21594-14678/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21594-14678/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21594-14678/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21594-14678/.minikube}
I0919 22:23:19.942925 69358 ubuntu.go:190] setting up certificates
I0919 22:23:19.942949 69358 provision.go:84] configureAuth start
I0919 22:23:19.943010 69358 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-326307
I0919 22:23:19.963444 69358 provision.go:143] copyHostCerts
I0919 22:23:19.963491 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/certs/ca.pem -> /home/jenkins/minikube-integration/21594-14678/.minikube/ca.pem
I0919 22:23:19.963531 69358 exec_runner.go:144] found /home/jenkins/minikube-integration/21594-14678/.minikube/ca.pem, removing ...
I0919 22:23:19.963541 69358 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21594-14678/.minikube/ca.pem
I0919 22:23:19.963629 69358 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21594-14678/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21594-14678/.minikube/ca.pem (1082 bytes)
I0919 22:23:19.963778 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/certs/cert.pem -> /home/jenkins/minikube-integration/21594-14678/.minikube/cert.pem
I0919 22:23:19.963807 69358 exec_runner.go:144] found /home/jenkins/minikube-integration/21594-14678/.minikube/cert.pem, removing ...
I0919 22:23:19.963811 69358 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21594-14678/.minikube/cert.pem
I0919 22:23:19.963862 69358 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21594-14678/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21594-14678/.minikube/cert.pem (1123 bytes)
I0919 22:23:19.963997 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/certs/key.pem -> /home/jenkins/minikube-integration/21594-14678/.minikube/key.pem
I0919 22:23:19.964030 69358 exec_runner.go:144] found /home/jenkins/minikube-integration/21594-14678/.minikube/key.pem, removing ...
I0919 22:23:19.964040 69358 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21594-14678/.minikube/key.pem
I0919 22:23:19.964080 69358 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21594-14678/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21594-14678/.minikube/key.pem (1675 bytes)
I0919 22:23:19.964187 69358 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21594-14678/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21594-14678/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21594-14678/.minikube/certs/ca-key.pem org=jenkins.ha-326307 san=[127.0.0.1 192.168.49.2 ha-326307 localhost minikube]
I0919 22:23:20.747311 69358 provision.go:177] copyRemoteCerts
I0919 22:23:20.747377 69358 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0919 22:23:20.747410 69358 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-326307
I0919 22:23:20.766468 69358 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32788 SSHKeyPath:/home/jenkins/minikube-integration/21594-14678/.minikube/machines/ha-326307/id_rsa Username:docker}
I0919 22:23:20.866991 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/certs/ca.pem -> /etc/docker/ca.pem
I0919 22:23:20.867057 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I0919 22:23:20.897799 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/machines/server.pem -> /etc/docker/server.pem
I0919 22:23:20.897858 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/machines/server.pem --> /etc/docker/server.pem (1200 bytes)
I0919 22:23:20.925953 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/machines/server-key.pem -> /etc/docker/server-key.pem
I0919 22:23:20.926026 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I0919 22:23:20.954845 69358 provision.go:87] duration metric: took 1.011880735s to configureAuth
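configureAuth above generated the docker-machine style server certificate (san=[127.0.0.1 192.168.49.2 ha-326307 localhost minikube]) and copied the CA, server cert, and server key into /etc/docker on the node. A quick host-side sanity check of that material, using the paths from the log (illustrative only; the test itself never runs this):

openssl verify -CAfile /home/jenkins/minikube-integration/21594-14678/.minikube/certs/ca.pem \
  /home/jenkins/minikube-integration/21594-14678/.minikube/machines/server.pem
# the SAN list should match the san=[...] set logged by provision.go above
openssl x509 -noout -text -in /home/jenkins/minikube-integration/21594-14678/.minikube/machines/server.pem \
  | grep -A1 'Subject Alternative Name'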
I0919 22:23:20.954872 69358 ubuntu.go:206] setting minikube options for container-runtime
I0919 22:23:20.955074 69358 config.go:182] Loaded profile config "ha-326307": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.0
I0919 22:23:20.955089 69358 machine.go:96] duration metric: took 1.508356629s to provisionDockerMachine
I0919 22:23:20.955096 69358 client.go:171] duration metric: took 7.235738314s to LocalClient.Create
I0919 22:23:20.955122 69358 start.go:167] duration metric: took 7.235806728s to libmachine.API.Create "ha-326307"
I0919 22:23:20.955128 69358 start.go:293] postStartSetup for "ha-326307" (driver="docker")
I0919 22:23:20.955136 69358 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0919 22:23:20.955224 69358 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0919 22:23:20.955259 69358 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-326307
I0919 22:23:20.975767 69358 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32788 SSHKeyPath:/home/jenkins/minikube-integration/21594-14678/.minikube/machines/ha-326307/id_rsa Username:docker}
I0919 22:23:21.077921 69358 ssh_runner.go:195] Run: cat /etc/os-release
I0919 22:23:21.081820 69358 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0919 22:23:21.081872 69358 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0919 22:23:21.081881 69358 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0919 22:23:21.081888 69358 info.go:137] Remote host: Ubuntu 22.04.5 LTS
I0919 22:23:21.081901 69358 filesync.go:126] Scanning /home/jenkins/minikube-integration/21594-14678/.minikube/addons for local assets ...
I0919 22:23:21.081973 69358 filesync.go:126] Scanning /home/jenkins/minikube-integration/21594-14678/.minikube/files for local assets ...
I0919 22:23:21.082057 69358 filesync.go:149] local asset: /home/jenkins/minikube-integration/21594-14678/.minikube/files/etc/ssl/certs/182102.pem -> 182102.pem in /etc/ssl/certs
I0919 22:23:21.082071 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/files/etc/ssl/certs/182102.pem -> /etc/ssl/certs/182102.pem
I0919 22:23:21.082204 69358 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I0919 22:23:21.092245 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/files/etc/ssl/certs/182102.pem --> /etc/ssl/certs/182102.pem (1708 bytes)
I0919 22:23:21.123732 69358 start.go:296] duration metric: took 168.590139ms for postStartSetup
I0919 22:23:21.124127 69358 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-326307
I0919 22:23:21.143109 69358 profile.go:143] Saving config to /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/config.json ...
I0919 22:23:21.143414 69358 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0919 22:23:21.143466 69358 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-326307
I0919 22:23:21.162970 69358 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32788 SSHKeyPath:/home/jenkins/minikube-integration/21594-14678/.minikube/machines/ha-326307/id_rsa Username:docker}
I0919 22:23:21.258062 69358 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0919 22:23:21.263437 69358 start.go:128] duration metric: took 7.546444684s to createHost
I0919 22:23:21.263491 69358 start.go:83] releasing machines lock for "ha-326307", held for 7.546570423s
I0919 22:23:21.263561 69358 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-326307
I0919 22:23:21.282251 69358 ssh_runner.go:195] Run: cat /version.json
I0919 22:23:21.282309 69358 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-326307
I0919 22:23:21.282391 69358 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0919 22:23:21.282539 69358 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-326307
I0919 22:23:21.302076 69358 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32788 SSHKeyPath:/home/jenkins/minikube-integration/21594-14678/.minikube/machines/ha-326307/id_rsa Username:docker}
I0919 22:23:21.302858 69358 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32788 SSHKeyPath:/home/jenkins/minikube-integration/21594-14678/.minikube/machines/ha-326307/id_rsa Username:docker}
I0919 22:23:21.477003 69358 ssh_runner.go:195] Run: systemctl --version
I0919 22:23:21.481946 69358 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0919 22:23:21.486736 69358 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0919 22:23:21.519470 69358 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0919 22:23:21.519573 69358 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0919 22:23:21.549703 69358 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
I0919 22:23:21.549736 69358 start.go:495] detecting cgroup driver to use...
I0919 22:23:21.549772 69358 detect.go:190] detected "systemd" cgroup driver on host os
I0919 22:23:21.549813 69358 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I0919 22:23:21.563897 69358 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0919 22:23:21.577043 69358 docker.go:218] disabling cri-docker service (if available) ...
I0919 22:23:21.577104 69358 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I0919 22:23:21.591898 69358 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I0919 22:23:21.607905 69358 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I0919 22:23:21.677531 69358 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I0919 22:23:21.749223 69358 docker.go:234] disabling docker service ...
I0919 22:23:21.749348 69358 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I0919 22:23:21.771648 69358 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I0919 22:23:21.786268 69358 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I0919 22:23:21.864247 69358 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I0919 22:23:21.930620 69358 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I0919 22:23:21.943680 69358 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0919 22:23:21.963319 69358 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I0919 22:23:21.977473 69358 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0919 22:23:21.989630 69358 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I0919 22:23:21.989705 69358 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I0919 22:23:22.001778 69358 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0919 22:23:22.013415 69358 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0919 22:23:22.024683 69358 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0919 22:23:22.036042 69358 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0919 22:23:22.047238 69358 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0919 22:23:22.060239 69358 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0919 22:23:22.074324 69358 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I0919 22:23:22.087081 69358 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0919 22:23:22.099883 69358 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0919 22:23:22.110348 69358 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:23:22.180253 69358 ssh_runner.go:195] Run: sudo systemctl restart containerd
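Taken together, the sed edits above amount to roughly this fragment of /etc/containerd/config.toml, which the daemon-reload and restart then pick up (a sketch assuming the containerd 1.7 v2 config layout; the actual file on the node is not shown in the log):

version = 2

[plugins."io.containerd.grpc.v1.cri"]
  # values set by the sed commands logged above
  sandbox_image = "registry.k8s.io/pause:3.10.1"
  restrict_oom_score_adj = false
  enable_unprivileged_ports = true

  [plugins."io.containerd.grpc.v1.cri".cni]
    conf_dir = "/etc/cni/net.d"

  [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
    runtime_type = "io.containerd.runc.v2"

    [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
      SystemdCgroup = true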
I0919 22:23:22.295748 69358 start.go:542] Will wait 60s for socket path /run/containerd/containerd.sock
I0919 22:23:22.295832 69358 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I0919 22:23:22.300535 69358 start.go:563] Will wait 60s for crictl version
I0919 22:23:22.300597 69358 ssh_runner.go:195] Run: which crictl
I0919 22:23:22.304676 69358 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0919 22:23:22.344790 69358 start.go:579] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: 1.7.27
RuntimeApiVersion: v1
I0919 22:23:22.344850 69358 ssh_runner.go:195] Run: containerd --version
I0919 22:23:22.371338 69358 ssh_runner.go:195] Run: containerd --version
I0919 22:23:22.400934 69358 out.go:179] * Preparing Kubernetes v1.34.0 on containerd 1.7.27 ...
I0919 22:23:22.402669 69358 cli_runner.go:164] Run: docker network inspect ha-326307 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0919 22:23:22.421952 69358 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I0919 22:23:22.426523 69358 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
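The one-liner above pins host.minikube.internal to the docker network gateway (192.168.49.1) so workloads on the node can reach the host. Spelled out, it is equivalent to:

# drop any stale entry, append the gateway mapping, then copy the file back into place
{ grep -v $'\thost.minikube.internal$' /etc/hosts; echo -e "192.168.49.1\thost.minikube.internal"; } > /tmp/hosts.new
sudo cp /tmp/hosts.new /etc/hosts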
I0919 22:23:22.442415 69358 kubeadm.go:875] updating cluster {Name:ha-326307 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:ha-326307 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIP
s:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetCli
entPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I0919 22:23:22.442712 69358 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime containerd
I0919 22:23:22.442823 69358 ssh_runner.go:195] Run: sudo crictl images --output json
I0919 22:23:22.482684 69358 containerd.go:627] all images are preloaded for containerd runtime.
I0919 22:23:22.482710 69358 containerd.go:534] Images already preloaded, skipping extraction
I0919 22:23:22.482762 69358 ssh_runner.go:195] Run: sudo crictl images --output json
I0919 22:23:22.518500 69358 containerd.go:627] all images are preloaded for containerd runtime.
I0919 22:23:22.518526 69358 cache_images.go:85] Images are preloaded, skipping loading
I0919 22:23:22.518533 69358 kubeadm.go:926] updating node { 192.168.49.2 8443 v1.34.0 containerd true true} ...
I0919 22:23:22.518616 69358 kubeadm.go:938] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=ha-326307 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
[Install]
config:
{KubernetesVersion:v1.34.0 ClusterName:ha-326307 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I0919 22:23:22.518668 69358 ssh_runner.go:195] Run: sudo crictl info
I0919 22:23:22.554956 69358 cni.go:84] Creating CNI manager for ""
I0919 22:23:22.554993 69358 cni.go:136] multinode detected (1 nodes found), recommending kindnet
I0919 22:23:22.555004 69358 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I0919 22:23:22.555029 69358 kubeadm.go:189] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8443 KubernetesVersion:v1.34.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:ha-326307 NodeName:ha-326307 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernet
es/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I0919 22:23:22.555176 69358 kubeadm.go:195] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.49.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///run/containerd/containerd.sock
name: "ha-326307"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.49.2"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
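That is the full kubeadm configuration minikube renders before writing it to /var/tmp/minikube/kubeadm.yaml.new further down. To lint a config like this by hand, recent kubeadm releases ship a validator (illustrative only; the test does not run it):

sudo /var/lib/minikube/binaries/v1.34.0/kubeadm config validate --config /var/tmp/minikube/kubeadm.yaml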
I0919 22:23:22.555209 69358 kube-vip.go:115] generating kube-vip config ...
I0919 22:23:22.555273 69358 ssh_runner.go:195] Run: sudo sh -c "lsmod | grep ip_vs"
I0919 22:23:22.568901 69358 kube-vip.go:163] giving up enabling control-plane load-balancing as ipvs kernel modules appears not to be available: sudo sh -c "lsmod | grep ip_vs": Process exited with status 1
stdout:
stderr:
I0919 22:23:22.569038 69358 kube-vip.go:137] kube-vip config:
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
name: kube-vip
namespace: kube-system
spec:
containers:
- args:
- manager
env:
- name: vip_arp
value: "true"
- name: port
value: "8443"
- name: vip_nodename
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: vip_interface
value: eth0
- name: vip_cidr
value: "32"
- name: dns_mode
value: first
- name: cp_enable
value: "true"
- name: cp_namespace
value: kube-system
- name: vip_leaderelection
value: "true"
- name: vip_leasename
value: plndr-cp-lock
- name: vip_leaseduration
value: "5"
- name: vip_renewdeadline
value: "3"
- name: vip_retryperiod
value: "1"
- name: address
value: 192.168.49.254
- name: prometheus_server
value: :2112
image: ghcr.io/kube-vip/kube-vip:v1.0.0
imagePullPolicy: IfNotPresent
name: kube-vip
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
- NET_RAW
volumeMounts:
- mountPath: /etc/kubernetes/admin.conf
name: kubeconfig
hostAliases:
- hostnames:
- kubernetes
ip: 127.0.0.1
hostNetwork: true
volumes:
- hostPath:
path: "/etc/kubernetes/super-admin.conf"
name: kubeconfig
status: {}
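The static pod above is what backs the APIServerHAVIP: kube-vip runs on every control-plane node, takes the plndr-cp-lock lease, and announces 192.168.49.254 via ARP from the current leader. Once the control plane is up it can be checked directly (illustrative commands, not part of the test):

# -k because the apiserver serving certificate is signed by the cluster's own CA
curl -k https://192.168.49.254:8443/healthz
# the holder of this lease is the node currently announcing the VIP
kubectl -n kube-system get lease plndr-cp-lock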
I0919 22:23:22.569091 69358 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.0
I0919 22:23:22.580223 69358 binaries.go:44] Found k8s binaries, skipping transfer
I0919 22:23:22.580317 69358 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube /etc/kubernetes/manifests
I0919 22:23:22.591268 69358 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (313 bytes)
I0919 22:23:22.612688 69358 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0919 22:23:22.636770 69358 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2221 bytes)
I0919 22:23:22.658657 69358 ssh_runner.go:362] scp memory --> /etc/kubernetes/manifests/kube-vip.yaml (1364 bytes)
I0919 22:23:22.681384 69358 ssh_runner.go:195] Run: grep 192.168.49.254 control-plane.minikube.internal$ /etc/hosts
I0919 22:23:22.685531 69358 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.254 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0919 22:23:22.698340 69358 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:23:22.769217 69358 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0919 22:23:22.792280 69358 certs.go:68] Setting up /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307 for IP: 192.168.49.2
I0919 22:23:22.792300 69358 certs.go:194] generating shared ca certs ...
I0919 22:23:22.792315 69358 certs.go:226] acquiring lock for ca certs: {Name:mkd7a2e112725f042a76c7be63aef486d6b9bff2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:23:22.792509 69358 certs.go:235] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21594-14678/.minikube/ca.key
I0919 22:23:22.792553 69358 certs.go:235] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21594-14678/.minikube/proxy-client-ca.key
I0919 22:23:22.792563 69358 certs.go:256] generating profile certs ...
I0919 22:23:22.792630 69358 certs.go:363] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/client.key
I0919 22:23:22.792643 69358 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/client.crt with IP's: []
I0919 22:23:22.975725 69358 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/client.crt ...
I0919 22:23:22.975759 69358 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/client.crt: {Name:mk32bca88dd6748516774b56251f96e4fc38a69f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:23:22.975973 69358 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/client.key ...
I0919 22:23:22.975990 69358 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/client.key: {Name:mkc0e836c004e527dbd2787dc00463a0715cf8a2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:23:22.976108 69358 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.key.9685e226
I0919 22:23:22.976125 69358 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.crt.9685e226 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2 192.168.49.254]
I0919 22:23:23.460427 69358 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.crt.9685e226 ...
I0919 22:23:23.460460 69358 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.crt.9685e226: {Name:mk98859e0e43a6d4b4da591dc89695908954cc81 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:23:23.460672 69358 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.key.9685e226 ...
I0919 22:23:23.460693 69358 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.key.9685e226: {Name:mk3473c1668aec72ec5a5598645b70e29415cdd6 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:23:23.460941 69358 certs.go:381] copying /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.crt.9685e226 -> /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.crt
I0919 22:23:23.461078 69358 certs.go:385] copying /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.key.9685e226 -> /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.key
I0919 22:23:23.461207 69358 certs.go:363] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/proxy-client.key
I0919 22:23:23.461233 69358 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/proxy-client.crt with IP's: []
I0919 22:23:23.489621 69358 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/proxy-client.crt ...
I0919 22:23:23.489652 69358 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/proxy-client.crt: {Name:mk06f3b4cfde33781bd7076ead00f94525257452 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:23:23.489837 69358 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/proxy-client.key ...
I0919 22:23:23.489860 69358 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/proxy-client.key: {Name:mk632a617a99ac85bf5a9b022d1173caf8e7b208 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:23:23.489978 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/ca.crt -> /var/lib/minikube/certs/ca.crt
I0919 22:23:23.490003 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/ca.key -> /var/lib/minikube/certs/ca.key
I0919 22:23:23.490018 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/proxy-client-ca.crt -> /var/lib/minikube/certs/proxy-client-ca.crt
I0919 22:23:23.490034 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/proxy-client-ca.key -> /var/lib/minikube/certs/proxy-client-ca.key
I0919 22:23:23.490051 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.crt -> /var/lib/minikube/certs/apiserver.crt
I0919 22:23:23.490069 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.key -> /var/lib/minikube/certs/apiserver.key
I0919 22:23:23.490087 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/proxy-client.crt -> /var/lib/minikube/certs/proxy-client.crt
I0919 22:23:23.490100 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/proxy-client.key -> /var/lib/minikube/certs/proxy-client.key
I0919 22:23:23.490185 69358 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-14678/.minikube/certs/18210.pem (1338 bytes)
W0919 22:23:23.490228 69358 certs.go:480] ignoring /home/jenkins/minikube-integration/21594-14678/.minikube/certs/18210_empty.pem, impossibly tiny 0 bytes
I0919 22:23:23.490238 69358 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-14678/.minikube/certs/ca-key.pem (1679 bytes)
I0919 22:23:23.490273 69358 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-14678/.minikube/certs/ca.pem (1082 bytes)
I0919 22:23:23.490304 69358 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-14678/.minikube/certs/cert.pem (1123 bytes)
I0919 22:23:23.490333 69358 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-14678/.minikube/certs/key.pem (1675 bytes)
I0919 22:23:23.490390 69358 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-14678/.minikube/files/etc/ssl/certs/182102.pem (1708 bytes)
I0919 22:23:23.490435 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/certs/18210.pem -> /usr/share/ca-certificates/18210.pem
I0919 22:23:23.490455 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/files/etc/ssl/certs/182102.pem -> /usr/share/ca-certificates/182102.pem
I0919 22:23:23.490497 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/ca.crt -> /usr/share/ca-certificates/minikubeCA.pem
I0919 22:23:23.491033 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0919 22:23:23.517815 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I0919 22:23:23.544857 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0919 22:23:23.571386 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I0919 22:23:23.600966 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
I0919 22:23:23.629855 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I0919 22:23:23.657907 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0919 22:23:23.685564 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I0919 22:23:23.713503 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/certs/18210.pem --> /usr/share/ca-certificates/18210.pem (1338 bytes)
I0919 22:23:23.745344 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/files/etc/ssl/certs/182102.pem --> /usr/share/ca-certificates/182102.pem (1708 bytes)
I0919 22:23:23.774311 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0919 22:23:23.807603 69358 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I0919 22:23:23.832523 69358 ssh_runner.go:195] Run: openssl version
I0919 22:23:23.839649 69358 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/18210.pem && ln -fs /usr/share/ca-certificates/18210.pem /etc/ssl/certs/18210.pem"
I0919 22:23:23.851364 69358 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/18210.pem
I0919 22:23:23.856325 69358 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Sep 19 22:20 /usr/share/ca-certificates/18210.pem
I0919 22:23:23.856396 69358 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/18210.pem
I0919 22:23:23.864469 69358 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/18210.pem /etc/ssl/certs/51391683.0"
I0919 22:23:23.876649 69358 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/182102.pem && ln -fs /usr/share/ca-certificates/182102.pem /etc/ssl/certs/182102.pem"
I0919 22:23:23.888129 69358 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/182102.pem
I0919 22:23:23.892889 69358 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Sep 19 22:20 /usr/share/ca-certificates/182102.pem
I0919 22:23:23.892949 69358 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/182102.pem
I0919 22:23:23.901167 69358 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/182102.pem /etc/ssl/certs/3ec20f2e.0"
I0919 22:23:23.912487 69358 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0919 22:23:23.924831 69358 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0919 22:23:23.929289 69358 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Sep 19 22:15 /usr/share/ca-certificates/minikubeCA.pem
I0919 22:23:23.929357 69358 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0919 22:23:23.937110 69358 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
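The *.0 symlink names used above are OpenSSL subject hashes, which is how OpenSSL-linked clients locate trust anchors in /etc/ssl/certs; each hash comes from the openssl x509 -hash call run just before the link is made, e.g.:

openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem   # prints b5213941
sudo ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0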
I0919 22:23:23.948517 69358 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0919 22:23:23.952948 69358 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I0919 22:23:23.953011 69358 kubeadm.go:392] StartCluster: {Name:ha-326307 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:ha-326307 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[
] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClient
Path: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0919 22:23:23.953080 69358 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I0919 22:23:23.953122 69358 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I0919 22:23:23.991138 69358 cri.go:89] found id: ""
I0919 22:23:23.991247 69358 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0919 22:23:24.003111 69358 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0919 22:23:24.013643 69358 kubeadm.go:214] ignoring SystemVerification for kubeadm because of docker driver
I0919 22:23:24.013714 69358 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0919 22:23:24.024557 69358 kubeadm.go:155] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0919 22:23:24.024576 69358 kubeadm.go:157] found existing configuration files:
I0919 22:23:24.024633 69358 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I0919 22:23:24.035252 69358 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I0919 22:23:24.035322 69358 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I0919 22:23:24.045590 69358 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I0919 22:23:24.056529 69358 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I0919 22:23:24.056590 69358 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I0919 22:23:24.066716 69358 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I0919 22:23:24.077570 69358 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I0919 22:23:24.077653 69358 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I0919 22:23:24.088177 69358 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I0919 22:23:24.098372 69358 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I0919 22:23:24.098426 69358 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I0919 22:23:24.108265 69358 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I0919 22:23:24.149643 69358 kubeadm.go:310] [init] Using Kubernetes version: v1.34.0
I0919 22:23:24.149730 69358 kubeadm.go:310] [preflight] Running pre-flight checks
I0919 22:23:24.166048 69358 kubeadm.go:310] [preflight] The system verification failed. Printing the output from the verification:
I0919 22:23:24.166117 69358 kubeadm.go:310] KERNEL_VERSION: 6.8.0-1037-gcp
I0919 22:23:24.166172 69358 kubeadm.go:310] OS: Linux
I0919 22:23:24.166213 69358 kubeadm.go:310] CGROUPS_CPU: enabled
I0919 22:23:24.166275 69358 kubeadm.go:310] CGROUPS_CPUSET: enabled
I0919 22:23:24.166357 69358 kubeadm.go:310] CGROUPS_DEVICES: enabled
I0919 22:23:24.166446 69358 kubeadm.go:310] CGROUPS_FREEZER: enabled
I0919 22:23:24.166536 69358 kubeadm.go:310] CGROUPS_MEMORY: enabled
I0919 22:23:24.166608 69358 kubeadm.go:310] CGROUPS_PIDS: enabled
I0919 22:23:24.166683 69358 kubeadm.go:310] CGROUPS_HUGETLB: enabled
I0919 22:23:24.166760 69358 kubeadm.go:310] CGROUPS_IO: enabled
I0919 22:23:24.230351 69358 kubeadm.go:310] [preflight] Pulling images required for setting up a Kubernetes cluster
I0919 22:23:24.230487 69358 kubeadm.go:310] [preflight] This might take a minute or two, depending on the speed of your internet connection
I0919 22:23:24.230602 69358 kubeadm.go:310] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I0919 22:23:24.238806 69358 kubeadm.go:310] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I0919 22:23:24.243498 69358 out.go:252] - Generating certificates and keys ...
I0919 22:23:24.243610 69358 kubeadm.go:310] [certs] Using existing ca certificate authority
I0919 22:23:24.243715 69358 kubeadm.go:310] [certs] Using existing apiserver certificate and key on disk
I0919 22:23:24.335199 69358 kubeadm.go:310] [certs] Generating "apiserver-kubelet-client" certificate and key
I0919 22:23:24.361175 69358 kubeadm.go:310] [certs] Generating "front-proxy-ca" certificate and key
I0919 22:23:24.769077 69358 kubeadm.go:310] [certs] Generating "front-proxy-client" certificate and key
I0919 22:23:25.053293 69358 kubeadm.go:310] [certs] Generating "etcd/ca" certificate and key
I0919 22:23:25.392067 69358 kubeadm.go:310] [certs] Generating "etcd/server" certificate and key
I0919 22:23:25.392251 69358 kubeadm.go:310] [certs] etcd/server serving cert is signed for DNS names [ha-326307 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0919 22:23:25.629558 69358 kubeadm.go:310] [certs] Generating "etcd/peer" certificate and key
I0919 22:23:25.629706 69358 kubeadm.go:310] [certs] etcd/peer serving cert is signed for DNS names [ha-326307 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0919 22:23:26.141828 69358 kubeadm.go:310] [certs] Generating "etcd/healthcheck-client" certificate and key
I0919 22:23:26.343650 69358 kubeadm.go:310] [certs] Generating "apiserver-etcd-client" certificate and key
I0919 22:23:26.737207 69358 kubeadm.go:310] [certs] Generating "sa" key and public key
I0919 22:23:26.737292 69358 kubeadm.go:310] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I0919 22:23:27.020543 69358 kubeadm.go:310] [kubeconfig] Writing "admin.conf" kubeconfig file
I0919 22:23:27.208963 69358 kubeadm.go:310] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I0919 22:23:27.382044 69358 kubeadm.go:310] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I0919 22:23:27.660395 69358 kubeadm.go:310] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I0919 22:23:27.867964 69358 kubeadm.go:310] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I0919 22:23:27.868475 69358 kubeadm.go:310] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I0919 22:23:27.870857 69358 kubeadm.go:310] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I0919 22:23:27.873408 69358 out.go:252] - Booting up control plane ...
I0919 22:23:27.873545 69358 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-apiserver"
I0919 22:23:27.873665 69358 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I0919 22:23:27.873811 69358 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-scheduler"
I0919 22:23:27.884709 69358 kubeadm.go:310] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I0919 22:23:27.884874 69358 kubeadm.go:310] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I0919 22:23:27.892815 69358 kubeadm.go:310] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I0919 22:23:27.893043 69358 kubeadm.go:310] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I0919 22:23:27.893108 69358 kubeadm.go:310] [kubelet-start] Starting the kubelet
I0919 22:23:27.981591 69358 kubeadm.go:310] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I0919 22:23:27.981772 69358 kubeadm.go:310] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I0919 22:23:29.484085 69358 kubeadm.go:310] [kubelet-check] The kubelet is healthy after 1.501867716s
I0919 22:23:29.488057 69358 kubeadm.go:310] [control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
I0919 22:23:29.488269 69358 kubeadm.go:310] [control-plane-check] Checking kube-apiserver at https://192.168.49.2:8443/livez
I0919 22:23:29.488401 69358 kubeadm.go:310] [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
I0919 22:23:29.488636 69358 kubeadm.go:310] [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
I0919 22:23:31.058022 69358 kubeadm.go:310] [control-plane-check] kube-controller-manager is healthy after 1.569932465s
I0919 22:23:31.762139 69358 kubeadm.go:310] [control-plane-check] kube-scheduler is healthy after 2.27419796s
I0919 22:23:33.991284 69358 kubeadm.go:310] [control-plane-check] kube-apiserver is healthy after 4.503282233s
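The three probes kubeadm just reported healthy can also be hit by hand on the node, at the same endpoints and ports shown above (-k because the serving certificates are cluster-signed):

curl -k https://192.168.49.2:8443/livez      # kube-apiserver
curl -k https://127.0.0.1:10257/healthz      # kube-controller-manager
curl -k https://127.0.0.1:10259/livez        # kube-scheduler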
I0919 22:23:34.005767 69358 kubeadm.go:310] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I0919 22:23:34.017935 69358 kubeadm.go:310] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I0919 22:23:34.032336 69358 kubeadm.go:310] [upload-certs] Skipping phase. Please see --upload-certs
I0919 22:23:34.032534 69358 kubeadm.go:310] [mark-control-plane] Marking the node ha-326307 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I0919 22:23:34.042496 69358 kubeadm.go:310] [bootstrap-token] Using token: ym5hq4.pw1tvtip1io4ljbf
I0919 22:23:34.044381 69358 out.go:252] - Configuring RBAC rules ...
I0919 22:23:34.044558 69358 kubeadm.go:310] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I0919 22:23:34.048649 69358 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I0919 22:23:34.057509 69358 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I0919 22:23:34.061297 69358 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I0919 22:23:34.064926 69358 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I0919 22:23:34.069534 69358 kubeadm.go:310] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I0919 22:23:34.399239 69358 kubeadm.go:310] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I0919 22:23:34.818126 69358 kubeadm.go:310] [addons] Applied essential addon: CoreDNS
I0919 22:23:35.398001 69358 kubeadm.go:310] [addons] Applied essential addon: kube-proxy
I0919 22:23:35.398907 69358 kubeadm.go:310]
I0919 22:23:35.399007 69358 kubeadm.go:310] Your Kubernetes control-plane has initialized successfully!
I0919 22:23:35.399035 69358 kubeadm.go:310]
I0919 22:23:35.399120 69358 kubeadm.go:310] To start using your cluster, you need to run the following as a regular user:
I0919 22:23:35.399149 69358 kubeadm.go:310]
I0919 22:23:35.399207 69358 kubeadm.go:310] mkdir -p $HOME/.kube
I0919 22:23:35.399301 69358 kubeadm.go:310] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I0919 22:23:35.399350 69358 kubeadm.go:310] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I0919 22:23:35.399356 69358 kubeadm.go:310]
I0919 22:23:35.399402 69358 kubeadm.go:310] Alternatively, if you are the root user, you can run:
I0919 22:23:35.399408 69358 kubeadm.go:310]
I0919 22:23:35.399470 69358 kubeadm.go:310] export KUBECONFIG=/etc/kubernetes/admin.conf
I0919 22:23:35.399481 69358 kubeadm.go:310]
I0919 22:23:35.399554 69358 kubeadm.go:310] You should now deploy a pod network to the cluster.
I0919 22:23:35.399644 69358 kubeadm.go:310] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I0919 22:23:35.399706 69358 kubeadm.go:310] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I0919 22:23:35.399712 69358 kubeadm.go:310]
I0919 22:23:35.399803 69358 kubeadm.go:310] You can now join any number of control-plane nodes by copying certificate authorities
I0919 22:23:35.399888 69358 kubeadm.go:310] and service account keys on each node and then running the following as root:
I0919 22:23:35.399892 69358 kubeadm.go:310]
I0919 22:23:35.399971 69358 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token ym5hq4.pw1tvtip1io4ljbf \
I0919 22:23:35.400068 69358 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:dae3dd920fb027024a058f7784382f806dfdbf0483a893c299b72dd41dc8aff6 \
I0919 22:23:35.400089 69358 kubeadm.go:310] --control-plane
I0919 22:23:35.400093 69358 kubeadm.go:310]
I0919 22:23:35.400204 69358 kubeadm.go:310] Then you can join any number of worker nodes by running the following on each as root:
I0919 22:23:35.400217 69358 kubeadm.go:310]
I0919 22:23:35.400285 69358 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token ym5hq4.pw1tvtip1io4ljbf \
I0919 22:23:35.400382 69358 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:dae3dd920fb027024a058f7784382f806dfdbf0483a893c299b72dd41dc8aff6
I0919 22:23:35.403119 69358 kubeadm.go:310] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1037-gcp\n", err: exit status 1
I0919 22:23:35.403274 69358 kubeadm.go:310] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
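The --discovery-token-ca-cert-hash in the join commands above is the SHA-256 of the cluster CA public key. It can be recomputed on the node with the standard kubeadm recipe, substituting the CA path minikube populated earlier (/var/lib/minikube/certs/ca.crt):

openssl x509 -pubkey -in /var/lib/minikube/certs/ca.crt \
  | openssl rsa -pubin -outform der 2>/dev/null \
  | openssl dgst -sha256 -hex | sed 's/^.* //'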
I0919 22:23:35.403305 69358 cni.go:84] Creating CNI manager for ""
I0919 22:23:35.403317 69358 cni.go:136] multinode detected (1 nodes found), recommending kindnet
I0919 22:23:35.407302 69358 out.go:179] * Configuring CNI (Container Networking Interface) ...
I0919 22:23:35.409983 69358 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I0919 22:23:35.415011 69358 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.34.0/kubectl ...
I0919 22:23:35.415039 69358 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I0919 22:23:35.436210 69358 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I0919 22:23:35.679694 69358 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I0919 22:23:35.679756 69358 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I0919 22:23:35.679779 69358 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes ha-326307 minikube.k8s.io/updated_at=2025_09_19T22_23_35_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=6e37ee63f758843bb5fe33c3a528c564c4b83d53 minikube.k8s.io/name=ha-326307 minikube.k8s.io/primary=true
I0919 22:23:35.787076 69358 ops.go:34] apiserver oom_adj: -16
I0919 22:23:35.787237 69358 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0919 22:23:36.287327 69358 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0919 22:23:36.787300 69358 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0919 22:23:37.287415 69358 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0919 22:23:37.788066 69358 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0919 22:23:38.287401 69358 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0919 22:23:38.787731 69358 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0919 22:23:39.288028 69358 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0919 22:23:39.788301 69358 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0919 22:23:39.864456 69358 kubeadm.go:1105] duration metric: took 4.184765822s to wait for elevateKubeSystemPrivileges
I0919 22:23:39.864500 69358 kubeadm.go:394] duration metric: took 15.911493151s to StartCluster
I0919 22:23:39.864524 69358 settings.go:142] acquiring lock: {Name:mkf4af4eab91076a115aed3b017088a6f5e76093 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:23:39.864601 69358 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21594-14678/kubeconfig
I0919 22:23:39.865911 69358 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21594-14678/kubeconfig: {Name:mk5ed8b51261e712efaf73ae956ec07e6a42ac25 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:23:39.866255 69358 start.go:233] HA (multi-control plane) cluster: will skip waiting for primary control-plane node &{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I0919 22:23:39.866275 69358 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I0919 22:23:39.866288 69358 start.go:241] waiting for startup goroutines ...
I0919 22:23:39.866297 69358 addons.go:511] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I0919 22:23:39.866377 69358 addons.go:69] Setting storage-provisioner=true in profile "ha-326307"
I0919 22:23:39.866398 69358 addons.go:238] Setting addon storage-provisioner=true in "ha-326307"
I0919 22:23:39.866400 69358 addons.go:69] Setting default-storageclass=true in profile "ha-326307"
I0919 22:23:39.866428 69358 host.go:66] Checking if "ha-326307" exists ...
I0919 22:23:39.866523 69358 config.go:182] Loaded profile config "ha-326307": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.0
I0919 22:23:39.866434 69358 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "ha-326307"
I0919 22:23:39.866921 69358 cli_runner.go:164] Run: docker container inspect ha-326307 --format={{.State.Status}}
I0919 22:23:39.867012 69358 cli_runner.go:164] Run: docker container inspect ha-326307 --format={{.State.Status}}
I0919 22:23:39.892851 69358 kapi.go:59] client config for ha-326307: &rest.Config{Host:"https://192.168.49.254:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/client.crt", KeyFile:"/home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/client.key", CAFile:"/home/jenkins/minikube-integration/21594-14678/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(nil)
}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x27f4a00), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), WarningHandlerWithContext:rest.WarningHandlerWithContext(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
I0919 22:23:39.893863 69358 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true
I0919 22:23:39.893944 69358 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false
I0919 22:23:39.893953 69358 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false
I0919 22:23:39.894002 69358 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false
I0919 22:23:39.894061 69358 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false
I0919 22:23:39.893888 69358 cert_rotation.go:141] "Starting client certificate rotation controller" logger="tls-transport-cache"
I0919 22:23:39.894642 69358 addons.go:238] Setting addon default-storageclass=true in "ha-326307"
I0919 22:23:39.894691 69358 host.go:66] Checking if "ha-326307" exists ...
I0919 22:23:39.895196 69358 cli_runner.go:164] Run: docker container inspect ha-326307 --format={{.State.Status}}
I0919 22:23:39.895724 69358 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I0919 22:23:39.897293 69358 addons.go:435] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0919 22:23:39.897315 69358 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I0919 22:23:39.897386 69358 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-326307
I0919 22:23:39.923915 69358 addons.go:435] installing /etc/kubernetes/addons/storageclass.yaml
I0919 22:23:39.923939 69358 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0919 22:23:39.924001 69358 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-326307
I0919 22:23:39.926323 69358 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32788 SSHKeyPath:/home/jenkins/minikube-integration/21594-14678/.minikube/machines/ha-326307/id_rsa Username:docker}
I0919 22:23:39.953300 69358 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32788 SSHKeyPath:/home/jenkins/minikube-integration/21594-14678/.minikube/machines/ha-326307/id_rsa Username:docker}
I0919 22:23:39.968501 69358 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I0919 22:23:40.065441 69358 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0919 22:23:40.083647 69358 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0919 22:23:40.190461 69358 start.go:976] {"host.minikube.internal": 192.168.49.1} host record injected into CoreDNS's ConfigMap
I0919 22:23:40.433561 69358 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
I0919 22:23:40.435567 69358 addons.go:514] duration metric: took 569.25898ms for enable addons: enabled=[storage-provisioner default-storageclass]
I0919 22:23:40.435633 69358 start.go:246] waiting for cluster config update ...
I0919 22:23:40.435651 69358 start.go:255] writing updated cluster config ...
I0919 22:23:40.437510 69358 out.go:203]
I0919 22:23:40.439070 69358 config.go:182] Loaded profile config "ha-326307": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.0
I0919 22:23:40.439141 69358 profile.go:143] Saving config to /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/config.json ...
I0919 22:23:40.441238 69358 out.go:179] * Starting "ha-326307-m02" control-plane node in "ha-326307" cluster
I0919 22:23:40.443382 69358 cache.go:123] Beginning downloading kic base image for docker with containerd
I0919 22:23:40.445749 69358 out.go:179] * Pulling base image v0.0.48 ...
I0919 22:23:40.447079 69358 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime containerd
I0919 22:23:40.447132 69358 cache.go:58] Caching tarball of preloaded images
I0919 22:23:40.447229 69358 image.go:81] Checking for gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon
I0919 22:23:40.447308 69358 preload.go:172] Found /home/jenkins/minikube-integration/21594-14678/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-containerd-overlay2-amd64.tar.lz4 in cache, skipping download
I0919 22:23:40.447326 69358 cache.go:61] Finished verifying existence of preloaded tar for v1.34.0 on containerd
I0919 22:23:40.447427 69358 profile.go:143] Saving config to /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/config.json ...
I0919 22:23:40.470325 69358 image.go:100] Found gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon, skipping pull
I0919 22:23:40.470347 69358 cache.go:147] gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 exists in daemon, skipping load
I0919 22:23:40.470366 69358 cache.go:232] Successfully downloaded all kic artifacts
I0919 22:23:40.470391 69358 start.go:360] acquireMachinesLock for ha-326307-m02: {Name:mk4919a9b19250804b0f53d01bcd11efaf9a431f Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0919 22:23:40.470518 69358 start.go:364] duration metric: took 88.309µs to acquireMachinesLock for "ha-326307-m02"
I0919 22:23:40.470552 69358 start.go:93] Provisioning new machine with config: &{Name:ha-326307 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:ha-326307 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerN
ames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true} {Name:m02 IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 Mou
ntOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name:m02 IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I0919 22:23:40.470618 69358 start.go:125] createHost starting for "m02" (driver="docker")
I0919 22:23:40.473495 69358 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I0919 22:23:40.473607 69358 start.go:159] libmachine.API.Create for "ha-326307" (driver="docker")
I0919 22:23:40.473631 69358 client.go:168] LocalClient.Create starting
I0919 22:23:40.473689 69358 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21594-14678/.minikube/certs/ca.pem
I0919 22:23:40.473724 69358 main.go:141] libmachine: Decoding PEM data...
I0919 22:23:40.473734 69358 main.go:141] libmachine: Parsing certificate...
I0919 22:23:40.473828 69358 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21594-14678/.minikube/certs/cert.pem
I0919 22:23:40.473853 69358 main.go:141] libmachine: Decoding PEM data...
I0919 22:23:40.473861 69358 main.go:141] libmachine: Parsing certificate...
I0919 22:23:40.474095 69358 cli_runner.go:164] Run: docker network inspect ha-326307 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0919 22:23:40.493916 69358 network_create.go:77] Found existing network {name:ha-326307 subnet:0xc000ad7620 gateway:[0 0 0 0 0 0 0 0 0 0 255 255 192 168 49 1] mtu:1500}
I0919 22:23:40.493972 69358 kic.go:121] calculated static IP "192.168.49.3" for the "ha-326307-m02" container
I0919 22:23:40.494055 69358 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0919 22:23:40.516112 69358 cli_runner.go:164] Run: docker volume create ha-326307-m02 --label name.minikube.sigs.k8s.io=ha-326307-m02 --label created_by.minikube.sigs.k8s.io=true
I0919 22:23:40.537046 69358 oci.go:103] Successfully created a docker volume ha-326307-m02
I0919 22:23:40.537137 69358 cli_runner.go:164] Run: docker run --rm --name ha-326307-m02-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=ha-326307-m02 --entrypoint /usr/bin/test -v ha-326307-m02:/var gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -d /var/lib
I0919 22:23:40.991997 69358 oci.go:107] Successfully prepared a docker volume ha-326307-m02
I0919 22:23:40.992038 69358 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime containerd
I0919 22:23:40.992061 69358 kic.go:194] Starting extracting preloaded images to volume ...
I0919 22:23:40.992121 69358 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21594-14678/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v ha-326307-m02:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -I lz4 -xf /preloaded.tar -C /extractDir
I0919 22:23:45.362629 69358 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21594-14678/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v ha-326307-m02:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -I lz4 -xf /preloaded.tar -C /extractDir: (4.370467998s)
I0919 22:23:45.362666 69358 kic.go:203] duration metric: took 4.370603938s to extract preloaded images to volume ...
W0919 22:23:45.362777 69358 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
W0919 22:23:45.362811 69358 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.
I0919 22:23:45.362846 69358 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0919 22:23:45.417833 69358 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname ha-326307-m02 --name ha-326307-m02 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=ha-326307-m02 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=ha-326307-m02 --network ha-326307 --ip 192.168.49.3 --volume ha-326307-m02:/var --security-opt apparmor=unconfined --memory=3072mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1
I0919 22:23:45.744363 69358 cli_runner.go:164] Run: docker container inspect ha-326307-m02 --format={{.State.Running}}
I0919 22:23:45.768456 69358 cli_runner.go:164] Run: docker container inspect ha-326307-m02 --format={{.State.Status}}
I0919 22:23:45.789293 69358 cli_runner.go:164] Run: docker exec ha-326307-m02 stat /var/lib/dpkg/alternatives/iptables
I0919 22:23:45.846760 69358 oci.go:144] the created container "ha-326307-m02" has a running status.
I0919 22:23:45.846794 69358 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21594-14678/.minikube/machines/ha-326307-m02/id_rsa...
I0919 22:23:46.005236 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/machines/ha-326307-m02/id_rsa.pub -> /home/docker/.ssh/authorized_keys
I0919 22:23:46.005288 69358 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21594-14678/.minikube/machines/ha-326307-m02/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0919 22:23:46.042640 69358 cli_runner.go:164] Run: docker container inspect ha-326307-m02 --format={{.State.Status}}
I0919 22:23:46.067424 69358 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0919 22:23:46.067455 69358 kic_runner.go:114] Args: [docker exec --privileged ha-326307-m02 chown docker:docker /home/docker/.ssh/authorized_keys]
I0919 22:23:46.132729 69358 cli_runner.go:164] Run: docker container inspect ha-326307-m02 --format={{.State.Status}}
I0919 22:23:46.155854 69358 machine.go:93] provisionDockerMachine start ...
I0919 22:23:46.155967 69358 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-326307-m02
I0919 22:23:46.177181 69358 main.go:141] libmachine: Using SSH client type: native
I0919 22:23:46.177511 69358 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32793 <nil> <nil>}
I0919 22:23:46.177533 69358 main.go:141] libmachine: About to run SSH command:
hostname
I0919 22:23:46.320054 69358 main.go:141] libmachine: SSH cmd err, output: <nil>: ha-326307-m02
I0919 22:23:46.320089 69358 ubuntu.go:182] provisioning hostname "ha-326307-m02"
I0919 22:23:46.320185 69358 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-326307-m02
I0919 22:23:46.341740 69358 main.go:141] libmachine: Using SSH client type: native
I0919 22:23:46.341951 69358 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32793 <nil> <nil>}
I0919 22:23:46.341965 69358 main.go:141] libmachine: About to run SSH command:
sudo hostname ha-326307-m02 && echo "ha-326307-m02" | sudo tee /etc/hostname
I0919 22:23:46.497123 69358 main.go:141] libmachine: SSH cmd err, output: <nil>: ha-326307-m02
I0919 22:23:46.497234 69358 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-326307-m02
I0919 22:23:46.520214 69358 main.go:141] libmachine: Using SSH client type: native
I0919 22:23:46.520436 69358 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32793 <nil> <nil>}
I0919 22:23:46.520455 69358 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\sha-326307-m02' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 ha-326307-m02/g' /etc/hosts;
else
echo '127.0.1.1 ha-326307-m02' | sudo tee -a /etc/hosts;
fi
fi
I0919 22:23:46.659417 69358 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0919 22:23:46.659458 69358 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21594-14678/.minikube CaCertPath:/home/jenkins/minikube-integration/21594-14678/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21594-14678/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21594-14678/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21594-14678/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21594-14678/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21594-14678/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21594-14678/.minikube}
I0919 22:23:46.659492 69358 ubuntu.go:190] setting up certificates
I0919 22:23:46.659505 69358 provision.go:84] configureAuth start
I0919 22:23:46.659556 69358 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-326307-m02
I0919 22:23:46.679498 69358 provision.go:143] copyHostCerts
I0919 22:23:46.679551 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/certs/ca.pem -> /home/jenkins/minikube-integration/21594-14678/.minikube/ca.pem
I0919 22:23:46.679598 69358 exec_runner.go:144] found /home/jenkins/minikube-integration/21594-14678/.minikube/ca.pem, removing ...
I0919 22:23:46.679605 69358 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21594-14678/.minikube/ca.pem
I0919 22:23:46.679712 69358 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21594-14678/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21594-14678/.minikube/ca.pem (1082 bytes)
I0919 22:23:46.679851 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/certs/cert.pem -> /home/jenkins/minikube-integration/21594-14678/.minikube/cert.pem
I0919 22:23:46.679882 69358 exec_runner.go:144] found /home/jenkins/minikube-integration/21594-14678/.minikube/cert.pem, removing ...
I0919 22:23:46.679893 69358 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21594-14678/.minikube/cert.pem
I0919 22:23:46.679947 69358 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21594-14678/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21594-14678/.minikube/cert.pem (1123 bytes)
I0919 22:23:46.680043 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/certs/key.pem -> /home/jenkins/minikube-integration/21594-14678/.minikube/key.pem
I0919 22:23:46.680141 69358 exec_runner.go:144] found /home/jenkins/minikube-integration/21594-14678/.minikube/key.pem, removing ...
I0919 22:23:46.680185 69358 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21594-14678/.minikube/key.pem
I0919 22:23:46.680251 69358 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21594-14678/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21594-14678/.minikube/key.pem (1675 bytes)
I0919 22:23:46.680367 69358 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21594-14678/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21594-14678/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21594-14678/.minikube/certs/ca-key.pem org=jenkins.ha-326307-m02 san=[127.0.0.1 192.168.49.3 ha-326307-m02 localhost minikube]
I0919 22:23:46.869190 69358 provision.go:177] copyRemoteCerts
I0919 22:23:46.869251 69358 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0919 22:23:46.869285 69358 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-326307-m02
I0919 22:23:46.888798 69358 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32793 SSHKeyPath:/home/jenkins/minikube-integration/21594-14678/.minikube/machines/ha-326307-m02/id_rsa Username:docker}
I0919 22:23:46.988385 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/certs/ca.pem -> /etc/docker/ca.pem
I0919 22:23:46.988452 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I0919 22:23:47.018227 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/machines/server.pem -> /etc/docker/server.pem
I0919 22:23:47.018299 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/machines/server.pem --> /etc/docker/server.pem (1208 bytes)
I0919 22:23:47.046810 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/machines/server-key.pem -> /etc/docker/server-key.pem
I0919 22:23:47.046866 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I0919 22:23:47.074372 69358 provision.go:87] duration metric: took 414.855982ms to configureAuth
I0919 22:23:47.074400 69358 ubuntu.go:206] setting minikube options for container-runtime
I0919 22:23:47.074581 69358 config.go:182] Loaded profile config "ha-326307": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.0
I0919 22:23:47.074598 69358 machine.go:96] duration metric: took 918.712366ms to provisionDockerMachine
I0919 22:23:47.074607 69358 client.go:171] duration metric: took 6.600969352s to LocalClient.Create
I0919 22:23:47.074631 69358 start.go:167] duration metric: took 6.601023702s to libmachine.API.Create "ha-326307"
I0919 22:23:47.074642 69358 start.go:293] postStartSetup for "ha-326307-m02" (driver="docker")
I0919 22:23:47.074650 69358 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0919 22:23:47.074721 69358 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0919 22:23:47.074767 69358 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-326307-m02
I0919 22:23:47.094538 69358 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32793 SSHKeyPath:/home/jenkins/minikube-integration/21594-14678/.minikube/machines/ha-326307-m02/id_rsa Username:docker}
I0919 22:23:47.195213 69358 ssh_runner.go:195] Run: cat /etc/os-release
I0919 22:23:47.199088 69358 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0919 22:23:47.199139 69358 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0919 22:23:47.199181 69358 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0919 22:23:47.199191 69358 info.go:137] Remote host: Ubuntu 22.04.5 LTS
I0919 22:23:47.199215 69358 filesync.go:126] Scanning /home/jenkins/minikube-integration/21594-14678/.minikube/addons for local assets ...
I0919 22:23:47.199276 69358 filesync.go:126] Scanning /home/jenkins/minikube-integration/21594-14678/.minikube/files for local assets ...
I0919 22:23:47.199378 69358 filesync.go:149] local asset: /home/jenkins/minikube-integration/21594-14678/.minikube/files/etc/ssl/certs/182102.pem -> 182102.pem in /etc/ssl/certs
I0919 22:23:47.199394 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/files/etc/ssl/certs/182102.pem -> /etc/ssl/certs/182102.pem
I0919 22:23:47.199502 69358 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I0919 22:23:47.209642 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/files/etc/ssl/certs/182102.pem --> /etc/ssl/certs/182102.pem (1708 bytes)
I0919 22:23:47.240945 69358 start.go:296] duration metric: took 166.288086ms for postStartSetup
I0919 22:23:47.241383 69358 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-326307-m02
I0919 22:23:47.261061 69358 profile.go:143] Saving config to /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/config.json ...
I0919 22:23:47.261460 69358 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0919 22:23:47.261513 69358 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-326307-m02
I0919 22:23:47.280359 69358 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32793 SSHKeyPath:/home/jenkins/minikube-integration/21594-14678/.minikube/machines/ha-326307-m02/id_rsa Username:docker}
I0919 22:23:47.374609 69358 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0919 22:23:47.379255 69358 start.go:128] duration metric: took 6.908623332s to createHost
I0919 22:23:47.379283 69358 start.go:83] releasing machines lock for "ha-326307-m02", held for 6.908753842s
I0919 22:23:47.379346 69358 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-326307-m02
I0919 22:23:47.400418 69358 out.go:179] * Found network options:
I0919 22:23:47.401854 69358 out.go:179] - NO_PROXY=192.168.49.2
W0919 22:23:47.403072 69358 proxy.go:120] fail to check proxy env: Error ip not in block
W0919 22:23:47.403133 69358 proxy.go:120] fail to check proxy env: Error ip not in block
I0919 22:23:47.403263 69358 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0919 22:23:47.403266 69358 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0919 22:23:47.403326 69358 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-326307-m02
I0919 22:23:47.403332 69358 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-326307-m02
I0919 22:23:47.423928 69358 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32793 SSHKeyPath:/home/jenkins/minikube-integration/21594-14678/.minikube/machines/ha-326307-m02/id_rsa Username:docker}
I0919 22:23:47.424218 69358 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32793 SSHKeyPath:/home/jenkins/minikube-integration/21594-14678/.minikube/machines/ha-326307-m02/id_rsa Username:docker}
I0919 22:23:47.597529 69358 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0919 22:23:47.630263 69358 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0919 22:23:47.630334 69358 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0919 22:23:47.661706 69358 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
I0919 22:23:47.661733 69358 start.go:495] detecting cgroup driver to use...
I0919 22:23:47.661772 69358 detect.go:190] detected "systemd" cgroup driver on host os
I0919 22:23:47.661826 69358 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I0919 22:23:47.675485 69358 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0919 22:23:47.687726 69358 docker.go:218] disabling cri-docker service (if available) ...
I0919 22:23:47.687780 69358 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I0919 22:23:47.701818 69358 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I0919 22:23:47.717912 69358 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I0919 22:23:47.789825 69358 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I0919 22:23:47.863188 69358 docker.go:234] disabling docker service ...
I0919 22:23:47.863267 69358 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I0919 22:23:47.881757 69358 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I0919 22:23:47.893830 69358 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I0919 22:23:47.963004 69358 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I0919 22:23:48.034120 69358 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I0919 22:23:48.046843 69358 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0919 22:23:48.065279 69358 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I0919 22:23:48.078269 69358 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0919 22:23:48.089105 69358 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I0919 22:23:48.089186 69358 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I0919 22:23:48.099867 69358 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0919 22:23:48.111076 69358 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0919 22:23:48.122049 69358 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0919 22:23:48.132648 69358 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0919 22:23:48.142263 69358 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0919 22:23:48.152876 69358 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0919 22:23:48.163459 69358 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I0919 22:23:48.174096 69358 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0919 22:23:48.183483 69358 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0919 22:23:48.192780 69358 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:23:48.261004 69358 ssh_runner.go:195] Run: sudo systemctl restart containerd
I0919 22:23:48.364434 69358 start.go:542] Will wait 60s for socket path /run/containerd/containerd.sock
I0919 22:23:48.364508 69358 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I0919 22:23:48.368726 69358 start.go:563] Will wait 60s for crictl version
I0919 22:23:48.368792 69358 ssh_runner.go:195] Run: which crictl
I0919 22:23:48.372683 69358 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0919 22:23:48.409110 69358 start.go:579] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: 1.7.27
RuntimeApiVersion: v1
I0919 22:23:48.409200 69358 ssh_runner.go:195] Run: containerd --version
I0919 22:23:48.433389 69358 ssh_runner.go:195] Run: containerd --version
I0919 22:23:48.460529 69358 out.go:179] * Preparing Kubernetes v1.34.0 on containerd 1.7.27 ...
I0919 22:23:48.462207 69358 out.go:179] - env NO_PROXY=192.168.49.2
I0919 22:23:48.464087 69358 cli_runner.go:164] Run: docker network inspect ha-326307 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0919 22:23:48.482217 69358 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I0919 22:23:48.486620 69358 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0919 22:23:48.498806 69358 mustload.go:65] Loading cluster: ha-326307
I0919 22:23:48.499032 69358 config.go:182] Loaded profile config "ha-326307": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.0
I0919 22:23:48.499315 69358 cli_runner.go:164] Run: docker container inspect ha-326307 --format={{.State.Status}}
I0919 22:23:48.518576 69358 host.go:66] Checking if "ha-326307" exists ...
I0919 22:23:48.518850 69358 certs.go:68] Setting up /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307 for IP: 192.168.49.3
I0919 22:23:48.518866 69358 certs.go:194] generating shared ca certs ...
I0919 22:23:48.518885 69358 certs.go:226] acquiring lock for ca certs: {Name:mkd7a2e112725f042a76c7be63aef486d6b9bff2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:23:48.519012 69358 certs.go:235] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21594-14678/.minikube/ca.key
I0919 22:23:48.519082 69358 certs.go:235] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21594-14678/.minikube/proxy-client-ca.key
I0919 22:23:48.519096 69358 certs.go:256] generating profile certs ...
I0919 22:23:48.519222 69358 certs.go:359] skipping valid signed profile cert regeneration for "minikube-user": /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/client.key
I0919 22:23:48.519259 69358 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.key.3b537cd4
I0919 22:23:48.519288 69358 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.crt.3b537cd4 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2 192.168.49.3 192.168.49.254]
I0919 22:23:48.963393 69358 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.crt.3b537cd4 ...
I0919 22:23:48.963428 69358 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.crt.3b537cd4: {Name:mk381f64cc0991e3a6417e9586b9565eb7a8dbf2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:23:48.963635 69358 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.key.3b537cd4 ...
I0919 22:23:48.963660 69358 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.key.3b537cd4: {Name:mk4dbead0b9c36c7a3635520729a1eb2d4b33f13 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:23:48.963762 69358 certs.go:381] copying /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.crt.3b537cd4 -> /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.crt
I0919 22:23:48.963935 69358 certs.go:385] copying /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.key.3b537cd4 -> /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.key
I0919 22:23:48.964103 69358 certs.go:359] skipping valid signed profile cert regeneration for "aggregator": /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/proxy-client.key
I0919 22:23:48.964120 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/ca.crt -> /var/lib/minikube/certs/ca.crt
I0919 22:23:48.964138 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/ca.key -> /var/lib/minikube/certs/ca.key
I0919 22:23:48.964166 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/proxy-client-ca.crt -> /var/lib/minikube/certs/proxy-client-ca.crt
I0919 22:23:48.964183 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/proxy-client-ca.key -> /var/lib/minikube/certs/proxy-client-ca.key
I0919 22:23:48.964200 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.crt -> /var/lib/minikube/certs/apiserver.crt
I0919 22:23:48.964218 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.key -> /var/lib/minikube/certs/apiserver.key
I0919 22:23:48.964234 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/proxy-client.crt -> /var/lib/minikube/certs/proxy-client.crt
I0919 22:23:48.964251 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/proxy-client.key -> /var/lib/minikube/certs/proxy-client.key
I0919 22:23:48.964313 69358 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-14678/.minikube/certs/18210.pem (1338 bytes)
W0919 22:23:48.964355 69358 certs.go:480] ignoring /home/jenkins/minikube-integration/21594-14678/.minikube/certs/18210_empty.pem, impossibly tiny 0 bytes
I0919 22:23:48.964366 69358 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-14678/.minikube/certs/ca-key.pem (1679 bytes)
I0919 22:23:48.964406 69358 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-14678/.minikube/certs/ca.pem (1082 bytes)
I0919 22:23:48.964438 69358 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-14678/.minikube/certs/cert.pem (1123 bytes)
I0919 22:23:48.964471 69358 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-14678/.minikube/certs/key.pem (1675 bytes)
I0919 22:23:48.964528 69358 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-14678/.minikube/files/etc/ssl/certs/182102.pem (1708 bytes)
I0919 22:23:48.964570 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/certs/18210.pem -> /usr/share/ca-certificates/18210.pem
I0919 22:23:48.964592 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/files/etc/ssl/certs/182102.pem -> /usr/share/ca-certificates/182102.pem
I0919 22:23:48.964612 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/ca.crt -> /usr/share/ca-certificates/minikubeCA.pem
I0919 22:23:48.964731 69358 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-326307
I0919 22:23:48.983907 69358 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32788 SSHKeyPath:/home/jenkins/minikube-integration/21594-14678/.minikube/machines/ha-326307/id_rsa Username:docker}
I0919 22:23:49.073692 69358 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/sa.pub
I0919 22:23:49.078819 69358 ssh_runner.go:447] scp /var/lib/minikube/certs/sa.pub --> memory (451 bytes)
I0919 22:23:49.094234 69358 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/sa.key
I0919 22:23:49.099593 69358 ssh_runner.go:447] scp /var/lib/minikube/certs/sa.key --> memory (1679 bytes)
I0919 22:23:49.113663 69358 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/front-proxy-ca.crt
I0919 22:23:49.117744 69358 ssh_runner.go:447] scp /var/lib/minikube/certs/front-proxy-ca.crt --> memory (1123 bytes)
I0919 22:23:49.133048 69358 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/front-proxy-ca.key
I0919 22:23:49.136861 69358 ssh_runner.go:447] scp /var/lib/minikube/certs/front-proxy-ca.key --> memory (1679 bytes)
I0919 22:23:49.150734 69358 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/etcd/ca.crt
I0919 22:23:49.154901 69358 ssh_runner.go:447] scp /var/lib/minikube/certs/etcd/ca.crt --> memory (1094 bytes)
I0919 22:23:49.169388 69358 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/etcd/ca.key
I0919 22:23:49.173566 69358 ssh_runner.go:447] scp /var/lib/minikube/certs/etcd/ca.key --> memory (1675 bytes)
I0919 22:23:49.188070 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0919 22:23:49.215594 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I0919 22:23:49.243561 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0919 22:23:49.271624 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I0919 22:23:49.301814 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1436 bytes)
I0919 22:23:49.332556 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I0919 22:23:49.360723 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0919 22:23:49.388872 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I0919 22:23:49.417316 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/certs/18210.pem --> /usr/share/ca-certificates/18210.pem (1338 bytes)
I0919 22:23:49.448722 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/files/etc/ssl/certs/182102.pem --> /usr/share/ca-certificates/182102.pem (1708 bytes)
I0919 22:23:49.476877 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0919 22:23:49.504914 69358 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/sa.pub (451 bytes)
I0919 22:23:49.524969 69358 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/sa.key (1679 bytes)
I0919 22:23:49.544942 69358 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/front-proxy-ca.crt (1123 bytes)
I0919 22:23:49.564506 69358 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/front-proxy-ca.key (1679 bytes)
I0919 22:23:49.584887 69358 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/etcd/ca.crt (1094 bytes)
I0919 22:23:49.605725 69358 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/etcd/ca.key (1675 bytes)
I0919 22:23:49.625552 69358 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (744 bytes)
I0919 22:23:49.645811 69358 ssh_runner.go:195] Run: openssl version
I0919 22:23:49.652062 69358 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0919 22:23:49.664544 69358 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0919 22:23:49.668823 69358 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Sep 19 22:15 /usr/share/ca-certificates/minikubeCA.pem
I0919 22:23:49.668889 69358 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0919 22:23:49.676892 69358 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I0919 22:23:49.688737 69358 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/18210.pem && ln -fs /usr/share/ca-certificates/18210.pem /etc/ssl/certs/18210.pem"
I0919 22:23:49.699741 69358 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/18210.pem
I0919 22:23:49.703762 69358 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Sep 19 22:20 /usr/share/ca-certificates/18210.pem
I0919 22:23:49.703823 69358 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/18210.pem
I0919 22:23:49.711311 69358 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/18210.pem /etc/ssl/certs/51391683.0"
I0919 22:23:49.721987 69358 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/182102.pem && ln -fs /usr/share/ca-certificates/182102.pem /etc/ssl/certs/182102.pem"
I0919 22:23:49.732874 69358 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/182102.pem
I0919 22:23:49.737289 69358 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Sep 19 22:20 /usr/share/ca-certificates/182102.pem
I0919 22:23:49.737351 69358 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/182102.pem
I0919 22:23:49.745312 69358 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/182102.pem /etc/ssl/certs/3ec20f2e.0"
I0919 22:23:49.756384 69358 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0919 22:23:49.760242 69358 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I0919 22:23:49.760315 69358 kubeadm.go:926] updating node {m02 192.168.49.3 8443 v1.34.0 containerd true true} ...
I0919 22:23:49.760415 69358 kubeadm.go:938] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=ha-326307-m02 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.3
[Install]
config:
{KubernetesVersion:v1.34.0 ClusterName:ha-326307 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I0919 22:23:49.760438 69358 kube-vip.go:115] generating kube-vip config ...
I0919 22:23:49.760476 69358 ssh_runner.go:195] Run: sudo sh -c "lsmod | grep ip_vs"
I0919 22:23:49.773427 69358 kube-vip.go:163] giving up enabling control-plane load-balancing as ipvs kernel modules appears not to be available: sudo sh -c "lsmod | grep ip_vs": Process exited with status 1
stdout:
stderr:
I0919 22:23:49.773499 69358 kube-vip.go:137] kube-vip config:
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
name: kube-vip
namespace: kube-system
spec:
containers:
- args:
- manager
env:
- name: vip_arp
value: "true"
- name: port
value: "8443"
- name: vip_nodename
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: vip_interface
value: eth0
- name: vip_cidr
value: "32"
- name: dns_mode
value: first
- name: cp_enable
value: "true"
- name: cp_namespace
value: kube-system
- name: vip_leaderelection
value: "true"
- name: vip_leasename
value: plndr-cp-lock
- name: vip_leaseduration
value: "5"
- name: vip_renewdeadline
value: "3"
- name: vip_retryperiod
value: "1"
- name: address
value: 192.168.49.254
- name: prometheus_server
value: :2112
image: ghcr.io/kube-vip/kube-vip:v1.0.0
imagePullPolicy: IfNotPresent
name: kube-vip
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
- NET_RAW
volumeMounts:
- mountPath: /etc/kubernetes/admin.conf
name: kubeconfig
hostAliases:
- hostnames:
- kubernetes
ip: 127.0.0.1
hostNetwork: true
volumes:
- hostPath:
path: "/etc/kubernetes/admin.conf"
name: kubeconfig
status: {}
I0919 22:23:49.773549 69358 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.0
I0919 22:23:49.784237 69358 binaries.go:44] Found k8s binaries, skipping transfer
I0919 22:23:49.784306 69358 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /etc/kubernetes/manifests
I0919 22:23:49.794534 69358 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (317 bytes)
I0919 22:23:49.814529 69358 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0919 22:23:49.837846 69358 ssh_runner.go:362] scp memory --> /etc/kubernetes/manifests/kube-vip.yaml (1358 bytes)
I0919 22:23:49.859421 69358 ssh_runner.go:195] Run: grep 192.168.49.254 control-plane.minikube.internal$ /etc/hosts
I0919 22:23:49.863859 69358 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.254 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0919 22:23:49.876721 69358 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:23:49.948089 69358 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0919 22:23:49.971010 69358 host.go:66] Checking if "ha-326307" exists ...
I0919 22:23:49.971327 69358 start.go:317] joinCluster: &{Name:ha-326307 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:ha-326307 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[]
DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true} {Name:m02 IP:192.168.49.3 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[]
MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0919 22:23:49.971508 69358 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm token create --print-join-command --ttl=0"
I0919 22:23:49.971618 69358 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-326307
I0919 22:23:49.992535 69358 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32788 SSHKeyPath:/home/jenkins/minikube-integration/21594-14678/.minikube/machines/ha-326307/id_rsa Username:docker}
I0919 22:23:50.137695 69358 start.go:343] trying to join control-plane node "m02" to cluster: &{Name:m02 IP:192.168.49.3 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I0919 22:23:50.137740 69358 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm join control-plane.minikube.internal:8443 --token kb90tj.om7zof6htice1y8z --discovery-token-ca-cert-hash sha256:dae3dd920fb027024a058f7784382f806dfdbf0483a893c299b72dd41dc8aff6 --ignore-preflight-errors=all --cri-socket unix:///run/containerd/containerd.sock --node-name=ha-326307-m02 --control-plane --apiserver-advertise-address=192.168.49.3 --apiserver-bind-port=8443"
I0919 22:24:08.633363 69358 ssh_runner.go:235] Completed: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm join control-plane.minikube.internal:8443 --token kb90tj.om7zof6htice1y8z --discovery-token-ca-cert-hash sha256:dae3dd920fb027024a058f7784382f806dfdbf0483a893c299b72dd41dc8aff6 --ignore-preflight-errors=all --cri-socket unix:///run/containerd/containerd.sock --node-name=ha-326307-m02 --control-plane --apiserver-advertise-address=192.168.49.3 --apiserver-bind-port=8443": (18.495537277s)
I0919 22:24:08.633404 69358 ssh_runner.go:195] Run: /bin/bash -c "sudo systemctl daemon-reload && sudo systemctl enable kubelet && sudo systemctl start kubelet"
I0919 22:24:08.849981 69358 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes ha-326307-m02 minikube.k8s.io/updated_at=2025_09_19T22_24_08_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=6e37ee63f758843bb5fe33c3a528c564c4b83d53 minikube.k8s.io/name=ha-326307 minikube.k8s.io/primary=false
I0919 22:24:08.928109 69358 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig taint nodes ha-326307-m02 node-role.kubernetes.io/control-plane:NoSchedule-
I0919 22:24:09.011507 69358 start.go:319] duration metric: took 19.040175049s to joinCluster
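The m02 join above reduces to two commands on top of the first control plane: kubeadm token create --print-join-command --ttl=0 to mint a join line, and that line re-run on the new node with --control-plane plus the advertise-address and bind-port flags visible in the logged command. A minimal Go sketch of that flow (illustrative only, not minikube's joinCluster code; it assumes kubeadm is on PATH and that the snippet runs where the commands should execute):

// Illustrative sketch of the join flow above, not minikube's joinCluster code.
// Assumes kubeadm is on PATH and that this runs where the commands should execute.
package main

import (
    "fmt"
    "os/exec"
    "strings"
)

func main() {
    // Mint a join command with a non-expiring bootstrap token (--ttl=0), as in the log.
    out, err := exec.Command("kubeadm", "token", "create", "--print-join-command", "--ttl=0").Output()
    if err != nil {
        panic(err)
    }
    join := strings.TrimSpace(string(out))

    // Promote it to a control-plane join and pin the advertise address and bind port,
    // mirroring the extra flags that appear in the logged command for m02.
    join += " --control-plane --apiserver-advertise-address=192.168.49.3 --apiserver-bind-port=8443"
    fmt.Println(join)
}

With --ttl=0 the bootstrap token does not expire, so the printed join line stays usable for the rest of the cluster bring-up.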
I0919 22:24:09.011590 69358 start.go:235] Will wait 6m0s for node &{Name:m02 IP:192.168.49.3 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I0919 22:24:09.011816 69358 config.go:182] Loaded profile config "ha-326307": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.0
I0919 22:24:09.013756 69358 out.go:179] * Verifying Kubernetes components...
I0919 22:24:09.015232 69358 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:24:09.115618 69358 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0919 22:24:09.130578 69358 kapi.go:59] client config for ha-326307: &rest.Config{Host:"https://192.168.49.254:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/client.crt", KeyFile:"/home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/client.key", CAFile:"/home/jenkins/minikube-integration/21594-14678/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(nil)
}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x27f4a00), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), WarningHandlerWithContext:rest.WarningHandlerWithContext(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
W0919 22:24:09.130645 69358 kubeadm.go:483] Overriding stale ClientConfig host https://192.168.49.254:8443 with https://192.168.49.2:8443
I0919 22:24:09.130869 69358 node_ready.go:35] waiting up to 6m0s for node "ha-326307-m02" to be "Ready" ...
W0919 22:24:11.134373 69358 node_ready.go:57] node "ha-326307-m02" has "Ready":"False" status (will retry)
I0919 22:24:11.634655 69358 node_ready.go:49] node "ha-326307-m02" is "Ready"
I0919 22:24:11.634683 69358 node_ready.go:38] duration metric: took 2.503796185s for node "ha-326307-m02" to be "Ready" ...
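The node_ready wait above polls the API for the node's Ready condition until it turns True or the 6m0s budget runs out. A minimal client-go sketch of the same check (an illustration, not minikube's node_ready.go; the kubeconfig path and node name are the ones shown in this log):

// Sketch: poll a node's Ready condition with client-go.
package main

import (
    "context"
    "fmt"
    "time"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    cfg, err := clientcmd.BuildConfigFromFlags("", "/var/lib/minikube/kubeconfig")
    if err != nil {
        panic(err)
    }
    client := kubernetes.NewForConfigOrDie(cfg)

    deadline := time.Now().Add(6 * time.Minute)
    for time.Now().Before(deadline) {
        node, err := client.CoreV1().Nodes().Get(context.TODO(), "ha-326307-m02", metav1.GetOptions{})
        if err == nil {
            for _, c := range node.Status.Conditions {
                if c.Type == corev1.NodeReady && c.Status == corev1.ConditionTrue {
                    fmt.Println("node is Ready")
                    return
                }
            }
        }
        time.Sleep(500 * time.Millisecond)
    }
    fmt.Println("timed out waiting for Ready")
}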
I0919 22:24:11.634697 69358 api_server.go:52] waiting for apiserver process to appear ...
I0919 22:24:11.634751 69358 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0919 22:24:11.647782 69358 api_server.go:72] duration metric: took 2.636155477s to wait for apiserver process to appear ...
I0919 22:24:11.647812 69358 api_server.go:88] waiting for apiserver healthz status ...
I0919 22:24:11.647848 69358 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
I0919 22:24:11.652005 69358 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
ok
I0919 22:24:11.652952 69358 api_server.go:141] control plane version: v1.34.0
I0919 22:24:11.652975 69358 api_server.go:131] duration metric: took 5.15649ms to wait for apiserver health ...
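The healthz wait is just an HTTPS GET against the control-plane endpoint, expecting status 200 and the literal body "ok" as logged above. A small sketch of that probe, assuming the minikube CA path that appears earlier in this log:

// Sketch: probe the apiserver /healthz endpoint over TLS.
package main

import (
    "crypto/tls"
    "crypto/x509"
    "fmt"
    "io"
    "net/http"
    "os"
)

func main() {
    caPEM, err := os.ReadFile("/home/jenkins/minikube-integration/21594-14678/.minikube/ca.crt")
    if err != nil {
        panic(err)
    }
    pool := x509.NewCertPool()
    pool.AppendCertsFromPEM(caPEM)

    client := &http.Client{Transport: &http.Transport{TLSClientConfig: &tls.Config{RootCAs: pool}}}
    resp, err := client.Get("https://192.168.49.2:8443/healthz")
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    body, _ := io.ReadAll(resp.Body)
    fmt.Printf("%d %s\n", resp.StatusCode, body) // expect: 200 ok
}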
I0919 22:24:11.652984 69358 system_pods.go:43] waiting for kube-system pods to appear ...
I0919 22:24:11.657535 69358 system_pods.go:59] 17 kube-system pods found
I0919 22:24:11.657569 69358 system_pods.go:61] "coredns-66bc5c9577-9j5pw" [7d073e38-b63e-494d-bda0-3dde372a950b] Running
I0919 22:24:11.657577 69358 system_pods.go:61] "coredns-66bc5c9577-wqvzd" [64376c4d-1b82-490d-887d-7f628b134014] Running
I0919 22:24:11.657581 69358 system_pods.go:61] "etcd-ha-326307" [cc755641-9756-42fe-94ea-76d3167a2f67] Running
I0919 22:24:11.657586 69358 system_pods.go:61] "etcd-ha-326307-m02" [fe655813-ee01-420d-a127-9e43d85b3674] Pending
I0919 22:24:11.657591 69358 system_pods.go:61] "kindnet-gxnzs" [4fa827fc-0ba7-49b7-a225-e36d76241d92] Running
I0919 22:24:11.657598 69358 system_pods.go:61] "kindnet-mk6pv" [71a20992-8279-4040-9edc-bedef6e7b570] Pending: PodScheduled:SchedulerError (pod 71a20992-8279-4040-9edc-bedef6e7b570(kube-system/kindnet-mk6pv) is in the cache, so can't be assumed)
I0919 22:24:11.657604 69358 system_pods.go:61] "kube-apiserver-ha-326307" [48020293-8f00-4ab7-8361-d21025061653] Running
I0919 22:24:11.657609 69358 system_pods.go:61] "kube-apiserver-ha-326307-m02" [568fe413-bf13-4b89-867f-a74dacede73f] Pending
I0919 22:24:11.657616 69358 system_pods.go:61] "kube-controller-manager-ha-326307" [a62d94c7-7f48-4b34-9985-58de1d7d32bc] Running
I0919 22:24:11.657621 69358 system_pods.go:61] "kube-controller-manager-ha-326307-m02" [0930e36a-1e9b-4f15-ac20-4fb1696fa911] Pending
I0919 22:24:11.657626 69358 system_pods.go:61] "kube-proxy-8kxtv" [70be5fcc-7ab6-4eb1-870d-988fee1a01bb] Running
I0919 22:24:11.657636 69358 system_pods.go:61] "kube-proxy-q8mtj" [6e3896c8-f771-462e-888d-942ebc96a7c2] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kube-proxy-q8mtj": pod kube-proxy-q8mtj is already assigned to node "ha-326307-m02")
I0919 22:24:11.657642 69358 system_pods.go:61] "kube-scheduler-ha-326307" [da6af764-e4e6-48aa-9569-577e4379692f] Running
I0919 22:24:11.657649 69358 system_pods.go:61] "kube-scheduler-ha-326307-m02" [f6878d24-de85-4cf9-a49f-7ff55bf06519] Pending
I0919 22:24:11.657654 69358 system_pods.go:61] "kube-vip-ha-326307" [36baecf0-60bd-41c0-a3c8-45e4f6ebddad] Running
I0919 22:24:11.657660 69358 system_pods.go:61] "kube-vip-ha-326307-m02" [24b5d637-78d1-41f7-8e00-40fee7f9e60f] Pending
I0919 22:24:11.657665 69358 system_pods.go:61] "storage-provisioner" [cafe04c6-2dce-4b93-b6d1-205efc39b360] Running
I0919 22:24:11.657673 69358 system_pods.go:74] duration metric: took 4.68298ms to wait for pod list to return data ...
I0919 22:24:11.657687 69358 default_sa.go:34] waiting for default service account to be created ...
I0919 22:24:11.660430 69358 default_sa.go:45] found service account: "default"
I0919 22:24:11.660456 69358 default_sa.go:55] duration metric: took 2.762581ms for default service account to be created ...
I0919 22:24:11.660467 69358 system_pods.go:116] waiting for k8s-apps to be running ...
I0919 22:24:11.664515 69358 system_pods.go:86] 17 kube-system pods found
I0919 22:24:11.664549 69358 system_pods.go:89] "coredns-66bc5c9577-9j5pw" [7d073e38-b63e-494d-bda0-3dde372a950b] Running
I0919 22:24:11.664557 69358 system_pods.go:89] "coredns-66bc5c9577-wqvzd" [64376c4d-1b82-490d-887d-7f628b134014] Running
I0919 22:24:11.664563 69358 system_pods.go:89] "etcd-ha-326307" [cc755641-9756-42fe-94ea-76d3167a2f67] Running
I0919 22:24:11.664567 69358 system_pods.go:89] "etcd-ha-326307-m02" [fe655813-ee01-420d-a127-9e43d85b3674] Pending
I0919 22:24:11.664574 69358 system_pods.go:89] "kindnet-gxnzs" [4fa827fc-0ba7-49b7-a225-e36d76241d92] Running
I0919 22:24:11.664583 69358 system_pods.go:89] "kindnet-mk6pv" [71a20992-8279-4040-9edc-bedef6e7b570] Pending: PodScheduled:SchedulerError (pod 71a20992-8279-4040-9edc-bedef6e7b570(kube-system/kindnet-mk6pv) is in the cache, so can't be assumed)
I0919 22:24:11.664590 69358 system_pods.go:89] "kube-apiserver-ha-326307" [48020293-8f00-4ab7-8361-d21025061653] Running
I0919 22:24:11.664594 69358 system_pods.go:89] "kube-apiserver-ha-326307-m02" [568fe413-bf13-4b89-867f-a74dacede73f] Pending
I0919 22:24:11.664600 69358 system_pods.go:89] "kube-controller-manager-ha-326307" [a62d94c7-7f48-4b34-9985-58de1d7d32bc] Running
I0919 22:24:11.664606 69358 system_pods.go:89] "kube-controller-manager-ha-326307-m02" [0930e36a-1e9b-4f15-ac20-4fb1696fa911] Pending
I0919 22:24:11.664615 69358 system_pods.go:89] "kube-proxy-8kxtv" [70be5fcc-7ab6-4eb1-870d-988fee1a01bb] Running
I0919 22:24:11.664623 69358 system_pods.go:89] "kube-proxy-q8mtj" [6e3896c8-f771-462e-888d-942ebc96a7c2] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kube-proxy-q8mtj": pod kube-proxy-q8mtj is already assigned to node "ha-326307-m02")
I0919 22:24:11.664629 69358 system_pods.go:89] "kube-scheduler-ha-326307" [da6af764-e4e6-48aa-9569-577e4379692f] Running
I0919 22:24:11.664637 69358 system_pods.go:89] "kube-scheduler-ha-326307-m02" [f6878d24-de85-4cf9-a49f-7ff55bf06519] Pending
I0919 22:24:11.664643 69358 system_pods.go:89] "kube-vip-ha-326307" [36baecf0-60bd-41c0-a3c8-45e4f6ebddad] Running
I0919 22:24:11.664649 69358 system_pods.go:89] "kube-vip-ha-326307-m02" [24b5d637-78d1-41f7-8e00-40fee7f9e60f] Pending
I0919 22:24:11.664653 69358 system_pods.go:89] "storage-provisioner" [cafe04c6-2dce-4b93-b6d1-205efc39b360] Running
I0919 22:24:11.664663 69358 system_pods.go:126] duration metric: took 4.189005ms to wait for k8s-apps to be running ...
I0919 22:24:11.664676 69358 system_svc.go:44] waiting for kubelet service to be running ....
I0919 22:24:11.664734 69358 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0919 22:24:11.677679 69358 system_svc.go:56] duration metric: took 12.991783ms WaitForService to wait for kubelet
I0919 22:24:11.677718 69358 kubeadm.go:578] duration metric: took 2.666095008s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0919 22:24:11.677741 69358 node_conditions.go:102] verifying NodePressure condition ...
I0919 22:24:11.681219 69358 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0919 22:24:11.681249 69358 node_conditions.go:123] node cpu capacity is 8
I0919 22:24:11.681276 69358 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0919 22:24:11.681282 69358 node_conditions.go:123] node cpu capacity is 8
I0919 22:24:11.681288 69358 node_conditions.go:105] duration metric: took 3.540774ms to run NodePressure ...
I0919 22:24:11.681302 69358 start.go:241] waiting for startup goroutines ...
I0919 22:24:11.681336 69358 start.go:255] writing updated cluster config ...
I0919 22:24:11.683465 69358 out.go:203]
I0919 22:24:11.685336 69358 config.go:182] Loaded profile config "ha-326307": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.0
I0919 22:24:11.685480 69358 profile.go:143] Saving config to /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/config.json ...
I0919 22:24:11.687190 69358 out.go:179] * Starting "ha-326307-m03" control-plane node in "ha-326307" cluster
I0919 22:24:11.688774 69358 cache.go:123] Beginning downloading kic base image for docker with containerd
I0919 22:24:11.690230 69358 out.go:179] * Pulling base image v0.0.48 ...
I0919 22:24:11.691529 69358 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime containerd
I0919 22:24:11.691564 69358 image.go:81] Checking for gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon
I0919 22:24:11.691570 69358 cache.go:58] Caching tarball of preloaded images
I0919 22:24:11.691776 69358 preload.go:172] Found /home/jenkins/minikube-integration/21594-14678/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-containerd-overlay2-amd64.tar.lz4 in cache, skipping download
I0919 22:24:11.691792 69358 cache.go:61] Finished verifying existence of preloaded tar for v1.34.0 on containerd
I0919 22:24:11.691940 69358 profile.go:143] Saving config to /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/config.json ...
I0919 22:24:11.714494 69358 image.go:100] Found gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon, skipping pull
I0919 22:24:11.714516 69358 cache.go:147] gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 exists in daemon, skipping load
I0919 22:24:11.714538 69358 cache.go:232] Successfully downloaded all kic artifacts
I0919 22:24:11.714564 69358 start.go:360] acquireMachinesLock for ha-326307-m03: {Name:mk07818636650a6efffb19d787e84a34d6f1dd98 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0919 22:24:11.714717 69358 start.go:364] duration metric: took 129.412µs to acquireMachinesLock for "ha-326307-m03"
I0919 22:24:11.714749 69358 start.go:93] Provisioning new machine with config: &{Name:ha-326307 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:ha-326307 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerN
ames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true} {Name:m02 IP:192.168.49.3 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true} {Name:m03 IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:false efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:fal
se kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:false storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPa
th: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name:m03 IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I0919 22:24:11.714883 69358 start.go:125] createHost starting for "m03" (driver="docker")
I0919 22:24:11.717146 69358 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I0919 22:24:11.717288 69358 start.go:159] libmachine.API.Create for "ha-326307" (driver="docker")
I0919 22:24:11.717325 69358 client.go:168] LocalClient.Create starting
I0919 22:24:11.717396 69358 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21594-14678/.minikube/certs/ca.pem
I0919 22:24:11.717429 69358 main.go:141] libmachine: Decoding PEM data...
I0919 22:24:11.717444 69358 main.go:141] libmachine: Parsing certificate...
I0919 22:24:11.717499 69358 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21594-14678/.minikube/certs/cert.pem
I0919 22:24:11.717523 69358 main.go:141] libmachine: Decoding PEM data...
I0919 22:24:11.717531 69358 main.go:141] libmachine: Parsing certificate...
I0919 22:24:11.717757 69358 cli_runner.go:164] Run: docker network inspect ha-326307 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0919 22:24:11.736709 69358 network_create.go:77] Found existing network {name:ha-326307 subnet:0xc001c6a9f0 gateway:[0 0 0 0 0 0 0 0 0 0 255 255 192 168 49 1] mtu:1500}
I0919 22:24:11.736749 69358 kic.go:121] calculated static IP "192.168.49.4" for the "ha-326307-m03" container
I0919 22:24:11.736838 69358 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0919 22:24:11.757855 69358 cli_runner.go:164] Run: docker volume create ha-326307-m03 --label name.minikube.sigs.k8s.io=ha-326307-m03 --label created_by.minikube.sigs.k8s.io=true
I0919 22:24:11.780198 69358 oci.go:103] Successfully created a docker volume ha-326307-m03
I0919 22:24:11.780287 69358 cli_runner.go:164] Run: docker run --rm --name ha-326307-m03-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=ha-326307-m03 --entrypoint /usr/bin/test -v ha-326307-m03:/var gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -d /var/lib
I0919 22:24:12.269719 69358 oci.go:107] Successfully prepared a docker volume ha-326307-m03
I0919 22:24:12.269772 69358 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime containerd
I0919 22:24:12.269795 69358 kic.go:194] Starting extracting preloaded images to volume ...
I0919 22:24:12.269864 69358 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21594-14678/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v ha-326307-m03:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -I lz4 -xf /preloaded.tar -C /extractDir
I0919 22:24:16.658999 69358 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21594-14678/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v ha-326307-m03:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -I lz4 -xf /preloaded.tar -C /extractDir: (4.389088771s)
I0919 22:24:16.659030 69358 kic.go:203] duration metric: took 4.389232064s to extract preloaded images to volume ...
W0919 22:24:16.659114 69358 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
W0919 22:24:16.659151 69358 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.
I0919 22:24:16.659211 69358 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0919 22:24:16.714324 69358 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname ha-326307-m03 --name ha-326307-m03 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=ha-326307-m03 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=ha-326307-m03 --network ha-326307 --ip 192.168.49.4 --volume ha-326307-m03:/var --security-opt apparmor=unconfined --memory=3072mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1
I0919 22:24:17.029039 69358 cli_runner.go:164] Run: docker container inspect ha-326307-m03 --format={{.State.Running}}
I0919 22:24:17.050534 69358 cli_runner.go:164] Run: docker container inspect ha-326307-m03 --format={{.State.Status}}
I0919 22:24:17.070017 69358 cli_runner.go:164] Run: docker exec ha-326307-m03 stat /var/lib/dpkg/alternatives/iptables
I0919 22:24:17.125252 69358 oci.go:144] the created container "ha-326307-m03" has a running status.
I0919 22:24:17.125293 69358 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21594-14678/.minikube/machines/ha-326307-m03/id_rsa...
I0919 22:24:17.618351 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/machines/ha-326307-m03/id_rsa.pub -> /home/docker/.ssh/authorized_keys
I0919 22:24:17.618395 69358 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21594-14678/.minikube/machines/ha-326307-m03/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0919 22:24:17.646956 69358 cli_runner.go:164] Run: docker container inspect ha-326307-m03 --format={{.State.Status}}
I0919 22:24:17.667176 69358 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0919 22:24:17.667203 69358 kic_runner.go:114] Args: [docker exec --privileged ha-326307-m03 chown docker:docker /home/docker/.ssh/authorized_keys]
I0919 22:24:17.713667 69358 cli_runner.go:164] Run: docker container inspect ha-326307-m03 --format={{.State.Status}}
I0919 22:24:17.734276 69358 machine.go:93] provisionDockerMachine start ...
I0919 22:24:17.734370 69358 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-326307-m03
I0919 22:24:17.755726 69358 main.go:141] libmachine: Using SSH client type: native
I0919 22:24:17.755941 69358 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32798 <nil> <nil>}
I0919 22:24:17.755953 69358 main.go:141] libmachine: About to run SSH command:
hostname
I0919 22:24:17.894482 69358 main.go:141] libmachine: SSH cmd err, output: <nil>: ha-326307-m03
I0919 22:24:17.894512 69358 ubuntu.go:182] provisioning hostname "ha-326307-m03"
I0919 22:24:17.894572 69358 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-326307-m03
I0919 22:24:17.914204 69358 main.go:141] libmachine: Using SSH client type: native
I0919 22:24:17.914507 69358 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32798 <nil> <nil>}
I0919 22:24:17.914530 69358 main.go:141] libmachine: About to run SSH command:
sudo hostname ha-326307-m03 && echo "ha-326307-m03" | sudo tee /etc/hostname
I0919 22:24:18.068724 69358 main.go:141] libmachine: SSH cmd err, output: <nil>: ha-326307-m03
I0919 22:24:18.068805 69358 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-326307-m03
I0919 22:24:18.088244 69358 main.go:141] libmachine: Using SSH client type: native
I0919 22:24:18.088504 69358 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32798 <nil> <nil>}
I0919 22:24:18.088525 69358 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\sha-326307-m03' /etc/hosts; then
  if grep -xq '127.0.1.1\s.*' /etc/hosts; then
    sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 ha-326307-m03/g' /etc/hosts;
  else
    echo '127.0.1.1 ha-326307-m03' | sudo tee -a /etc/hosts;
  fi
fi
I0919 22:24:18.227353 69358 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0919 22:24:18.227390 69358 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21594-14678/.minikube CaCertPath:/home/jenkins/minikube-integration/21594-14678/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21594-14678/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21594-14678/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21594-14678/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21594-14678/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21594-14678/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21594-14678/.minikube}
I0919 22:24:18.227421 69358 ubuntu.go:190] setting up certificates
I0919 22:24:18.227433 69358 provision.go:84] configureAuth start
I0919 22:24:18.227496 69358 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-326307-m03
I0919 22:24:18.247948 69358 provision.go:143] copyHostCerts
I0919 22:24:18.247989 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/certs/ca.pem -> /home/jenkins/minikube-integration/21594-14678/.minikube/ca.pem
I0919 22:24:18.248023 69358 exec_runner.go:144] found /home/jenkins/minikube-integration/21594-14678/.minikube/ca.pem, removing ...
I0919 22:24:18.248029 69358 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21594-14678/.minikube/ca.pem
I0919 22:24:18.248096 69358 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21594-14678/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21594-14678/.minikube/ca.pem (1082 bytes)
I0919 22:24:18.248231 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/certs/cert.pem -> /home/jenkins/minikube-integration/21594-14678/.minikube/cert.pem
I0919 22:24:18.248289 69358 exec_runner.go:144] found /home/jenkins/minikube-integration/21594-14678/.minikube/cert.pem, removing ...
I0919 22:24:18.248299 69358 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21594-14678/.minikube/cert.pem
I0919 22:24:18.248338 69358 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21594-14678/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21594-14678/.minikube/cert.pem (1123 bytes)
I0919 22:24:18.248404 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/certs/key.pem -> /home/jenkins/minikube-integration/21594-14678/.minikube/key.pem
I0919 22:24:18.248423 69358 exec_runner.go:144] found /home/jenkins/minikube-integration/21594-14678/.minikube/key.pem, removing ...
I0919 22:24:18.248427 69358 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21594-14678/.minikube/key.pem
I0919 22:24:18.248457 69358 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21594-14678/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21594-14678/.minikube/key.pem (1675 bytes)
I0919 22:24:18.248512 69358 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21594-14678/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21594-14678/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21594-14678/.minikube/certs/ca-key.pem org=jenkins.ha-326307-m03 san=[127.0.0.1 192.168.49.4 ha-326307-m03 localhost minikube]
I0919 22:24:18.393257 69358 provision.go:177] copyRemoteCerts
I0919 22:24:18.393319 69358 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0919 22:24:18.393353 69358 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-326307-m03
I0919 22:24:18.412748 69358 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32798 SSHKeyPath:/home/jenkins/minikube-integration/21594-14678/.minikube/machines/ha-326307-m03/id_rsa Username:docker}
I0919 22:24:18.514005 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/certs/ca.pem -> /etc/docker/ca.pem
I0919 22:24:18.514092 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I0919 22:24:18.542657 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/machines/server.pem -> /etc/docker/server.pem
I0919 22:24:18.542733 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/machines/server.pem --> /etc/docker/server.pem (1208 bytes)
I0919 22:24:18.569691 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/machines/server-key.pem -> /etc/docker/server-key.pem
I0919 22:24:18.569759 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I0919 22:24:18.596329 69358 provision.go:87] duration metric: took 368.876183ms to configureAuth
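configureAuth above issues a server certificate for the new machine whose SANs cover 127.0.0.1, the node IP 192.168.49.4, the hostname ha-326307-m03 and the minikube aliases, signed by the shared CA. A compressed crypto/x509 sketch of issuing such a SAN certificate (for brevity the CA is generated in memory here instead of loading ca.pem/ca-key.pem, and error handling is elided):

// Sketch: issue a server certificate whose SANs match the new node, signed by a CA.
package main

import (
    "crypto/rand"
    "crypto/rsa"
    "crypto/x509"
    "crypto/x509/pkix"
    "encoding/pem"
    "math/big"
    "net"
    "os"
    "time"
)

func main() {
    // In-memory CA standing in for ca.pem / ca-key.pem (errors ignored for brevity).
    caKey, _ := rsa.GenerateKey(rand.Reader, 2048)
    caTmpl := &x509.Certificate{
        SerialNumber:          big.NewInt(1),
        Subject:               pkix.Name{CommonName: "minikubeCA"},
        NotBefore:             time.Now(),
        NotAfter:              time.Now().Add(3 * 365 * 24 * time.Hour),
        IsCA:                  true,
        KeyUsage:              x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature,
        BasicConstraintsValid: true,
    }
    caDER, _ := x509.CreateCertificate(rand.Reader, caTmpl, caTmpl, &caKey.PublicKey, caKey)
    caCert, _ := x509.ParseCertificate(caDER)

    // Server certificate with the SANs listed in the log line above.
    srvKey, _ := rsa.GenerateKey(rand.Reader, 2048)
    srvTmpl := &x509.Certificate{
        SerialNumber: big.NewInt(2),
        Subject:      pkix.Name{CommonName: "minikube", Organization: []string{"jenkins.ha-326307-m03"}},
        NotBefore:    time.Now(),
        NotAfter:     time.Now().Add(3 * 365 * 24 * time.Hour),
        KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
        ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
        DNSNames:     []string{"ha-326307-m03", "localhost", "minikube"},
        IPAddresses:  []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("192.168.49.4")},
    }
    srvDER, _ := x509.CreateCertificate(rand.Reader, srvTmpl, caCert, &srvKey.PublicKey, caKey)

    pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: srvDER})
}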
I0919 22:24:18.596357 69358 ubuntu.go:206] setting minikube options for container-runtime
I0919 22:24:18.596551 69358 config.go:182] Loaded profile config "ha-326307": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.0
I0919 22:24:18.596562 69358 machine.go:96] duration metric: took 862.263986ms to provisionDockerMachine
I0919 22:24:18.596567 69358 client.go:171] duration metric: took 6.879237415s to LocalClient.Create
I0919 22:24:18.596586 69358 start.go:167] duration metric: took 6.879300568s to libmachine.API.Create "ha-326307"
I0919 22:24:18.596594 69358 start.go:293] postStartSetup for "ha-326307-m03" (driver="docker")
I0919 22:24:18.596602 69358 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0919 22:24:18.596644 69358 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0919 22:24:18.596677 69358 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-326307-m03
I0919 22:24:18.615349 69358 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32798 SSHKeyPath:/home/jenkins/minikube-integration/21594-14678/.minikube/machines/ha-326307-m03/id_rsa Username:docker}
I0919 22:24:18.717907 69358 ssh_runner.go:195] Run: cat /etc/os-release
I0919 22:24:18.722093 69358 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0919 22:24:18.722137 69358 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0919 22:24:18.722150 69358 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0919 22:24:18.722173 69358 info.go:137] Remote host: Ubuntu 22.04.5 LTS
I0919 22:24:18.722186 69358 filesync.go:126] Scanning /home/jenkins/minikube-integration/21594-14678/.minikube/addons for local assets ...
I0919 22:24:18.722248 69358 filesync.go:126] Scanning /home/jenkins/minikube-integration/21594-14678/.minikube/files for local assets ...
I0919 22:24:18.722356 69358 filesync.go:149] local asset: /home/jenkins/minikube-integration/21594-14678/.minikube/files/etc/ssl/certs/182102.pem -> 182102.pem in /etc/ssl/certs
I0919 22:24:18.722372 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/files/etc/ssl/certs/182102.pem -> /etc/ssl/certs/182102.pem
I0919 22:24:18.722580 69358 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I0919 22:24:18.732899 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/files/etc/ssl/certs/182102.pem --> /etc/ssl/certs/182102.pem (1708 bytes)
I0919 22:24:18.766453 69358 start.go:296] duration metric: took 169.843532ms for postStartSetup
I0919 22:24:18.766899 69358 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-326307-m03
I0919 22:24:18.786322 69358 profile.go:143] Saving config to /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/config.json ...
I0919 22:24:18.786775 69358 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0919 22:24:18.786833 69358 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-326307-m03
I0919 22:24:18.806377 69358 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32798 SSHKeyPath:/home/jenkins/minikube-integration/21594-14678/.minikube/machines/ha-326307-m03/id_rsa Username:docker}
I0919 22:24:18.901798 69358 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0919 22:24:18.907121 69358 start.go:128] duration metric: took 7.192223106s to createHost
I0919 22:24:18.907180 69358 start.go:83] releasing machines lock for "ha-326307-m03", held for 7.192445142s
I0919 22:24:18.907266 69358 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-326307-m03
I0919 22:24:18.929545 69358 out.go:179] * Found network options:
I0919 22:24:18.931020 69358 out.go:179] - NO_PROXY=192.168.49.2,192.168.49.3
W0919 22:24:18.932299 69358 proxy.go:120] fail to check proxy env: Error ip not in block
W0919 22:24:18.932334 69358 proxy.go:120] fail to check proxy env: Error ip not in block
W0919 22:24:18.932375 69358 proxy.go:120] fail to check proxy env: Error ip not in block
W0919 22:24:18.932396 69358 proxy.go:120] fail to check proxy env: Error ip not in block
I0919 22:24:18.932501 69358 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0919 22:24:18.932558 69358 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-326307-m03
I0919 22:24:18.932588 69358 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0919 22:24:18.932662 69358 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-326307-m03
I0919 22:24:18.952990 69358 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32798 SSHKeyPath:/home/jenkins/minikube-integration/21594-14678/.minikube/machines/ha-326307-m03/id_rsa Username:docker}
I0919 22:24:18.953400 69358 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32798 SSHKeyPath:/home/jenkins/minikube-integration/21594-14678/.minikube/machines/ha-326307-m03/id_rsa Username:docker}
I0919 22:24:19.131622 69358 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0919 22:24:19.165991 69358 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0919 22:24:19.166079 69358 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0919 22:24:19.197850 69358 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
I0919 22:24:19.197878 69358 start.go:495] detecting cgroup driver to use...
I0919 22:24:19.197909 69358 detect.go:190] detected "systemd" cgroup driver on host os
I0919 22:24:19.197960 69358 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I0919 22:24:19.211538 69358 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0919 22:24:19.223959 69358 docker.go:218] disabling cri-docker service (if available) ...
I0919 22:24:19.224009 69358 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I0919 22:24:19.239088 69358 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I0919 22:24:19.254102 69358 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I0919 22:24:19.328965 69358 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I0919 22:24:19.406808 69358 docker.go:234] disabling docker service ...
I0919 22:24:19.406888 69358 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I0919 22:24:19.425948 69358 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I0919 22:24:19.438801 69358 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I0919 22:24:19.510941 69358 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I0919 22:24:19.581470 69358 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I0919 22:24:19.594683 69358 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0919 22:24:19.613666 69358 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I0919 22:24:19.627192 69358 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0919 22:24:19.638603 69358 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I0919 22:24:19.638668 69358 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I0919 22:24:19.649965 69358 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0919 22:24:19.661530 69358 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0919 22:24:19.673111 69358 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0919 22:24:19.684782 69358 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0919 22:24:19.696056 69358 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0919 22:24:19.707630 69358 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0919 22:24:19.719687 69358 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I0919 22:24:19.731477 69358 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0919 22:24:19.741738 69358 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0919 22:24:19.751963 69358 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:24:19.822277 69358 ssh_runner.go:195] Run: sudo systemctl restart containerd
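The series of sed edits above rewrites /etc/containerd/config.toml so the runc runtime uses the systemd cgroup driver detected on the host, pins the pause image, and re-enables unprivileged ports, after which containerd is restarted. As an illustration, the SystemdCgroup toggle could be done like this in Go (a sketch of the equivalent in-place edit, not minikube's implementation):

// Sketch: flip SystemdCgroup to true in a containerd config fragment.
package main

import (
    "fmt"
    "regexp"
)

func main() {
    conf := `[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
  SystemdCgroup = false
`
    re := regexp.MustCompile(`(?m)^(\s*)SystemdCgroup = .*$`)
    fmt.Print(re.ReplaceAllString(conf, "${1}SystemdCgroup = true"))
}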
I0919 22:24:19.931918 69358 start.go:542] Will wait 60s for socket path /run/containerd/containerd.sock
I0919 22:24:19.931995 69358 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I0919 22:24:19.936531 69358 start.go:563] Will wait 60s for crictl version
I0919 22:24:19.936591 69358 ssh_runner.go:195] Run: which crictl
I0919 22:24:19.940632 69358 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0919 22:24:19.977944 69358 start.go:579] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: 1.7.27
RuntimeApiVersion: v1
I0919 22:24:19.978013 69358 ssh_runner.go:195] Run: containerd --version
I0919 22:24:20.003290 69358 ssh_runner.go:195] Run: containerd --version
I0919 22:24:20.032714 69358 out.go:179] * Preparing Kubernetes v1.34.0 on containerd 1.7.27 ...
I0919 22:24:20.034190 69358 out.go:179] - env NO_PROXY=192.168.49.2
I0919 22:24:20.035560 69358 out.go:179] - env NO_PROXY=192.168.49.2,192.168.49.3
I0919 22:24:20.036915 69358 cli_runner.go:164] Run: docker network inspect ha-326307 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0919 22:24:20.055444 69358 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I0919 22:24:20.059762 69358 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0919 22:24:20.072851 69358 mustload.go:65] Loading cluster: ha-326307
I0919 22:24:20.073081 69358 config.go:182] Loaded profile config "ha-326307": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.0
I0919 22:24:20.073298 69358 cli_runner.go:164] Run: docker container inspect ha-326307 --format={{.State.Status}}
I0919 22:24:20.091365 69358 host.go:66] Checking if "ha-326307" exists ...
I0919 22:24:20.091605 69358 certs.go:68] Setting up /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307 for IP: 192.168.49.4
I0919 22:24:20.091616 69358 certs.go:194] generating shared ca certs ...
I0919 22:24:20.091629 69358 certs.go:226] acquiring lock for ca certs: {Name:mkd7a2e112725f042a76c7be63aef486d6b9bff2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:24:20.091746 69358 certs.go:235] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21594-14678/.minikube/ca.key
I0919 22:24:20.091786 69358 certs.go:235] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21594-14678/.minikube/proxy-client-ca.key
I0919 22:24:20.091796 69358 certs.go:256] generating profile certs ...
I0919 22:24:20.091865 69358 certs.go:359] skipping valid signed profile cert regeneration for "minikube-user": /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/client.key
I0919 22:24:20.091891 69358 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.key.95aca604
I0919 22:24:20.091905 69358 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.crt.95aca604 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2 192.168.49.3 192.168.49.4 192.168.49.254]
I0919 22:24:20.372898 69358 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.crt.95aca604 ...
I0919 22:24:20.372943 69358 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.crt.95aca604: {Name:mk9b724916886d4c69140cc45e23ce082460d116 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:24:20.373186 69358 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.key.95aca604 ...
I0919 22:24:20.373210 69358 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.key.95aca604: {Name:mkfc0cd42f96faa2f697a81fc7ca671182c3cea8 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:24:20.373311 69358 certs.go:381] copying /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.crt.95aca604 -> /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.crt
I0919 22:24:20.373471 69358 certs.go:385] copying /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.key.95aca604 -> /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.key
I0919 22:24:20.373649 69358 certs.go:359] skipping valid signed profile cert regeneration for "aggregator": /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/proxy-client.key
I0919 22:24:20.373668 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/ca.crt -> /var/lib/minikube/certs/ca.crt
I0919 22:24:20.373682 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/ca.key -> /var/lib/minikube/certs/ca.key
I0919 22:24:20.373692 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/proxy-client-ca.crt -> /var/lib/minikube/certs/proxy-client-ca.crt
I0919 22:24:20.373703 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/proxy-client-ca.key -> /var/lib/minikube/certs/proxy-client-ca.key
I0919 22:24:20.373713 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.crt -> /var/lib/minikube/certs/apiserver.crt
I0919 22:24:20.373723 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.key -> /var/lib/minikube/certs/apiserver.key
I0919 22:24:20.373733 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/proxy-client.crt -> /var/lib/minikube/certs/proxy-client.crt
I0919 22:24:20.373743 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/proxy-client.key -> /var/lib/minikube/certs/proxy-client.key
I0919 22:24:20.373795 69358 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-14678/.minikube/certs/18210.pem (1338 bytes)
W0919 22:24:20.373823 69358 certs.go:480] ignoring /home/jenkins/minikube-integration/21594-14678/.minikube/certs/18210_empty.pem, impossibly tiny 0 bytes
I0919 22:24:20.373832 69358 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-14678/.minikube/certs/ca-key.pem (1679 bytes)
I0919 22:24:20.373856 69358 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-14678/.minikube/certs/ca.pem (1082 bytes)
I0919 22:24:20.373878 69358 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-14678/.minikube/certs/cert.pem (1123 bytes)
I0919 22:24:20.373899 69358 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-14678/.minikube/certs/key.pem (1675 bytes)
I0919 22:24:20.373936 69358 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-14678/.minikube/files/etc/ssl/certs/182102.pem (1708 bytes)
I0919 22:24:20.373962 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/files/etc/ssl/certs/182102.pem -> /usr/share/ca-certificates/182102.pem
I0919 22:24:20.373976 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/ca.crt -> /usr/share/ca-certificates/minikubeCA.pem
I0919 22:24:20.373987 69358 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-14678/.minikube/certs/18210.pem -> /usr/share/ca-certificates/18210.pem
I0919 22:24:20.374034 69358 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-326307
I0919 22:24:20.394051 69358 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32788 SSHKeyPath:/home/jenkins/minikube-integration/21594-14678/.minikube/machines/ha-326307/id_rsa Username:docker}
I0919 22:24:20.484593 69358 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/sa.pub
I0919 22:24:20.489010 69358 ssh_runner.go:447] scp /var/lib/minikube/certs/sa.pub --> memory (451 bytes)
I0919 22:24:20.503471 69358 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/sa.key
I0919 22:24:20.507649 69358 ssh_runner.go:447] scp /var/lib/minikube/certs/sa.key --> memory (1679 bytes)
I0919 22:24:20.522195 69358 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/front-proxy-ca.crt
I0919 22:24:20.526410 69358 ssh_runner.go:447] scp /var/lib/minikube/certs/front-proxy-ca.crt --> memory (1123 bytes)
I0919 22:24:20.541840 69358 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/front-proxy-ca.key
I0919 22:24:20.546043 69358 ssh_runner.go:447] scp /var/lib/minikube/certs/front-proxy-ca.key --> memory (1679 bytes)
I0919 22:24:20.560364 69358 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/etcd/ca.crt
I0919 22:24:20.564230 69358 ssh_runner.go:447] scp /var/lib/minikube/certs/etcd/ca.crt --> memory (1094 bytes)
I0919 22:24:20.577547 69358 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/etcd/ca.key
I0919 22:24:20.581387 69358 ssh_runner.go:447] scp /var/lib/minikube/certs/etcd/ca.key --> memory (1675 bytes)
I0919 22:24:20.594800 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0919 22:24:20.622991 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I0919 22:24:20.651461 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0919 22:24:20.678113 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I0919 22:24:20.705292 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1444 bytes)
I0919 22:24:20.732489 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I0919 22:24:20.762310 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0919 22:24:20.789808 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I0919 22:24:20.819251 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/files/etc/ssl/certs/182102.pem --> /usr/share/ca-certificates/182102.pem (1708 bytes)
I0919 22:24:20.851010 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0919 22:24:20.879714 69358 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-14678/.minikube/certs/18210.pem --> /usr/share/ca-certificates/18210.pem (1338 bytes)
I0919 22:24:20.908177 69358 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/sa.pub (451 bytes)
I0919 22:24:20.928644 69358 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/sa.key (1679 bytes)
I0919 22:24:20.949340 69358 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/front-proxy-ca.crt (1123 bytes)
I0919 22:24:20.969391 69358 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/front-proxy-ca.key (1679 bytes)
I0919 22:24:20.989837 69358 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/etcd/ca.crt (1094 bytes)
I0919 22:24:21.011118 69358 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/etcd/ca.key (1675 bytes)
I0919 22:24:21.031485 69358 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (744 bytes)
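The "scp memory --> ..." entries above mean the shared cluster material (service-account keys, front-proxy and etcd CAs, kubeconfig) is streamed from memory over the existing SSH session rather than copied from a local file. A minimal golang.org/x/crypto/ssh sketch of that pattern; the port and key path are the ones in this log, while the remote tee command and the inline payload are assumptions for illustration:

// Sketch: write an in-memory payload to a file on the node over SSH.
package main

import (
    "bytes"
    "os"

    "golang.org/x/crypto/ssh"
)

func main() {
    keyBytes, err := os.ReadFile("/home/jenkins/minikube-integration/21594-14678/.minikube/machines/ha-326307-m03/id_rsa")
    if err != nil {
        panic(err)
    }
    signer, err := ssh.ParsePrivateKey(keyBytes)
    if err != nil {
        panic(err)
    }
    client, err := ssh.Dial("tcp", "127.0.0.1:32798", &ssh.ClientConfig{
        User:            "docker",
        Auth:            []ssh.AuthMethod{ssh.PublicKeys(signer)},
        HostKeyCallback: ssh.InsecureIgnoreHostKey(),
    })
    if err != nil {
        panic(err)
    }
    defer client.Close()

    sess, err := client.NewSession()
    if err != nil {
        panic(err)
    }
    defer sess.Close()

    // Stream the payload from memory into the destination path on the node.
    sess.Stdin = bytes.NewReader([]byte("-----BEGIN CERTIFICATE-----\n...placeholder...\n-----END CERTIFICATE-----\n"))
    if err := sess.Run("sudo tee /var/lib/minikube/certs/sa.pub >/dev/null"); err != nil {
        panic(err)
    }
}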
I0919 22:24:21.052354 69358 ssh_runner.go:195] Run: openssl version
I0919 22:24:21.058486 69358 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/18210.pem && ln -fs /usr/share/ca-certificates/18210.pem /etc/ssl/certs/18210.pem"
I0919 22:24:21.069582 69358 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/18210.pem
I0919 22:24:21.074372 69358 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Sep 19 22:20 /usr/share/ca-certificates/18210.pem
I0919 22:24:21.074440 69358 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/18210.pem
I0919 22:24:21.082186 69358 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/18210.pem /etc/ssl/certs/51391683.0"
I0919 22:24:21.092957 69358 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/182102.pem && ln -fs /usr/share/ca-certificates/182102.pem /etc/ssl/certs/182102.pem"
I0919 22:24:21.104085 69358 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/182102.pem
I0919 22:24:21.108193 69358 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Sep 19 22:20 /usr/share/ca-certificates/182102.pem
I0919 22:24:21.108258 69358 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/182102.pem
I0919 22:24:21.116078 69358 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/182102.pem /etc/ssl/certs/3ec20f2e.0"
I0919 22:24:21.127607 69358 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0919 22:24:21.139338 69358 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0919 22:24:21.143794 69358 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Sep 19 22:15 /usr/share/ca-certificates/minikubeCA.pem
I0919 22:24:21.143848 69358 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0919 22:24:21.151321 69358 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
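The 51391683.0, 3ec20f2e.0 and b5213941.0 names above are OpenSSL subject-hash links: openssl x509 -hash prints the hash of the certificate's subject, and a <hash>.0 symlink in /etc/ssl/certs lets OpenSSL-based clients locate the CA. A short sketch of creating such a link (assumes openssl is on PATH and the process can write to /etc/ssl/certs):

// Sketch: create an OpenSSL subject-hash symlink for a CA certificate.
package main

import (
    "os"
    "os/exec"
    "path/filepath"
    "strings"
)

func main() {
    cert := "/usr/share/ca-certificates/minikubeCA.pem"
    out, err := exec.Command("openssl", "x509", "-hash", "-noout", "-in", cert).Output()
    if err != nil {
        panic(err)
    }
    hash := strings.TrimSpace(string(out)) // e.g. b5213941
    link := filepath.Join("/etc/ssl/certs", hash+".0")
    if _, err := os.Lstat(link); os.IsNotExist(err) {
        if err := os.Symlink(cert, link); err != nil {
            panic(err)
        }
    }
}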
I0919 22:24:21.162759 69358 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0919 22:24:21.166499 69358 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I0919 22:24:21.166555 69358 kubeadm.go:926] updating node {m03 192.168.49.4 8443 v1.34.0 containerd true true} ...
I0919 22:24:21.166642 69358 kubeadm.go:938] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=ha-326307-m03 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.4
[Install]
config:
{KubernetesVersion:v1.34.0 ClusterName:ha-326307 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I0919 22:24:21.166677 69358 kube-vip.go:115] generating kube-vip config ...
I0919 22:24:21.166738 69358 ssh_runner.go:195] Run: sudo sh -c "lsmod | grep ip_vs"
I0919 22:24:21.180123 69358 kube-vip.go:163] giving up enabling control-plane load-balancing as ipvs kernel modules appears not to be available: sudo sh -c "lsmod | grep ip_vs": Process exited with status 1
stdout:
stderr:
I0919 22:24:21.180202 69358 kube-vip.go:137] kube-vip config:
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
name: kube-vip
namespace: kube-system
spec:
containers:
- args:
- manager
env:
- name: vip_arp
value: "true"
- name: port
value: "8443"
- name: vip_nodename
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: vip_interface
value: eth0
- name: vip_cidr
value: "32"
- name: dns_mode
value: first
- name: cp_enable
value: "true"
- name: cp_namespace
value: kube-system
- name: vip_leaderelection
value: "true"
- name: vip_leasename
value: plndr-cp-lock
- name: vip_leaseduration
value: "5"
- name: vip_renewdeadline
value: "3"
- name: vip_retryperiod
value: "1"
- name: address
value: 192.168.49.254
- name: prometheus_server
value: :2112
image: ghcr.io/kube-vip/kube-vip:v1.0.0
imagePullPolicy: IfNotPresent
name: kube-vip
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
- NET_RAW
volumeMounts:
- mountPath: /etc/kubernetes/admin.conf
name: kubeconfig
hostAliases:
- hostnames:
- kubernetes
ip: 127.0.0.1
hostNetwork: true
volumes:
- hostPath:
path: "/etc/kubernetes/admin.conf"
name: kubeconfig
status: {}
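
The manifest above is written as a static pod (kube-vip.yaml under /etc/kubernetes/manifests a few lines below), and because the `lsmod | grep ip_vs` probe failed, kube-vip serves the VIP 192.168.49.254:8443 in ARP/leader-election mode rather than IPVS control-plane load-balancing. A small, purely illustrative sketch of an equivalent module probe that reads /proc/modules (the data lsmod prints) instead of shelling out:

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// hasKernelModule reports whether a module (e.g. "ip_vs") is loaded,
// by scanning /proc/modules -- the same data `lsmod` prints.
func hasKernelModule(name string) (bool, error) {
	f, err := os.Open("/proc/modules")
	if err != nil {
		return false, err
	}
	defer f.Close()
	s := bufio.NewScanner(f)
	for s.Scan() {
		fields := strings.Fields(s.Text())
		if len(fields) > 0 && fields[0] == name {
			return true, nil
		}
	}
	return false, s.Err()
}

func main() {
	ok, err := hasKernelModule("ip_vs")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("ip_vs loaded:", ok) // false here => ARP mode, as in the log
}
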
I0919 22:24:21.180261 69358 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.0
I0919 22:24:21.189900 69358 binaries.go:44] Found k8s binaries, skipping transfer
I0919 22:24:21.189963 69358 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /etc/kubernetes/manifests
I0919 22:24:21.200336 69358 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (317 bytes)
I0919 22:24:21.220715 69358 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0919 22:24:21.244525 69358 ssh_runner.go:362] scp memory --> /etc/kubernetes/manifests/kube-vip.yaml (1358 bytes)
I0919 22:24:21.268789 69358 ssh_runner.go:195] Run: grep 192.168.49.254 control-plane.minikube.internal$ /etc/hosts
I0919 22:24:21.272885 69358 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.254 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
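
The one-liner above strips any stale control-plane.minikube.internal entry from /etc/hosts and appends the VIP mapping, staging the result in a temp file so the swap is a single cp. Roughly the same edit in Go, assuming a hypothetical setHostsEntry helper run against a scratch copy rather than the real /etc/hosts:

package main

import (
	"fmt"
	"os"
	"strings"
)

// setHostsEntry rewrites an /etc/hosts-style file so that exactly one line
// maps host -> ip, mirroring the grep -v / echo / cp idiom in the log above.
func setHostsEntry(path, ip, host string) error {
	data, err := os.ReadFile(path)
	if err != nil {
		return err
	}
	var kept []string
	for _, line := range strings.Split(strings.TrimRight(string(data), "\n"), "\n") {
		fields := strings.Fields(line)
		if len(fields) >= 2 && fields[len(fields)-1] == host {
			continue // drop any existing mapping for this host
		}
		kept = append(kept, line)
	}
	kept = append(kept, ip+"\t"+host)
	return os.WriteFile(path, []byte(strings.Join(kept, "\n")+"\n"), 0o644)
}

func main() {
	tmp := "hosts.copy"
	_ = os.WriteFile(tmp, []byte("127.0.0.1\tlocalhost\n"), 0o644)
	if err := setHostsEntry(tmp, "192.168.49.254", "control-plane.minikube.internal"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
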
I0919 22:24:21.285764 69358 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:24:21.362911 69358 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0919 22:24:21.394403 69358 host.go:66] Checking if "ha-326307" exists ...
I0919 22:24:21.394691 69358 start.go:317] joinCluster: &{Name:ha-326307 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:ha-326307 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[]
DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true} {Name:m02 IP:192.168.49.3 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true} {Name:m03 IP:192.168.49.4 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:false efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:
false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:false storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVM
netPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0919 22:24:21.394850 69358 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm token create --print-join-command --ttl=0"
I0919 22:24:21.394898 69358 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-326307
I0919 22:24:21.419020 69358 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32788 SSHKeyPath:/home/jenkins/minikube-integration/21594-14678/.minikube/machines/ha-326307/id_rsa Username:docker}
I0919 22:24:21.569927 69358 start.go:343] trying to join control-plane node "m03" to cluster: &{Name:m03 IP:192.168.49.4 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I0919 22:24:21.569980 69358 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm join control-plane.minikube.internal:8443 --token uhifqr.okdtfjqzhuoxbb2e --discovery-token-ca-cert-hash sha256:dae3dd920fb027024a058f7784382f806dfdbf0483a893c299b72dd41dc8aff6 --ignore-preflight-errors=all --cri-socket unix:///run/containerd/containerd.sock --node-name=ha-326307-m03 --control-plane --apiserver-advertise-address=192.168.49.4 --apiserver-bind-port=8443"
I0919 22:24:32.089764 69358 ssh_runner.go:235] Completed: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm join control-plane.minikube.internal:8443 --token uhifqr.okdtfjqzhuoxbb2e --discovery-token-ca-cert-hash sha256:dae3dd920fb027024a058f7784382f806dfdbf0483a893c299b72dd41dc8aff6 --ignore-preflight-errors=all --cri-socket unix:///run/containerd/containerd.sock --node-name=ha-326307-m03 --control-plane --apiserver-advertise-address=192.168.49.4 --apiserver-bind-port=8443": (10.519762438s)
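
Joining m03 as an additional control plane needs the bootstrap token minted just above with `kubeadm token create --print-join-command`, the shared endpoint control-plane.minikube.internal:8443, and the discovery CA hash. The sha256:... value in --discovery-token-ca-cert-hash is the SHA-256 of the cluster CA certificate's Subject Public Key Info; a sketch of recomputing it from a CA PEM (the path is an assumption about where the CA sits on the node):

package main

import (
	"crypto/sha256"
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
)

// caCertHash computes the kubeadm-style discovery hash for a CA certificate:
// SHA-256 over the DER-encoded Subject Public Key Info of the cert.
func caCertHash(pemPath string) (string, error) {
	data, err := os.ReadFile(pemPath)
	if err != nil {
		return "", err
	}
	block, _ := pem.Decode(data)
	if block == nil {
		return "", fmt.Errorf("no PEM block in %s", pemPath)
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return "", err
	}
	sum := sha256.Sum256(cert.RawSubjectPublicKeyInfo)
	return fmt.Sprintf("sha256:%x", sum), nil
}

func main() {
	h, err := caCertHash("/var/lib/minikube/certs/ca.crt")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(h) // should match the --discovery-token-ca-cert-hash above
}
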
I0919 22:24:32.089793 69358 ssh_runner.go:195] Run: /bin/bash -c "sudo systemctl daemon-reload && sudo systemctl enable kubelet && sudo systemctl start kubelet"
I0919 22:24:32.309566 69358 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes ha-326307-m03 minikube.k8s.io/updated_at=2025_09_19T22_24_32_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=6e37ee63f758843bb5fe33c3a528c564c4b83d53 minikube.k8s.io/name=ha-326307 minikube.k8s.io/primary=false
I0919 22:24:32.391142 69358 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig taint nodes ha-326307-m03 node-role.kubernetes.io/control-plane:NoSchedule-
I0919 22:24:32.471336 69358 start.go:319] duration metric: took 11.076641052s to joinCluster
I0919 22:24:32.471402 69358 start.go:235] Will wait 6m0s for node &{Name:m03 IP:192.168.49.4 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I0919 22:24:32.471770 69358 config.go:182] Loaded profile config "ha-326307": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.0
I0919 22:24:32.473461 69358 out.go:179] * Verifying Kubernetes components...
I0919 22:24:32.475427 69358 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:24:32.579664 69358 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0919 22:24:32.593786 69358 kapi.go:59] client config for ha-326307: &rest.Config{Host:"https://192.168.49.254:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/client.crt", KeyFile:"/home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/client.key", CAFile:"/home/jenkins/minikube-integration/21594-14678/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(nil)
}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x27f4a00), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), WarningHandlerWithContext:rest.WarningHandlerWithContext(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
W0919 22:24:32.593856 69358 kubeadm.go:483] Overriding stale ClientConfig host https://192.168.49.254:8443 with https://192.168.49.2:8443
I0919 22:24:32.594084 69358 node_ready.go:35] waiting up to 6m0s for node "ha-326307-m03" to be "Ready" ...
W0919 22:24:34.597297 69358 node_ready.go:57] node "ha-326307-m03" has "Ready":"False" status (will retry)
I0919 22:24:35.098269 69358 node_ready.go:49] node "ha-326307-m03" is "Ready"
I0919 22:24:35.098296 69358 node_ready.go:38] duration metric: took 2.504196997s for node "ha-326307-m03" to be "Ready" ...
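
node_ready.go polls the node object until its Ready condition reports True, using the client config above (note the stale-VIP override from 192.168.49.254 to 192.168.49.2). A minimal client-go sketch of the same check, assuming a recent client-go and a kubeconfig on disk; the helper name, poll interval and paths are illustrative, not minikube's code:

package main

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// waitNodeReady polls until the named node reports Ready=True or the timeout hits.
func waitNodeReady(cs *kubernetes.Clientset, name string, timeout time.Duration) error {
	return wait.PollUntilContextTimeout(context.Background(), 500*time.Millisecond, timeout, true,
		func(ctx context.Context) (bool, error) {
			node, err := cs.CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{})
			if err != nil {
				return false, nil // treat API errors as transient and keep retrying
			}
			for _, c := range node.Status.Conditions {
				if c.Type == corev1.NodeReady {
					return c.Status == corev1.ConditionTrue, nil
				}
			}
			return false, nil
		})
}

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/var/lib/minikube/kubeconfig")
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Println(waitNodeReady(cs, "ha-326307-m03", 6*time.Minute))
}
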
I0919 22:24:35.098310 69358 api_server.go:52] waiting for apiserver process to appear ...
I0919 22:24:35.098358 69358 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0919 22:24:35.111440 69358 api_server.go:72] duration metric: took 2.640014462s to wait for apiserver process to appear ...
I0919 22:24:35.111465 69358 api_server.go:88] waiting for apiserver healthz status ...
I0919 22:24:35.111483 69358 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
I0919 22:24:35.115724 69358 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
ok
I0919 22:24:35.116810 69358 api_server.go:141] control plane version: v1.34.0
I0919 22:24:35.116837 69358 api_server.go:131] duration metric: took 5.364462ms to wait for apiserver health ...
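
The healthz check above is a plain HTTPS GET that expects the literal body "ok" before the control-plane version is read. A stand-alone sketch that verifies the server with the cluster CA; it leans on the default RBAC that exposes /healthz to unauthenticated callers, whereas minikube goes through its authenticated client transport (paths here are assumptions):

package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io"
	"net/http"
	"os"
	"time"
)

// probeHealthz hits the apiserver /healthz endpoint and expects the body "ok".
func probeHealthz(url, caPath string) error {
	caPEM, err := os.ReadFile(caPath)
	if err != nil {
		return err
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caPEM) {
		return fmt.Errorf("could not parse CA at %s", caPath)
	}
	client := &http.Client{
		Timeout:   5 * time.Second,
		Transport: &http.Transport{TLSClientConfig: &tls.Config{RootCAs: pool}},
	}
	resp, err := client.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	if resp.StatusCode != http.StatusOK || string(body) != "ok" {
		return fmt.Errorf("healthz returned %d: %q", resp.StatusCode, body)
	}
	return nil
}

func main() {
	err := probeHealthz("https://192.168.49.2:8443/healthz", os.ExpandEnv("$HOME/.minikube/ca.crt"))
	fmt.Println(err)
}
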
I0919 22:24:35.116849 69358 system_pods.go:43] waiting for kube-system pods to appear ...
I0919 22:24:35.123343 69358 system_pods.go:59] 27 kube-system pods found
I0919 22:24:35.123372 69358 system_pods.go:61] "coredns-66bc5c9577-9j5pw" [7d073e38-b63e-494d-bda0-3dde372a950b] Running
I0919 22:24:35.123377 69358 system_pods.go:61] "coredns-66bc5c9577-wqvzd" [64376c4d-1b82-490d-887d-7f628b134014] Running
I0919 22:24:35.123380 69358 system_pods.go:61] "etcd-ha-326307" [cc755641-9756-42fe-94ea-76d3167a2f67] Running
I0919 22:24:35.123384 69358 system_pods.go:61] "etcd-ha-326307-m02" [fe655813-ee01-420d-a127-9e43d85b3674] Running
I0919 22:24:35.123387 69358 system_pods.go:61] "etcd-ha-326307-m03" [2264c92a-675d-4d92-b0c7-640bfa6eab93] Pending
I0919 22:24:35.123390 69358 system_pods.go:61] "kindnet-gxnzs" [4fa827fc-0ba7-49b7-a225-e36d76241d92] Running
I0919 22:24:35.123393 69358 system_pods.go:61] "kindnet-mk6pv" [71a20992-8279-4040-9edc-bedef6e7b570] Running
I0919 22:24:35.123400 69358 system_pods.go:61] "kindnet-pnj9r" [14a458fc-0e9d-42e9-9473-f7f2a6f7f571] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kindnet-pnj9r": pod kindnet-pnj9r is already assigned to node "ha-326307-m03")
I0919 22:24:35.123408 69358 system_pods.go:61] "kindnet-qxwpq" [173e48ec-ef56-4824-9f55-a04b199b7943] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kindnet-qxwpq": pod kindnet-qxwpq is already assigned to node "ha-326307-m03")
I0919 22:24:35.123416 69358 system_pods.go:61] "kindnet-wcct9" [5472dcae-344b-43fb-84d1-8d0d41852cd1] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kindnet-wcct9": pod kindnet-wcct9 is already assigned to node "ha-326307-m03")
I0919 22:24:35.123427 69358 system_pods.go:61] "kube-apiserver-ha-326307" [48020293-8f00-4ab7-8361-d21025061653] Running
I0919 22:24:35.123433 69358 system_pods.go:61] "kube-apiserver-ha-326307-m02" [568fe413-bf13-4b89-867f-a74dacede73f] Running
I0919 22:24:35.123445 69358 system_pods.go:61] "kube-apiserver-ha-326307-m03" [a068235a-a6f2-4e72-a4ab-b61d248187d3] Pending
I0919 22:24:35.123450 69358 system_pods.go:61] "kube-controller-manager-ha-326307" [a62d94c7-7f48-4b34-9985-58de1d7d32bc] Running
I0919 22:24:35.123454 69358 system_pods.go:61] "kube-controller-manager-ha-326307-m02" [0930e36a-1e9b-4f15-ac20-4fb1696fa911] Running
I0919 22:24:35.123457 69358 system_pods.go:61] "kube-controller-manager-ha-326307-m03" [b1dba457-e157-4c9e-ba28-c2c383eb13d8] Pending
I0919 22:24:35.123461 69358 system_pods.go:61] "kube-proxy-6nmjx" [81414747-6c4e-495e-a28d-cb17f0c0c306] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kube-proxy-6nmjx": pod kube-proxy-6nmjx is already assigned to node "ha-326307-m03")
I0919 22:24:35.123465 69358 system_pods.go:61] "kube-proxy-8kxtv" [70be5fcc-7ab6-4eb1-870d-988fee1a01bb] Running
I0919 22:24:35.123469 69358 system_pods.go:61] "kube-proxy-q8mtj" [6e3896c8-f771-462e-888d-942ebc96a7c2] Running
I0919 22:24:35.123472 69358 system_pods.go:61] "kube-proxy-ws89d" [db26755e-db93-40a7-9f1a-f52205a1df48] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kube-proxy-ws89d": pod kube-proxy-ws89d is already assigned to node "ha-326307-m03")
I0919 22:24:35.123477 69358 system_pods.go:61] "kube-scheduler-ha-326307" [da6af764-e4e6-48aa-9569-577e4379692f] Running
I0919 22:24:35.123481 69358 system_pods.go:61] "kube-scheduler-ha-326307-m02" [f6878d24-de85-4cf9-a49f-7ff55bf06519] Running
I0919 22:24:35.123487 69358 system_pods.go:61] "kube-scheduler-ha-326307-m03" [2d92661f-37cb-443e-b082-3960536ed3a8] Pending
I0919 22:24:35.123489 69358 system_pods.go:61] "kube-vip-ha-326307" [36baecf0-60bd-41c0-a3c8-45e4f6ebddad] Running
I0919 22:24:35.123492 69358 system_pods.go:61] "kube-vip-ha-326307-m02" [24b5d637-78d1-41f7-8e00-40fee7f9e60f] Running
I0919 22:24:35.123496 69358 system_pods.go:61] "kube-vip-ha-326307-m03" [c9b028c5-322e-49e8-8195-c7a478179f74] Pending
I0919 22:24:35.123503 69358 system_pods.go:61] "storage-provisioner" [cafe04c6-2dce-4b93-b6d1-205efc39b360] Running
I0919 22:24:35.123511 69358 system_pods.go:74] duration metric: took 6.65469ms to wait for pod list to return data ...
I0919 22:24:35.123525 69358 default_sa.go:34] waiting for default service account to be created ...
I0919 22:24:35.126592 69358 default_sa.go:45] found service account: "default"
I0919 22:24:35.126616 69358 default_sa.go:55] duration metric: took 3.083846ms for default service account to be created ...
I0919 22:24:35.126627 69358 system_pods.go:116] waiting for k8s-apps to be running ...
I0919 22:24:35.131895 69358 system_pods.go:86] 27 kube-system pods found
I0919 22:24:35.131928 69358 system_pods.go:89] "coredns-66bc5c9577-9j5pw" [7d073e38-b63e-494d-bda0-3dde372a950b] Running
I0919 22:24:35.131936 69358 system_pods.go:89] "coredns-66bc5c9577-wqvzd" [64376c4d-1b82-490d-887d-7f628b134014] Running
I0919 22:24:35.131941 69358 system_pods.go:89] "etcd-ha-326307" [cc755641-9756-42fe-94ea-76d3167a2f67] Running
I0919 22:24:35.131946 69358 system_pods.go:89] "etcd-ha-326307-m02" [fe655813-ee01-420d-a127-9e43d85b3674] Running
I0919 22:24:35.131950 69358 system_pods.go:89] "etcd-ha-326307-m03" [2264c92a-675d-4d92-b0c7-640bfa6eab93] Pending
I0919 22:24:35.131954 69358 system_pods.go:89] "kindnet-gxnzs" [4fa827fc-0ba7-49b7-a225-e36d76241d92] Running
I0919 22:24:35.131959 69358 system_pods.go:89] "kindnet-mk6pv" [71a20992-8279-4040-9edc-bedef6e7b570] Running
I0919 22:24:35.131968 69358 system_pods.go:89] "kindnet-pnj9r" [14a458fc-0e9d-42e9-9473-f7f2a6f7f571] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kindnet-pnj9r": pod kindnet-pnj9r is already assigned to node "ha-326307-m03")
I0919 22:24:35.131975 69358 system_pods.go:89] "kindnet-qxwpq" [173e48ec-ef56-4824-9f55-a04b199b7943] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kindnet-qxwpq": pod kindnet-qxwpq is already assigned to node "ha-326307-m03")
I0919 22:24:35.131986 69358 system_pods.go:89] "kindnet-wcct9" [5472dcae-344b-43fb-84d1-8d0d41852cd1] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kindnet-wcct9": pod kindnet-wcct9 is already assigned to node "ha-326307-m03")
I0919 22:24:35.131993 69358 system_pods.go:89] "kube-apiserver-ha-326307" [48020293-8f00-4ab7-8361-d21025061653] Running
I0919 22:24:35.132003 69358 system_pods.go:89] "kube-apiserver-ha-326307-m02" [568fe413-bf13-4b89-867f-a74dacede73f] Running
I0919 22:24:35.132009 69358 system_pods.go:89] "kube-apiserver-ha-326307-m03" [a068235a-a6f2-4e72-a4ab-b61d248187d3] Pending
I0919 22:24:35.132015 69358 system_pods.go:89] "kube-controller-manager-ha-326307" [a62d94c7-7f48-4b34-9985-58de1d7d32bc] Running
I0919 22:24:35.132022 69358 system_pods.go:89] "kube-controller-manager-ha-326307-m02" [0930e36a-1e9b-4f15-ac20-4fb1696fa911] Running
I0919 22:24:35.132028 69358 system_pods.go:89] "kube-controller-manager-ha-326307-m03" [b1dba457-e157-4c9e-ba28-c2c383eb13d8] Pending
I0919 22:24:35.132035 69358 system_pods.go:89] "kube-proxy-6nmjx" [81414747-6c4e-495e-a28d-cb17f0c0c306] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kube-proxy-6nmjx": pod kube-proxy-6nmjx is already assigned to node "ha-326307-m03")
I0919 22:24:35.132044 69358 system_pods.go:89] "kube-proxy-8kxtv" [70be5fcc-7ab6-4eb1-870d-988fee1a01bb] Running
I0919 22:24:35.132050 69358 system_pods.go:89] "kube-proxy-q8mtj" [6e3896c8-f771-462e-888d-942ebc96a7c2] Running
I0919 22:24:35.132057 69358 system_pods.go:89] "kube-proxy-ws89d" [db26755e-db93-40a7-9f1a-f52205a1df48] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kube-proxy-ws89d": pod kube-proxy-ws89d is already assigned to node "ha-326307-m03")
I0919 22:24:35.132067 69358 system_pods.go:89] "kube-scheduler-ha-326307" [da6af764-e4e6-48aa-9569-577e4379692f] Running
I0919 22:24:35.132076 69358 system_pods.go:89] "kube-scheduler-ha-326307-m02" [f6878d24-de85-4cf9-a49f-7ff55bf06519] Running
I0919 22:24:35.132082 69358 system_pods.go:89] "kube-scheduler-ha-326307-m03" [2d92661f-37cb-443e-b082-3960536ed3a8] Pending
I0919 22:24:35.132090 69358 system_pods.go:89] "kube-vip-ha-326307" [36baecf0-60bd-41c0-a3c8-45e4f6ebddad] Running
I0919 22:24:35.132096 69358 system_pods.go:89] "kube-vip-ha-326307-m02" [24b5d637-78d1-41f7-8e00-40fee7f9e60f] Running
I0919 22:24:35.132101 69358 system_pods.go:89] "kube-vip-ha-326307-m03" [c9b028c5-322e-49e8-8195-c7a478179f74] Pending
I0919 22:24:35.132107 69358 system_pods.go:89] "storage-provisioner" [cafe04c6-2dce-4b93-b6d1-205efc39b360] Running
I0919 22:24:35.132117 69358 system_pods.go:126] duration metric: took 5.483041ms to wait for k8s-apps to be running ...
I0919 22:24:35.132130 69358 system_svc.go:44] waiting for kubelet service to be running ....
I0919 22:24:35.132201 69358 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0919 22:24:35.145901 69358 system_svc.go:56] duration metric: took 13.762213ms WaitForService to wait for kubelet
I0919 22:24:35.145934 69358 kubeadm.go:578] duration metric: took 2.67451015s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0919 22:24:35.145953 69358 node_conditions.go:102] verifying NodePressure condition ...
I0919 22:24:35.149091 69358 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0919 22:24:35.149114 69358 node_conditions.go:123] node cpu capacity is 8
I0919 22:24:35.149122 69358 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0919 22:24:35.149126 69358 node_conditions.go:123] node cpu capacity is 8
I0919 22:24:35.149129 69358 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0919 22:24:35.149133 69358 node_conditions.go:123] node cpu capacity is 8
I0919 22:24:35.149137 69358 node_conditions.go:105] duration metric: took 3.180117ms to run NodePressure ...
I0919 22:24:35.149147 69358 start.go:241] waiting for startup goroutines ...
I0919 22:24:35.149187 69358 start.go:255] writing updated cluster config ...
I0919 22:24:35.149520 69358 ssh_runner.go:195] Run: rm -f paused
I0919 22:24:35.153920 69358 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I0919 22:24:35.154452 69358 kapi.go:59] client config for ha-326307: &rest.Config{Host:"https://192.168.49.254:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/client.crt", KeyFile:"/home/jenkins/minikube-integration/21594-14678/.minikube/profiles/ha-326307/client.key", CAFile:"/home/jenkins/minikube-integration/21594-14678/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(nil)
}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x27f4a00), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), WarningHandlerWithContext:rest.WarningHandlerWithContext(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
I0919 22:24:35.158459 69358 pod_ready.go:83] waiting for pod "coredns-66bc5c9577-9j5pw" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:24:35.164361 69358 pod_ready.go:94] pod "coredns-66bc5c9577-9j5pw" is "Ready"
I0919 22:24:35.164388 69358 pod_ready.go:86] duration metric: took 5.90604ms for pod "coredns-66bc5c9577-9j5pw" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:24:35.164396 69358 pod_ready.go:83] waiting for pod "coredns-66bc5c9577-wqvzd" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:24:35.170275 69358 pod_ready.go:94] pod "coredns-66bc5c9577-wqvzd" is "Ready"
I0919 22:24:35.170305 69358 pod_ready.go:86] duration metric: took 5.903438ms for pod "coredns-66bc5c9577-wqvzd" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:24:35.221651 69358 pod_ready.go:83] waiting for pod "etcd-ha-326307" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:24:35.227692 69358 pod_ready.go:94] pod "etcd-ha-326307" is "Ready"
I0919 22:24:35.227721 69358 pod_ready.go:86] duration metric: took 6.035355ms for pod "etcd-ha-326307" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:24:35.227738 69358 pod_ready.go:83] waiting for pod "etcd-ha-326307-m02" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:24:35.234705 69358 pod_ready.go:94] pod "etcd-ha-326307-m02" is "Ready"
I0919 22:24:35.234755 69358 pod_ready.go:86] duration metric: took 6.991962ms for pod "etcd-ha-326307-m02" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:24:35.234769 69358 pod_ready.go:83] waiting for pod "etcd-ha-326307-m03" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:24:35.355285 69358 request.go:683] "Waited before sending request" delay="120.371513ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/etcd-ha-326307-m03"
I0919 22:24:35.555444 69358 request.go:683] "Waited before sending request" delay="196.344855ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-326307-m03"
I0919 22:24:35.955374 69358 request.go:683] "Waited before sending request" delay="196.276117ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-326307-m03"
I0919 22:24:35.958866 69358 pod_ready.go:94] pod "etcd-ha-326307-m03" is "Ready"
I0919 22:24:35.958897 69358 pod_ready.go:86] duration metric: took 724.121102ms for pod "etcd-ha-326307-m03" in "kube-system" namespace to be "Ready" or be gone ...
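
The "Waited before sending request ... client-side throttling" lines are client-go's own token-bucket rate limiter, not server-side API Priority and Fairness: with QPS and Burst left at 0 in the rest.Config dumped earlier, the client defaults (5 QPS, burst 10) apply, so these bursts of readiness GETs get spaced out. A short sketch of where those knobs live; the values are illustrative only:

package main

import (
	"fmt"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/var/lib/minikube/kubeconfig")
	if err != nil {
		panic(err)
	}
	// QPS=0 / Burst=0 means client-go falls back to its defaults (5 QPS, burst 10),
	// which is what produces the "client-side throttling" waits seen in the log.
	// Raising them trades apiserver load for fewer artificial delays.
	cfg.QPS = 50
	cfg.Burst = 100
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Printf("client ready: %T\n", cs)
}
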
I0919 22:24:36.155371 69358 request.go:683] "Waited before sending request" delay="196.353052ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods?labelSelector=component%3Dkube-apiserver"
I0919 22:24:36.158952 69358 pod_ready.go:83] waiting for pod "kube-apiserver-ha-326307" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:24:36.355354 69358 request.go:683] "Waited before sending request" delay="196.272183ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-apiserver-ha-326307"
I0919 22:24:36.555231 69358 request.go:683] "Waited before sending request" delay="196.389456ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-326307"
I0919 22:24:36.558900 69358 pod_ready.go:94] pod "kube-apiserver-ha-326307" is "Ready"
I0919 22:24:36.558927 69358 pod_ready.go:86] duration metric: took 399.940435ms for pod "kube-apiserver-ha-326307" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:24:36.558936 69358 pod_ready.go:83] waiting for pod "kube-apiserver-ha-326307-m02" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:24:36.755357 69358 request.go:683] "Waited before sending request" delay="196.333509ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-apiserver-ha-326307-m02"
I0919 22:24:36.955622 69358 request.go:683] "Waited before sending request" delay="196.371107ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-326307-m02"
I0919 22:24:36.958850 69358 pod_ready.go:94] pod "kube-apiserver-ha-326307-m02" is "Ready"
I0919 22:24:36.958881 69358 pod_ready.go:86] duration metric: took 399.937855ms for pod "kube-apiserver-ha-326307-m02" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:24:36.958892 69358 pod_ready.go:83] waiting for pod "kube-apiserver-ha-326307-m03" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:24:37.155391 69358 request.go:683] "Waited before sending request" delay="196.40338ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-apiserver-ha-326307-m03"
I0919 22:24:37.355336 69358 request.go:683] "Waited before sending request" delay="196.255836ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-326307-m03"
I0919 22:24:37.358527 69358 pod_ready.go:94] pod "kube-apiserver-ha-326307-m03" is "Ready"
I0919 22:24:37.358558 69358 pod_ready.go:86] duration metric: took 399.659411ms for pod "kube-apiserver-ha-326307-m03" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:24:37.555013 69358 request.go:683] "Waited before sending request" delay="196.298446ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods?labelSelector=component%3Dkube-controller-manager"
I0919 22:24:37.559362 69358 pod_ready.go:83] waiting for pod "kube-controller-manager-ha-326307" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:24:37.755832 69358 request.go:683] "Waited before sending request" delay="196.350309ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-326307"
I0919 22:24:37.954837 69358 request.go:683] "Waited before sending request" delay="195.286624ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-326307"
I0919 22:24:37.958236 69358 pod_ready.go:94] pod "kube-controller-manager-ha-326307" is "Ready"
I0919 22:24:37.958266 69358 pod_ready.go:86] duration metric: took 398.878465ms for pod "kube-controller-manager-ha-326307" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:24:37.958274 69358 pod_ready.go:83] waiting for pod "kube-controller-manager-ha-326307-m02" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:24:38.155758 69358 request.go:683] "Waited before sending request" delay="197.394867ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-326307-m02"
I0919 22:24:38.355929 69358 request.go:683] "Waited before sending request" delay="196.396129ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-326307-m02"
I0919 22:24:38.359268 69358 pod_ready.go:94] pod "kube-controller-manager-ha-326307-m02" is "Ready"
I0919 22:24:38.359292 69358 pod_ready.go:86] duration metric: took 401.013168ms for pod "kube-controller-manager-ha-326307-m02" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:24:38.359301 69358 pod_ready.go:83] waiting for pod "kube-controller-manager-ha-326307-m03" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:24:38.555606 69358 request.go:683] "Waited before sending request" delay="196.234039ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-326307-m03"
I0919 22:24:38.755574 69358 request.go:683] "Waited before sending request" delay="196.387697ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-326307-m03"
I0919 22:24:38.955366 69358 request.go:683] "Waited before sending request" delay="95.227976ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-326307-m03"
I0919 22:24:39.154881 69358 request.go:683] "Waited before sending request" delay="196.301821ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-326307-m03"
I0919 22:24:39.555649 69358 request.go:683] "Waited before sending request" delay="192.377634ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-326307-m03"
I0919 22:24:39.955251 69358 request.go:683] "Waited before sending request" delay="92.286577ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-326307-m03"
W0919 22:24:40.366591 69358 pod_ready.go:104] pod "kube-controller-manager-ha-326307-m03" is not "Ready", error: <nil>
W0919 22:24:42.367386 69358 pod_ready.go:104] pod "kube-controller-manager-ha-326307-m03" is not "Ready", error: <nil>
I0919 22:24:43.367824 69358 pod_ready.go:94] pod "kube-controller-manager-ha-326307-m03" is "Ready"
I0919 22:24:43.367860 69358 pod_ready.go:86] duration metric: took 5.00855284s for pod "kube-controller-manager-ha-326307-m03" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:24:43.371145 69358 pod_ready.go:83] waiting for pod "kube-proxy-8kxtv" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:24:43.376946 69358 pod_ready.go:94] pod "kube-proxy-8kxtv" is "Ready"
I0919 22:24:43.376975 69358 pod_ready.go:86] duration metric: took 5.786362ms for pod "kube-proxy-8kxtv" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:24:43.376985 69358 pod_ready.go:83] waiting for pod "kube-proxy-q8mtj" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:24:43.555396 69358 request.go:683] "Waited before sending request" delay="178.323112ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-proxy-q8mtj"
I0919 22:24:43.755331 69358 request.go:683] "Waited before sending request" delay="196.35612ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-326307-m02"
I0919 22:24:43.758666 69358 pod_ready.go:94] pod "kube-proxy-q8mtj" is "Ready"
I0919 22:24:43.758695 69358 pod_ready.go:86] duration metric: took 381.70368ms for pod "kube-proxy-q8mtj" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:24:43.758704 69358 pod_ready.go:83] waiting for pod "kube-proxy-ws89d" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:24:43.955265 69358 request.go:683] "Waited before sending request" delay="196.399278ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-proxy-ws89d"
I0919 22:24:44.155007 69358 request.go:683] "Waited before sending request" delay="196.303687ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-326307-m03"
I0919 22:24:44.354881 69358 request.go:683] "Waited before sending request" delay="95.2124ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-proxy-ws89d"
I0919 22:24:44.555609 69358 request.go:683] "Waited before sending request" delay="197.246504ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-326307-m03"
I0919 22:24:44.955613 69358 request.go:683] "Waited before sending request" delay="192.471154ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-326307-m03"
I0919 22:24:45.355390 69358 request.go:683] "Waited before sending request" delay="92.281537ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-326307-m03"
W0919 22:24:45.765195 69358 pod_ready.go:104] pod "kube-proxy-ws89d" is not "Ready", error: <nil>
W0919 22:24:48.265294 69358 pod_ready.go:104] pod "kube-proxy-ws89d" is not "Ready", error: <nil>
W0919 22:24:50.765471 69358 pod_ready.go:104] pod "kube-proxy-ws89d" is not "Ready", error: <nil>
W0919 22:24:53.265410 69358 pod_ready.go:104] pod "kube-proxy-ws89d" is not "Ready", error: <nil>
W0919 22:24:55.265474 69358 pod_ready.go:104] pod "kube-proxy-ws89d" is not "Ready", error: <nil>
W0919 22:24:57.765267 69358 pod_ready.go:104] pod "kube-proxy-ws89d" is not "Ready", error: <nil>
W0919 22:24:59.765483 69358 pod_ready.go:104] pod "kube-proxy-ws89d" is not "Ready", error: <nil>
W0919 22:25:02.266617 69358 pod_ready.go:104] pod "kube-proxy-ws89d" is not "Ready", error: <nil>
W0919 22:25:04.766256 69358 pod_ready.go:104] pod "kube-proxy-ws89d" is not "Ready", error: <nil>
W0919 22:25:07.265177 69358 pod_ready.go:104] pod "kube-proxy-ws89d" is not "Ready", error: <nil>
W0919 22:25:09.265694 69358 pod_ready.go:104] pod "kube-proxy-ws89d" is not "Ready", error: <nil>
W0919 22:25:11.765032 69358 pod_ready.go:104] pod "kube-proxy-ws89d" is not "Ready", error: <nil>
W0919 22:25:13.765313 69358 pod_ready.go:104] pod "kube-proxy-ws89d" is not "Ready", error: <nil>
W0919 22:25:15.766278 69358 pod_ready.go:104] pod "kube-proxy-ws89d" is not "Ready", error: <nil>
I0919 22:25:17.764644 69358 pod_ready.go:94] pod "kube-proxy-ws89d" is "Ready"
I0919 22:25:17.764670 69358 pod_ready.go:86] duration metric: took 34.005951783s for pod "kube-proxy-ws89d" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:17.767738 69358 pod_ready.go:83] waiting for pod "kube-scheduler-ha-326307" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:17.772985 69358 pod_ready.go:94] pod "kube-scheduler-ha-326307" is "Ready"
I0919 22:25:17.773015 69358 pod_ready.go:86] duration metric: took 5.246042ms for pod "kube-scheduler-ha-326307" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:17.773023 69358 pod_ready.go:83] waiting for pod "kube-scheduler-ha-326307-m02" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:17.778916 69358 pod_ready.go:94] pod "kube-scheduler-ha-326307-m02" is "Ready"
I0919 22:25:17.778942 69358 pod_ready.go:86] duration metric: took 5.914033ms for pod "kube-scheduler-ha-326307-m02" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:17.778951 69358 pod_ready.go:83] waiting for pod "kube-scheduler-ha-326307-m03" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:17.784122 69358 pod_ready.go:94] pod "kube-scheduler-ha-326307-m03" is "Ready"
I0919 22:25:17.784165 69358 pod_ready.go:86] duration metric: took 5.193982ms for pod "kube-scheduler-ha-326307-m03" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:17.784183 69358 pod_ready.go:40] duration metric: took 42.630226972s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I0919 22:25:17.833559 69358 start.go:617] kubectl: 1.34.1, cluster: 1.34.0 (minor skew: 0)
I0919 22:25:17.835536 69358 out.go:179] * Done! kubectl is now configured to use "ha-326307" cluster and "default" namespace by default
==> container status <==
CONTAINER       IMAGE           CREATED          STATE     NAME                      ATTEMPT   POD ID          POD
7791f71e5d5a5   8c811b4aec35f   12 minutes ago   Running   busybox                   0         b5e0c0fffea25   busybox-7b57f96db7-m8swj
ca68bbc020e20   52546a367cc9e   13 minutes ago   Running   coredns                   0         132023f334782   coredns-66bc5c9577-9j5pw
1f618dc8f0392   52546a367cc9e   13 minutes ago   Running   coredns                   0         a5ac32b4949ab   coredns-66bc5c9577-wqvzd
f52d2d9f5881b   6e38f40d628db   13 minutes ago   Running   storage-provisioner       0         7b77cca917bf4   storage-provisioner
365cc00c2e009   409467f978b4a   13 minutes ago   Running   kindnet-cni               0         96e027ec2b5fb   kindnet-gxnzs
bd9e41958ffbb   df0860106674d   13 minutes ago   Running   kube-proxy                0         06da62af16945   kube-proxy-8kxtv
c6c963d9a0cae   765655ea60781   13 minutes ago   Running   kube-vip                  0         5717652da0ef4   kube-vip-ha-326307
456a0c3cbf5ce   46169d968e920   13 minutes ago   Running   kube-scheduler            0         f02b9e82ff9b1   kube-scheduler-ha-326307
05ab0247624a7   a0af72f2ec6d6   13 minutes ago   Running   kube-controller-manager   0         6026f58e8c23a   kube-controller-manager-ha-326307
e5c59a6abe977   5f1f5298c888d   13 minutes ago   Running   etcd                      0         5f89382a468ad   etcd-ha-326307
e80d65e3c7c18   90550c43ad2bc   13 minutes ago   Running   kube-apiserver            0         3813626701bd1   kube-apiserver-ha-326307
==> containerd <==
Sep 19 22:23:51 ha-326307 containerd[767]: time="2025-09-19T22:23:51.754439323Z" level=info msg="CreateContainer within sandbox \"a5ac32b4949abcc8c1007cd2947e92633d80d759aeaf0e7d6b490f2610f81170\" for container &ContainerMetadata{Name:coredns,Attempt:0,}"
Sep 19 22:23:51 ha-326307 containerd[767]: time="2025-09-19T22:23:51.768027085Z" level=info msg="CreateContainer within sandbox \"a5ac32b4949abcc8c1007cd2947e92633d80d759aeaf0e7d6b490f2610f81170\" for &ContainerMetadata{Name:coredns,Attempt:0,} returns container id \"1f618dc8f039242512f0147a2a38ee8cc0d3d5730c44724d6fc5d4c498121cd6\""
Sep 19 22:23:51 ha-326307 containerd[767]: time="2025-09-19T22:23:51.768844132Z" level=info msg="StartContainer for \"1f618dc8f039242512f0147a2a38ee8cc0d3d5730c44724d6fc5d4c498121cd6\""
Sep 19 22:23:51 ha-326307 containerd[767]: time="2025-09-19T22:23:51.836885904Z" level=info msg="StartContainer for \"1f618dc8f039242512f0147a2a38ee8cc0d3d5730c44724d6fc5d4c498121cd6\" returns successfully"
Sep 19 22:23:55 ha-326307 containerd[767]: time="2025-09-19T22:23:55.632881043Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bc5c9577-9j5pw,Uid:7d073e38-b63e-494d-bda0-3dde372a950b,Namespace:kube-system,Attempt:0,}"
Sep 19 22:23:55 ha-326307 containerd[767]: time="2025-09-19T22:23:55.759782586Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-66bc5c9577-9j5pw,Uid:7d073e38-b63e-494d-bda0-3dde372a950b,Namespace:kube-system,Attempt:0,} returns sandbox id \"132023f3347828aa89cc27cb846b63299e7492a9d95f45bd87fa130aee9b5cee\""
Sep 19 22:23:55 ha-326307 containerd[767]: time="2025-09-19T22:23:55.765750080Z" level=info msg="CreateContainer within sandbox \"132023f3347828aa89cc27cb846b63299e7492a9d95f45bd87fa130aee9b5cee\" for container &ContainerMetadata{Name:coredns,Attempt:0,}"
Sep 19 22:23:55 ha-326307 containerd[767]: time="2025-09-19T22:23:55.779792584Z" level=info msg="CreateContainer within sandbox \"132023f3347828aa89cc27cb846b63299e7492a9d95f45bd87fa130aee9b5cee\" for &ContainerMetadata{Name:coredns,Attempt:0,} returns container id \"ca68bbc020e2091cdd81beb73e5c446a19425f555a16039acec158683b396c93\""
Sep 19 22:23:55 ha-326307 containerd[767]: time="2025-09-19T22:23:55.780572301Z" level=info msg="StartContainer for \"ca68bbc020e2091cdd81beb73e5c446a19425f555a16039acec158683b396c93\""
Sep 19 22:23:55 ha-326307 containerd[767]: time="2025-09-19T22:23:55.854015268Z" level=info msg="StartContainer for \"ca68bbc020e2091cdd81beb73e5c446a19425f555a16039acec158683b396c93\" returns successfully"
Sep 19 22:25:19 ha-326307 containerd[767]: time="2025-09-19T22:25:19.151709073Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox-7b57f96db7-m8swj,Uid:7533a5f9-7c6d-4476-9e03-eb8abe0aadbc,Namespace:default,Attempt:0,}"
Sep 19 22:25:19 ha-326307 containerd[767]: time="2025-09-19T22:25:19.267660233Z" level=warning msg="error from *cgroupsv2.Manager.EventChan" error="failed to create inotify fd"
Sep 19 22:25:19 ha-326307 containerd[767]: time="2025-09-19T22:25:19.268098400Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox-7b57f96db7-m8swj,Uid:7533a5f9-7c6d-4476-9e03-eb8abe0aadbc,Namespace:default,Attempt:0,} returns sandbox id \"b5e0c0fffea25b8c53f5de67f8e65d99323d23e48eb0c0ac619fcba386c566a1\""
Sep 19 22:25:19 ha-326307 containerd[767]: time="2025-09-19T22:25:19.270196453Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28\""
Sep 19 22:25:21 ha-326307 containerd[767]: time="2025-09-19T22:25:21.412014033Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox:1.28\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Sep 19 22:25:21 ha-326307 containerd[767]: time="2025-09-19T22:25:21.413088793Z" level=info msg="stop pulling image gcr.io/k8s-minikube/busybox:1.28: active requests=0, bytes read=727667"
Sep 19 22:25:21 ha-326307 containerd[767]: time="2025-09-19T22:25:21.414707234Z" level=info msg="ImageCreate event name:\"sha256:8c811b4aec35f259572d0f79207bc0678df4c736eeec50bc9fec37ed936a472a\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Sep 19 22:25:21 ha-326307 containerd[767]: time="2025-09-19T22:25:21.417602556Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox@sha256:9afb80db71730dbb303fe00765cbf34bddbdc6b66e49897fc2e1861967584b12\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Sep 19 22:25:21 ha-326307 containerd[767]: time="2025-09-19T22:25:21.418335313Z" level=info msg="Pulled image \"gcr.io/k8s-minikube/busybox:1.28\" with image id \"sha256:8c811b4aec35f259572d0f79207bc0678df4c736eeec50bc9fec37ed936a472a\", repo tag \"gcr.io/k8s-minikube/busybox:1.28\", repo digest \"gcr.io/k8s-minikube/busybox@sha256:9afb80db71730dbb303fe00765cbf34bddbdc6b66e49897fc2e1861967584b12\", size \"725911\" in 2.148090964s"
Sep 19 22:25:21 ha-326307 containerd[767]: time="2025-09-19T22:25:21.418383876Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28\" returns image reference \"sha256:8c811b4aec35f259572d0f79207bc0678df4c736eeec50bc9fec37ed936a472a\""
Sep 19 22:25:21 ha-326307 containerd[767]: time="2025-09-19T22:25:21.423388311Z" level=info msg="CreateContainer within sandbox \"b5e0c0fffea25b8c53f5de67f8e65d99323d23e48eb0c0ac619fcba386c566a1\" for container &ContainerMetadata{Name:busybox,Attempt:0,}"
Sep 19 22:25:21 ha-326307 containerd[767]: time="2025-09-19T22:25:21.442455841Z" level=info msg="CreateContainer within sandbox \"b5e0c0fffea25b8c53f5de67f8e65d99323d23e48eb0c0ac619fcba386c566a1\" for &ContainerMetadata{Name:busybox,Attempt:0,} returns container id \"7791f71e5d5a520d6ef052d5759a1050a768d5b2e137e791635bcd0e97251f08\""
Sep 19 22:25:21 ha-326307 containerd[767]: time="2025-09-19T22:25:21.443119612Z" level=info msg="StartContainer for \"7791f71e5d5a520d6ef052d5759a1050a768d5b2e137e791635bcd0e97251f08\""
Sep 19 22:25:21 ha-326307 containerd[767]: time="2025-09-19T22:25:21.497884940Z" level=warning msg="error from *cgroupsv2.Manager.EventChan" error="failed to create inotify fd"
Sep 19 22:25:21 ha-326307 containerd[767]: time="2025-09-19T22:25:21.500641712Z" level=info msg="StartContainer for \"7791f71e5d5a520d6ef052d5759a1050a768d5b2e137e791635bcd0e97251f08\" returns successfully"
==> coredns [1f618dc8f039242512f0147a2a38ee8cc0d3d5730c44724d6fc5d4c498121cd6] <==
maxprocs: Leaving GOMAXPROCS=8: CPU quota undefined
.:53
[INFO] plugin/reload: Running configuration SHA512 = 9e2996f8cb67ac53e0259ab1f8d615d07d1beb0bd07e6a1e39769c3bf486a905bb991cc47f8d2f14d0d3a90a87dfc625a0b4c524fed169d8158c40657c0694b1
CoreDNS-1.12.1
linux/amd64, go1.24.1, 707c7c1
[INFO] 127.0.0.1:54337 - 24572 "HINFO IN 5143313645322175939.5313042790825403134. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.069732464s
[INFO] 10.244.0.4:35490 - 2 "PTR IN 10.0.96.10.in-addr.arpa. udp 41 false 512" NOERROR qr,aa,rd 116 0.000326279s
[INFO] 10.244.0.4:55330 - 5 "PTR IN 135.186.33.3.in-addr.arpa. udp 43 false 512" NOERROR qr,rd,ra 124 0.014239882s
[INFO] 10.244.1.2:39628 - 2 "PTR IN 10.0.96.10.in-addr.arpa. udp 41 false 512" NOERROR qr,aa,rd 116 0.000210602s
[INFO] 10.244.1.2:46891 - 4 "A IN kubernetes.io. udp 31 false 512" NOERROR qr,rd,ra 89 0.001261026s
[INFO] 10.244.1.2:43124 - 5 "PTR IN 90.167.197.15.in-addr.arpa. udp 44 false 512" NOERROR qr,rd,ra 126 0.00098216s
[INFO] 10.244.1.2:49555 - 6 "PTR IN 135.186.33.3.in-addr.arpa. udp 43 false 512" NOERROR qr,aa,rd,ra 124 0.00013424s
[INFO] 10.244.0.4:40362 - 5 "AAAA IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 147 0.00024135s
[INFO] 10.244.0.4:45629 - 9 "PTR IN 1.0.96.10.in-addr.arpa. udp 40 false 512" NOERROR qr,aa,rd 112 0.000168694s
[INFO] 10.244.1.2:52354 - 2 "PTR IN 10.0.96.10.in-addr.arpa. udp 41 false 512" NOERROR qr,aa,rd 116 0.000189457s
[INFO] 10.244.1.2:43857 - 4 "AAAA IN kubernetes.default.default.svc.cluster.local. udp 62 false 512" NXDOMAIN qr,aa,rd 155 0.000161715s
[INFO] 10.244.1.2:51922 - 5 "AAAA IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 147 0.000145764s
[INFO] 10.244.1.2:57320 - 6 "A IN kubernetes.default. udp 36 false 512" NXDOMAIN qr,rd,ra 111 0.009888497s
[INFO] 10.244.1.2:49841 - 9 "PTR IN 1.0.96.10.in-addr.arpa. udp 40 false 512" NOERROR qr,aa,rd 112 0.000169285s
[INFO] 10.244.0.4:51548 - 2 "PTR IN 10.0.96.10.in-addr.arpa. udp 41 false 512" NOERROR qr,aa,rd 116 0.000159656s
[INFO] 10.244.0.4:48681 - 5 "PTR IN 1.0.96.10.in-addr.arpa. udp 40 false 512" NOERROR qr,aa,rd 112 0.000110507s
[INFO] 10.244.1.2:52993 - 5 "PTR IN 1.0.96.10.in-addr.arpa. udp 40 false 512" NOERROR qr,aa,rd 112 0.000137337s
==> coredns [ca68bbc020e2091cdd81beb73e5c446a19425f555a16039acec158683b396c93] <==
maxprocs: Leaving GOMAXPROCS=8: CPU quota undefined
.:53
[INFO] plugin/reload: Running configuration SHA512 = 9e2996f8cb67ac53e0259ab1f8d615d07d1beb0bd07e6a1e39769c3bf486a905bb991cc47f8d2f14d0d3a90a87dfc625a0b4c524fed169d8158c40657c0694b1
CoreDNS-1.12.1
linux/amd64, go1.24.1, 707c7c1
[INFO] 127.0.0.1:49588 - 50300 "HINFO IN 9047056621409016881.2982736294753326061. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.063128768s
[INFO] 10.244.0.4:39328 - 3 "AAAA IN kubernetes.io. udp 31 false 512" NOERROR qr,rd,ra 140 0.014249598s
[INFO] 10.244.0.4:59759 - 4 "A IN kubernetes.io. udp 31 false 512" NOERROR qr,rd,ra 89 0.013332957s
[INFO] 10.244.0.4:50336 - 6 "PTR IN 90.167.197.15.in-addr.arpa. udp 44 false 512" NOERROR qr,rd,ra 126 0.008865788s
[INFO] 10.244.1.2:42753 - 3 "AAAA IN kubernetes.io. udp 31 false 512" NOERROR qr,aa,rd,ra 140 0.000158745s
[INFO] 10.244.0.4:52334 - 2 "PTR IN 10.0.96.10.in-addr.arpa. udp 41 false 512" NOERROR qr,aa,rd 116 0.000261159s
[INFO] 10.244.0.4:43558 - 3 "AAAA IN kubernetes.default. udp 36 false 512" NXDOMAIN qr,rd,ra 111 0.010645205s
[INFO] 10.244.0.4:51059 - 4 "AAAA IN kubernetes.default.default.svc.cluster.local. udp 62 false 512" NXDOMAIN qr,aa,rd 155 0.000154122s
[INFO] 10.244.0.4:46147 - 6 "A IN kubernetes.default. udp 36 false 512" NXDOMAIN qr,rd,ra 111 0.012594143s
[INFO] 10.244.0.4:39163 - 7 "A IN kubernetes.default.default.svc.cluster.local. udp 62 false 512" NXDOMAIN qr,aa,rd 155 0.000143755s
[INFO] 10.244.0.4:57061 - 8 "A IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 106 0.00014731s
[INFO] 10.244.1.2:59502 - 3 "AAAA IN kubernetes.default. udp 36 false 512" NXDOMAIN qr,aa,rd,ra 111 0.000129746s
[INFO] 10.244.1.2:49570 - 7 "A IN kubernetes.default.default.svc.cluster.local. udp 62 false 512" NXDOMAIN qr,aa,rd 155 0.000172915s
[INFO] 10.244.1.2:48519 - 8 "A IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 106 0.000175653s
[INFO] 10.244.0.4:50569 - 3 "AAAA IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 147 0.000326714s
[INFO] 10.244.0.4:45465 - 4 "A IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 106 0.000234038s
[INFO] 10.244.1.2:52569 - 2 "PTR IN 10.0.96.10.in-addr.arpa. udp 41 false 512" NOERROR qr,aa,rd 116 0.000176154s
[INFO] 10.244.1.2:36719 - 3 "AAAA IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 147 0.000205481s
[INFO] 10.244.1.2:58705 - 4 "A IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 106 0.000195468s
==> describe nodes <==
Name: ha-326307
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=ha-326307
kubernetes.io/os=linux
minikube.k8s.io/commit=6e37ee63f758843bb5fe33c3a528c564c4b83d53
minikube.k8s.io/name=ha-326307
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_09_19T22_23_35_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Fri, 19 Sep 2025 22:23:31 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: ha-326307
AcquireTime: <unset>
RenewTime: Fri, 19 Sep 2025 22:37:21 +0000
Conditions:
Type             Status   LastHeartbeatTime                 LastTransitionTime                Reason                       Message
----             ------   -----------------                 ------------------                ------                       -------
MemoryPressure   False    Fri, 19 Sep 2025 22:35:38 +0000   Fri, 19 Sep 2025 22:23:31 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
DiskPressure     False    Fri, 19 Sep 2025 22:35:38 +0000   Fri, 19 Sep 2025 22:23:31 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
PIDPressure      False    Fri, 19 Sep 2025 22:35:38 +0000   Fri, 19 Sep 2025 22:23:31 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
Ready            True     Fri, 19 Sep 2025 22:35:38 +0000   Fri, 19 Sep 2025 22:23:32 +0000   KubeletReady                 kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.2
Hostname: ha-326307
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863456Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863456Ki
pods: 110
System Info:
Machine ID: 2616418f44a84ee78b49dce19e95d1fb
System UUID: 9c3f30ed-68b2-4a1c-af95-9031ae210a78
Boot ID: 760555a9-6fca-43eb-a2c3-c2de6bc00e61
Kernel Version: 6.8.0-1037-gcp
OS Image: Ubuntu 22.04.5 LTS
Operating System: linux
Architecture: amd64
Container Runtime Version: containerd://1.7.27
Kubelet Version: v1.34.0
Kube-Proxy Version:
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (11 in total)
Namespace     Name                                 CPU Requests   CPU Limits   Memory Requests   Memory Limits   Age
---------     ----                                 ------------   ----------   ---------------   -------------   ---
default       busybox-7b57f96db7-m8swj             0 (0%)         0 (0%)       0 (0%)            0 (0%)          12m
kube-system   coredns-66bc5c9577-9j5pw             100m (1%)      0 (0%)       70Mi (0%)         170Mi (0%)      13m
kube-system   coredns-66bc5c9577-wqvzd             100m (1%)      0 (0%)       70Mi (0%)         170Mi (0%)      13m
kube-system   etcd-ha-326307                       100m (1%)      0 (0%)       100Mi (0%)        0 (0%)          13m
kube-system   kindnet-gxnzs                        100m (1%)      100m (1%)    50Mi (0%)         50Mi (0%)       13m
kube-system   kube-apiserver-ha-326307             250m (3%)      0 (0%)       0 (0%)            0 (0%)          13m
kube-system   kube-controller-manager-ha-326307    200m (2%)      0 (0%)       0 (0%)            0 (0%)          13m
kube-system   kube-proxy-8kxtv                     0 (0%)         0 (0%)       0 (0%)            0 (0%)          13m
kube-system   kube-scheduler-ha-326307             100m (1%)      0 (0%)       0 (0%)            0 (0%)          13m
kube-system   kube-vip-ha-326307                   0 (0%)         0 (0%)       0 (0%)            0 (0%)          13m
kube-system   storage-provisioner                  0 (0%)         0 (0%)       0 (0%)            0 (0%)          13m
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource           Requests    Limits
--------           --------    ------
cpu                950m (11%)  100m (1%)
memory             290Mi (0%)  390Mi (1%)
ephemeral-storage  0 (0%)      0 (0%)
hugepages-1Gi      0 (0%)      0 (0%)
hugepages-2Mi      0 (0%)      0 (0%)
Events:
Type    Reason                   Age                From             Message
----    ------                   ----               ----             -------
Normal  Starting                 13m                kube-proxy
Normal  NodeAllocatableEnforced  13m                kubelet          Updated Node Allocatable limit across pods
Normal  NodeHasSufficientMemory  13m (x8 over 13m)  kubelet          Node ha-326307 status is now: NodeHasSufficientMemory
Normal  NodeHasNoDiskPressure    13m (x8 over 13m)  kubelet          Node ha-326307 status is now: NodeHasNoDiskPressure
Normal  NodeHasSufficientPID     13m (x7 over 13m)  kubelet          Node ha-326307 status is now: NodeHasSufficientPID
Normal  Starting                 13m                kubelet          Starting kubelet.
Normal  NodeAllocatableEnforced  13m                kubelet          Updated Node Allocatable limit across pods
Normal  NodeHasSufficientMemory  13m                kubelet          Node ha-326307 status is now: NodeHasSufficientMemory
Normal  NodeHasNoDiskPressure    13m                kubelet          Node ha-326307 status is now: NodeHasNoDiskPressure
Normal  NodeHasSufficientPID     13m                kubelet          Node ha-326307 status is now: NodeHasSufficientPID
Normal  RegisteredNode           13m                node-controller  Node ha-326307 event: Registered Node ha-326307 in Controller
Normal  RegisteredNode           13m                node-controller  Node ha-326307 event: Registered Node ha-326307 in Controller
Normal  RegisteredNode           12m                node-controller  Node ha-326307 event: Registered Node ha-326307 in Controller
Name: ha-326307-m02
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=ha-326307-m02
kubernetes.io/os=linux
minikube.k8s.io/commit=6e37ee63f758843bb5fe33c3a528c564c4b83d53
minikube.k8s.io/name=ha-326307
minikube.k8s.io/primary=false
minikube.k8s.io/updated_at=2025_09_19T22_24_08_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Fri, 19 Sep 2025 22:24:08 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: ha-326307-m02
AcquireTime: <unset>
RenewTime: Fri, 19 Sep 2025 22:37:24 +0000
Conditions:
Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
----             ------  -----------------                 ------------------                ------                       -------
MemoryPressure   False   Fri, 19 Sep 2025 22:35:02 +0000   Fri, 19 Sep 2025 22:24:08 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
DiskPressure     False   Fri, 19 Sep 2025 22:35:02 +0000   Fri, 19 Sep 2025 22:24:08 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
PIDPressure      False   Fri, 19 Sep 2025 22:35:02 +0000   Fri, 19 Sep 2025 22:24:08 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
Ready            True    Fri, 19 Sep 2025 22:35:02 +0000   Fri, 19 Sep 2025 22:24:11 +0000   KubeletReady                 kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.3
Hostname: ha-326307-m02
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863456Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863456Ki
pods: 110
System Info:
Machine ID: e4f3b60b3b464269bc193e23d4361613
System UUID: 8095cd89-f43b-4d8a-adef-b40d6aaa7ad2
Boot ID: 760555a9-6fca-43eb-a2c3-c2de6bc00e61
Kernel Version: 6.8.0-1037-gcp
OS Image: Ubuntu 22.04.5 LTS
Operating System: linux
Architecture: amd64
Container Runtime Version: containerd://1.7.27
Kubelet Version: v1.34.0
Kube-Proxy Version:
PodCIDR: 10.244.1.0/24
PodCIDRs: 10.244.1.0/24
Non-terminated Pods: (8 in total)
Namespace    Name                                    CPU Requests  CPU Limits  Memory Requests  Memory Limits  Age
---------    ----                                    ------------  ----------  ---------------  -------------  ---
default      busybox-7b57f96db7-tfpvf                0 (0%)        0 (0%)      0 (0%)           0 (0%)         12m
kube-system  etcd-ha-326307-m02                      100m (1%)     0 (0%)      100Mi (0%)       0 (0%)         13m
kube-system  kindnet-mk6pv                           100m (1%)     100m (1%)   50Mi (0%)        50Mi (0%)      13m
kube-system  kube-apiserver-ha-326307-m02            250m (3%)     0 (0%)      0 (0%)           0 (0%)         13m
kube-system  kube-controller-manager-ha-326307-m02   200m (2%)     0 (0%)      0 (0%)           0 (0%)         13m
kube-system  kube-proxy-q8mtj                        0 (0%)        0 (0%)      0 (0%)           0 (0%)         13m
kube-system  kube-scheduler-ha-326307-m02            100m (1%)     0 (0%)      0 (0%)           0 (0%)         13m
kube-system  kube-vip-ha-326307-m02                  0 (0%)        0 (0%)      0 (0%)           0 (0%)         13m
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource           Requests    Limits
--------           --------    ------
cpu                750m (9%)   100m (1%)
memory             150Mi (0%)  50Mi (0%)
ephemeral-storage  0 (0%)      0 (0%)
hugepages-1Gi      0 (0%)      0 (0%)
hugepages-2Mi      0 (0%)      0 (0%)
Events:
Type    Reason          Age  From             Message
----    ------          ---- ----             -------
Normal  Starting        13m  kube-proxy
Normal  RegisteredNode  13m  node-controller  Node ha-326307-m02 event: Registered Node ha-326307-m02 in Controller
Normal  RegisteredNode  13m  node-controller  Node ha-326307-m02 event: Registered Node ha-326307-m02 in Controller
Normal  RegisteredNode  12m  node-controller  Node ha-326307-m02 event: Registered Node ha-326307-m02 in Controller
Name: ha-326307-m03
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=ha-326307-m03
kubernetes.io/os=linux
minikube.k8s.io/commit=6e37ee63f758843bb5fe33c3a528c564c4b83d53
minikube.k8s.io/name=ha-326307
minikube.k8s.io/primary=false
minikube.k8s.io/updated_at=2025_09_19T22_24_32_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Fri, 19 Sep 2025 22:24:31 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: ha-326307-m03
AcquireTime: <unset>
RenewTime: Fri, 19 Sep 2025 22:37:17 +0000
Conditions:
Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
----             ------  -----------------                 ------------------                ------                       -------
MemoryPressure   False   Fri, 19 Sep 2025 22:34:44 +0000   Fri, 19 Sep 2025 22:24:31 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
DiskPressure     False   Fri, 19 Sep 2025 22:34:44 +0000   Fri, 19 Sep 2025 22:24:31 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
PIDPressure      False   Fri, 19 Sep 2025 22:34:44 +0000   Fri, 19 Sep 2025 22:24:31 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
Ready            True    Fri, 19 Sep 2025 22:34:44 +0000   Fri, 19 Sep 2025 22:24:34 +0000   KubeletReady                 kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.4
Hostname: ha-326307-m03
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863456Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863456Ki
pods: 110
System Info:
Machine ID: 1434e19b2a274233a619428a76d99322
System UUID: 5814a8d4-c435-490f-8e5e-a8b038e01be7
Boot ID: 760555a9-6fca-43eb-a2c3-c2de6bc00e61
Kernel Version: 6.8.0-1037-gcp
OS Image: Ubuntu 22.04.5 LTS
Operating System: linux
Architecture: amd64
Container Runtime Version: containerd://1.7.27
Kubelet Version: v1.34.0
Kube-Proxy Version:
PodCIDR: 10.244.2.0/24
PodCIDRs: 10.244.2.0/24
Non-terminated Pods: (8 in total)
Namespace    Name                                    CPU Requests  CPU Limits  Memory Requests  Memory Limits  Age
---------    ----                                    ------------  ----------  ---------------  -------------  ---
default      busybox-7b57f96db7-jdczt                0 (0%)        0 (0%)      0 (0%)           0 (0%)         12m
kube-system  etcd-ha-326307-m03                      100m (1%)     0 (0%)      100Mi (0%)       0 (0%)         12m
kube-system  kindnet-dmxl8                           100m (1%)     100m (1%)   50Mi (0%)        50Mi (0%)      12m
kube-system  kube-apiserver-ha-326307-m03            250m (3%)     0 (0%)      0 (0%)           0 (0%)         12m
kube-system  kube-controller-manager-ha-326307-m03   200m (2%)     0 (0%)      0 (0%)           0 (0%)         12m
kube-system  kube-proxy-ws89d                        0 (0%)        0 (0%)      0 (0%)           0 (0%)         12m
kube-system  kube-scheduler-ha-326307-m03            100m (1%)     0 (0%)      0 (0%)           0 (0%)         12m
kube-system  kube-vip-ha-326307-m03                  0 (0%)        0 (0%)      0 (0%)           0 (0%)         12m
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource           Requests    Limits
--------           --------    ------
cpu                750m (9%)   100m (1%)
memory             150Mi (0%)  50Mi (0%)
ephemeral-storage  0 (0%)      0 (0%)
hugepages-1Gi      0 (0%)      0 (0%)
hugepages-2Mi      0 (0%)      0 (0%)
Events:
Type    Reason          Age  From             Message
----    ------          ---- ----             -------
Normal  RegisteredNode  12m  node-controller  Node ha-326307-m03 event: Registered Node ha-326307-m03 in Controller
Normal  RegisteredNode  12m  node-controller  Node ha-326307-m03 event: Registered Node ha-326307-m03 in Controller
Normal  RegisteredNode  12m  node-controller  Node ha-326307-m03 event: Registered Node ha-326307-m03 in Controller
==> dmesg <==
[Sep19 21:17] MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.
[ +0.001853] TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.
[ +0.001006] MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.
[ +0.090013] acpi PNP0A03:00: fail to add MMCONFIG information, can't access extended configuration space under this bridge
[ +0.467231] i8042: Warning: Keylock active
[ +0.009747] device-mapper: core: CONFIG_IMA_DISABLE_HTABLE is disabled. Duplicate IMA measurements will not be recorded in the IMA log.
[ +0.004210] platform eisa.0: EISA: Cannot allocate resource for mainboard
[ +0.001075] platform eisa.0: Cannot allocate resource for EISA slot 1
[ +0.000901] platform eisa.0: Cannot allocate resource for EISA slot 2
[ +0.000907] platform eisa.0: Cannot allocate resource for EISA slot 3
[ +0.000709] platform eisa.0: Cannot allocate resource for EISA slot 4
[ +0.000908] platform eisa.0: Cannot allocate resource for EISA slot 5
[ +0.000709] platform eisa.0: Cannot allocate resource for EISA slot 6
[ +0.001042] platform eisa.0: Cannot allocate resource for EISA slot 7
[ +0.001465] platform eisa.0: Cannot allocate resource for EISA slot 8
[ +0.534799] block sda: the capability attribute has been deprecated.
[ +0.099617] systemd[1]: Configuration file /run/systemd/system/netplan-ovs-cleanup.service is marked world-inaccessible. This has no effect as configuration data is accessible via APIs without restrictions. Proceeding anyway.
[ +0.027269] systemd[1]: /lib/systemd/system/snapd.service:23: Unknown key name 'RestartMode' in section 'Service', ignoring.
[ +8.089616] kauditd_printk_skb: 47 callbacks suppressed
==> etcd [e5c59a6abe97751de42afd27010936d1c3a401fad6cd730e75a1692a895b4fbc] <==
{"level":"warn","ts":"2025-09-19T22:24:25.337105Z","caller":"rafthttp/stream.go:420","msg":"lost TCP streaming connection with remote peer","stream-reader-type":"stream MsgApp v2","local-member-id":"aec36adc501070cc","remote-peer-id":"5512420eb470d1ce","error":"EOF"}
{"level":"warn","ts":"2025-09-19T22:24:25.337366Z","caller":"rafthttp/stream.go:420","msg":"lost TCP streaming connection with remote peer","stream-reader-type":"stream Message","local-member-id":"aec36adc501070cc","remote-peer-id":"5512420eb470d1ce","error":"EOF"}
{"level":"info","ts":"2025-09-19T22:24:25.352476Z","caller":"rafthttp/stream.go:248","msg":"set message encoder","from":"aec36adc501070cc","to":"5512420eb470d1ce","stream-type":"stream MsgApp v2"}
{"level":"warn","ts":"2025-09-19T22:24:25.352519Z","caller":"rafthttp/stream.go:264","msg":"closed TCP streaming connection with remote peer","stream-writer-type":"stream MsgApp v2","local-member-id":"aec36adc501070cc","remote-peer-id":"5512420eb470d1ce"}
{"level":"info","ts":"2025-09-19T22:24:25.352532Z","caller":"rafthttp/stream.go:273","msg":"established TCP streaming connection with remote peer","stream-writer-type":"stream MsgApp v2","local-member-id":"aec36adc501070cc","remote-peer-id":"5512420eb470d1ce"}
{"level":"info","ts":"2025-09-19T22:24:25.355631Z","caller":"rafthttp/stream.go:248","msg":"set message encoder","from":"aec36adc501070cc","to":"5512420eb470d1ce","stream-type":"stream Message"}
{"level":"warn","ts":"2025-09-19T22:24:25.355692Z","caller":"rafthttp/stream.go:264","msg":"closed TCP streaming connection with remote peer","stream-writer-type":"stream Message","local-member-id":"aec36adc501070cc","remote-peer-id":"5512420eb470d1ce"}
{"level":"info","ts":"2025-09-19T22:24:25.355712Z","caller":"rafthttp/stream.go:273","msg":"established TCP streaming connection with remote peer","stream-writer-type":"stream Message","local-member-id":"aec36adc501070cc","remote-peer-id":"5512420eb470d1ce"}
{"level":"info","ts":"2025-09-19T22:24:25.427429Z","caller":"rafthttp/stream.go:411","msg":"established TCP streaming connection with remote peer","stream-reader-type":"stream MsgApp v2","local-member-id":"aec36adc501070cc","remote-peer-id":"5512420eb470d1ce"}
{"level":"info","ts":"2025-09-19T22:24:25.428290Z","caller":"rafthttp/stream.go:411","msg":"established TCP streaming connection with remote peer","stream-reader-type":"stream Message","local-member-id":"aec36adc501070cc","remote-peer-id":"5512420eb470d1ce"}
{"level":"warn","ts":"2025-09-19T22:24:25.447984Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"192.168.49.4:32950","server-name":"","error":"EOF"}
{"level":"info","ts":"2025-09-19T22:24:25.491427Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1981","msg":"aec36adc501070cc switched to configuration voters=(6130034673728934350 12593026477526642892 16449250771884659557)"}
{"level":"info","ts":"2025-09-19T22:24:25.491593Z","caller":"membership/cluster.go:550","msg":"promote member","cluster-id":"fa54960ea34d58be","local-member-id":"aec36adc501070cc","promoted-member-id":"5512420eb470d1ce"}
{"level":"info","ts":"2025-09-19T22:24:25.491634Z","caller":"etcdserver/server.go:1752","msg":"applied a configuration change through raft","local-member-id":"aec36adc501070cc","raft-conf-change":"ConfChangeAddNode","raft-conf-change-node-id":"5512420eb470d1ce"}
{"level":"warn","ts":"2025-09-19T22:24:25.493734Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"192.168.49.4:32956","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-09-19T22:24:25.530775Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"192.168.49.4:32980","server-name":"","error":"EOF"}
{"level":"info","ts":"2025-09-19T22:24:25.607668Z","caller":"etcdserver/server.go:1856","msg":"sent merged snapshot","from":"aec36adc501070cc","to":"e4477a6cd7815365","bytes":946167,"size":"946 kB","took":"30.009579431s"}
{"level":"info","ts":"2025-09-19T22:24:29.797825Z","caller":"etcdserver/server.go:2246","msg":"skip compaction since there is an inflight snapshot"}
{"level":"info","ts":"2025-09-19T22:24:31.923615Z","caller":"etcdserver/server.go:2246","msg":"skip compaction since there is an inflight snapshot"}
{"level":"info","ts":"2025-09-19T22:24:35.871798Z","caller":"etcdserver/server.go:2246","msg":"skip compaction since there is an inflight snapshot"}
{"level":"info","ts":"2025-09-19T22:24:53.749925Z","caller":"etcdserver/server.go:2246","msg":"skip compaction since there is an inflight snapshot"}
{"level":"info","ts":"2025-09-19T22:24:55.314881Z","caller":"etcdserver/server.go:1856","msg":"sent merged snapshot","from":"aec36adc501070cc","to":"5512420eb470d1ce","bytes":1356311,"size":"1.4 MB","took":"30.015547589s"}
{"level":"info","ts":"2025-09-19T22:33:30.750666Z","caller":"mvcc/index.go:194","msg":"compact tree index","revision":1558}
{"level":"info","ts":"2025-09-19T22:33:30.775074Z","caller":"mvcc/kvstore_compaction.go:70","msg":"finished scheduled compaction","compact-revision":1558,"took":"23.935678ms","hash":623549535,"current-db-size-bytes":4292608,"current-db-size":"4.3 MB","current-db-size-in-use-bytes":2306048,"current-db-size-in-use":"2.3 MB"}
{"level":"info","ts":"2025-09-19T22:33:30.775132Z","caller":"mvcc/hash.go:157","msg":"storing new hash","hash":623549535,"revision":1558,"compact-revision":-1}
==> kernel <==
22:37:25 up 1:19, 0 users, load average: 0.60, 0.58, 0.70
Linux ha-326307 6.8.0-1037-gcp #39~22.04.1-Ubuntu SMP Thu Aug 21 17:29:24 UTC 2025 x86_64 x86_64 x86_64 GNU/Linux
PRETTY_NAME="Ubuntu 22.04.5 LTS"
==> kindnet [365cc00c2e009eeed7e71d1202a4d406c12f0d9faee38762ba691eb2d7c71f89] <==
I0919 22:36:40.991246 1 main.go:324] Node ha-326307-m03 has CIDR [10.244.2.0/24]
I0919 22:36:50.998290 1 main.go:297] Handling node with IPs: map[192.168.49.2:{}]
I0919 22:36:50.998332 1 main.go:301] handling current node
I0919 22:36:50.998351 1 main.go:297] Handling node with IPs: map[192.168.49.3:{}]
I0919 22:36:50.998359 1 main.go:324] Node ha-326307-m02 has CIDR [10.244.1.0/24]
I0919 22:36:50.998554 1 main.go:297] Handling node with IPs: map[192.168.49.4:{}]
I0919 22:36:50.998568 1 main.go:324] Node ha-326307-m03 has CIDR [10.244.2.0/24]
I0919 22:37:00.996278 1 main.go:297] Handling node with IPs: map[192.168.49.2:{}]
I0919 22:37:00.996316 1 main.go:301] handling current node
I0919 22:37:00.996331 1 main.go:297] Handling node with IPs: map[192.168.49.3:{}]
I0919 22:37:00.996336 1 main.go:324] Node ha-326307-m02 has CIDR [10.244.1.0/24]
I0919 22:37:00.996584 1 main.go:297] Handling node with IPs: map[192.168.49.4:{}]
I0919 22:37:00.996603 1 main.go:324] Node ha-326307-m03 has CIDR [10.244.2.0/24]
I0919 22:37:10.992294 1 main.go:297] Handling node with IPs: map[192.168.49.4:{}]
I0919 22:37:10.992334 1 main.go:324] Node ha-326307-m03 has CIDR [10.244.2.0/24]
I0919 22:37:10.992571 1 main.go:297] Handling node with IPs: map[192.168.49.2:{}]
I0919 22:37:10.992589 1 main.go:301] handling current node
I0919 22:37:10.992605 1 main.go:297] Handling node with IPs: map[192.168.49.3:{}]
I0919 22:37:10.992614 1 main.go:324] Node ha-326307-m02 has CIDR [10.244.1.0/24]
I0919 22:37:20.990243 1 main.go:297] Handling node with IPs: map[192.168.49.4:{}]
I0919 22:37:20.990316 1 main.go:324] Node ha-326307-m03 has CIDR [10.244.2.0/24]
I0919 22:37:20.990527 1 main.go:297] Handling node with IPs: map[192.168.49.2:{}]
I0919 22:37:20.990541 1 main.go:301] handling current node
I0919 22:37:20.990553 1 main.go:297] Handling node with IPs: map[192.168.49.3:{}]
I0919 22:37:20.990557 1 main.go:324] Node ha-326307-m02 has CIDR [10.244.1.0/24]
==> kube-apiserver [e80d65e3c7c18da87f7fa003e39382f5a4285ba4782fc295197421c6b882a161] <==
I0919 22:28:24.938045 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0919 22:28:27.132243 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0919 22:29:46.201118 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0919 22:29:52.628026 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0919 22:30:52.147734 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0919 22:30:53.858237 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0919 22:32:15.996526 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0919 22:32:22.110278 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0919 22:33:31.733595 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12
I0919 22:33:36.316232 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0919 22:33:41.440724 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0919 22:34:43.430235 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0919 22:35:04.843923 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0919 22:35:47.576277 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0919 22:36:07.778568 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0919 22:37:07.288814 1 stats.go:136] "Error getting keys" err="empty key: \"\""
E0919 22:37:22.531524 1 conn.go:339] Error on socket receive: read tcp 192.168.49.254:8443->192.168.49.1:43412: use of closed network connection
E0919 22:37:22.776721 1 conn.go:339] Error on socket receive: read tcp 192.168.49.254:8443->192.168.49.1:43434: use of closed network connection
E0919 22:37:22.970082 1 conn.go:339] Error on socket receive: read tcp 192.168.49.254:8443->192.168.49.1:43448: use of closed network connection
E0919 22:37:23.110093 1 conn.go:339] Error on socket receive: read tcp 192.168.49.254:8443->192.168.49.1:43464: use of closed network connection
E0919 22:37:23.308629 1 conn.go:339] Error on socket receive: read tcp 192.168.49.254:8443->192.168.49.1:43484: use of closed network connection
E0919 22:37:23.494833 1 conn.go:339] Error on socket receive: read tcp 192.168.49.254:8443->192.168.49.1:43500: use of closed network connection
E0919 22:37:23.634448 1 conn.go:339] Error on socket receive: read tcp 192.168.49.254:8443->192.168.49.1:43520: use of closed network connection
E0919 22:37:23.803885 1 conn.go:339] Error on socket receive: read tcp 192.168.49.254:8443->192.168.49.1:43532: use of closed network connection
E0919 22:37:23.968210 1 conn.go:339] Error on socket receive: read tcp 192.168.49.254:8443->192.168.49.1:43546: use of closed network connection
==> kube-controller-manager [05ab0247624a7f8ffa6bc948e3abc3adc49911c297291eb0a6dd42e3df39f4cd] <==
I0919 22:23:38.744532 1 shared_informer.go:356] "Caches are synced" controller="attach detach"
I0919 22:23:38.744726 1 shared_informer.go:356] "Caches are synced" controller="bootstrap_signer"
I0919 22:23:38.744739 1 shared_informer.go:356] "Caches are synced" controller="service-cidr-controller"
I0919 22:23:38.744729 1 shared_informer.go:356] "Caches are synced" controller="resource_claim"
I0919 22:23:38.744737 1 shared_informer.go:356] "Caches are synced" controller="crt configmap"
I0919 22:23:38.744759 1 shared_informer.go:356] "Caches are synced" controller="endpoint"
I0919 22:23:38.745195 1 shared_informer.go:356] "Caches are synced" controller="TTL after finished"
I0919 22:23:38.745255 1 shared_informer.go:356] "Caches are synced" controller="job"
I0919 22:23:38.746448 1 shared_informer.go:356] "Caches are synced" controller="GC"
I0919 22:23:38.748706 1 shared_informer.go:356] "Caches are synced" controller="ReplicationController"
I0919 22:23:38.750017 1 shared_informer.go:356] "Caches are synced" controller="resource quota"
I0919 22:23:38.750086 1 shared_informer.go:356] "Caches are synced" controller="deployment"
I0919 22:23:38.751270 1 shared_informer.go:356] "Caches are synced" controller="namespace"
I0919 22:23:38.760899 1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-legacy-unknown"
I0919 22:23:38.760926 1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-kube-apiserver-client"
I0919 22:23:38.760971 1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-kubelet-client"
I0919 22:23:38.765332 1 shared_informer.go:356] "Caches are synced" controller="resource quota"
I0919 22:23:38.771790 1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
I0919 22:24:08.307746 1 actual_state_of_world.go:541] "Failed to update statusUpdateNeeded field in actual state of world" logger="persistentvolume-attach-detach-controller" err="Failed to set statusUpdateNeeded to needed true, because nodeName=\"ha-326307-m02\" does not exist"
I0919 22:24:08.319829 1 range_allocator.go:428] "Set node PodCIDR" logger="node-ipam-controller" node="ha-326307-m02" podCIDRs=["10.244.1.0/24"]
I0919 22:24:08.699971 1 node_lifecycle_controller.go:873] "Missing timestamp for Node. Assuming now as a timestamp" logger="node-lifecycle-controller" node="ha-326307-m02"
E0919 22:24:31.036531 1 certificate_controller.go:151] "Unhandled Error" err="Sync csr-8ztpb failed with : error updating signature for csr: Operation cannot be fulfilled on certificatesigningrequests.certificates.k8s.io \"csr-8ztpb\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError"
I0919 22:24:31.706808 1 actual_state_of_world.go:541] "Failed to update statusUpdateNeeded field in actual state of world" logger="persistentvolume-attach-detach-controller" err="Failed to set statusUpdateNeeded to needed true, because nodeName=\"ha-326307-m03\" does not exist"
I0919 22:24:31.736561 1 range_allocator.go:428] "Set node PodCIDR" logger="node-ipam-controller" node="ha-326307-m03" podCIDRs=["10.244.2.0/24"]
I0919 22:24:33.715916 1 node_lifecycle_controller.go:873] "Missing timestamp for Node. Assuming now as a timestamp" logger="node-lifecycle-controller" node="ha-326307-m03"
==> kube-proxy [bd9e41958ffbbb27dab3d180a56fb27df36a1a1896db3a6f322a8aabcda57677] <==
I0919 22:23:40.183862 1 server_linux.go:53] "Using iptables proxy"
I0919 22:23:40.251957 1 shared_informer.go:349] "Waiting for caches to sync" controller="node informer cache"
I0919 22:23:40.353105 1 shared_informer.go:356] "Caches are synced" controller="node informer cache"
I0919 22:23:40.353291 1 server.go:219] "Successfully retrieved NodeIPs" NodeIPs=["192.168.49.2"]
E0919 22:23:40.353503 1 server.go:256] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I0919 22:23:40.383440 1 server.go:265] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I0919 22:23:40.383522 1 server_linux.go:132] "Using iptables Proxier"
I0919 22:23:40.391534 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I0919 22:23:40.391999 1 server.go:527] "Version info" version="v1.34.0"
I0919 22:23:40.392045 1 server.go:529] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0919 22:23:40.394189 1 config.go:403] "Starting serviceCIDR config controller"
I0919 22:23:40.394304 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
I0919 22:23:40.394470 1 config.go:106] "Starting endpoint slice config controller"
I0919 22:23:40.394480 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
I0919 22:23:40.394266 1 config.go:200] "Starting service config controller"
I0919 22:23:40.394506 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
I0919 22:23:40.394279 1 config.go:309] "Starting node config controller"
I0919 22:23:40.394533 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
I0919 22:23:40.394540 1 shared_informer.go:356] "Caches are synced" controller="node config"
I0919 22:23:40.494617 1 shared_informer.go:356] "Caches are synced" controller="service config"
I0919 22:23:40.494643 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
I0919 22:23:40.494649 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
==> kube-scheduler [456a0c3cbf5ce028b9cbac658728c1fee13ad8e2659bfa0c625cd685d711c708] <==
E0919 22:23:33.115040 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicationcontrollers\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicationController"
E0919 22:23:33.129278 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"storageclasses\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StorageClass"
E0919 22:23:33.194774 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Pod: pods is forbidden: User \"system:kube-scheduler\" cannot list resource \"pods\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Pod"
E0919 22:23:33.337699 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ConfigMap: configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\"" logger="UnhandledError" reflector="runtime/asm_amd64.s:1700" type="*v1.ConfigMap"
I0919 22:23:35.055089 1 shared_informer.go:356] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
E0919 22:24:08.346116 1 framework.go:1400] "Plugin Failed" err="Operation cannot be fulfilled on pods/binding \"kindnet-mk6pv\": pod kindnet-mk6pv is already assigned to node \"ha-326307-m02\"" plugin="DefaultBinder" pod="kube-system/kindnet-mk6pv" node="ha-326307-m02"
E0919 22:24:08.346301 1 schedule_one.go:1079] "Error scheduling pod; retrying" err="running Bind plugin \"DefaultBinder\": Operation cannot be fulfilled on pods/binding \"kindnet-mk6pv\": pod kindnet-mk6pv is already assigned to node \"ha-326307-m02\"" logger="UnhandledError" pod="kube-system/kindnet-mk6pv"
E0919 22:24:08.365410 1 framework.go:1400] "Plugin Failed" err="Operation cannot be fulfilled on pods/binding \"kindnet-78xs2\": pod kindnet-78xs2 is being deleted, cannot be assigned to a host" plugin="DefaultBinder" pod="kube-system/kindnet-78xs2" node="ha-326307-m02"
E0919 22:24:08.365600 1 schedule_one.go:1079] "Error scheduling pod; retrying" err="running Bind plugin \"DefaultBinder\": Operation cannot be fulfilled on pods/binding \"kindnet-78xs2\": pod kindnet-78xs2 is being deleted, cannot be assigned to a host" logger="UnhandledError" pod="kube-system/kindnet-78xs2"
E0919 22:24:08.379248 1 pod_status_patch.go:111] "Failed to patch pod status" err="pods \"kindnet-78xs2\" not found" pod="kube-system/kindnet-78xs2"
E0919 22:24:10.002296 1 schedule_one.go:975] "Scheduler cache AssumePod failed" err="pod 71a20992-8279-4040-9edc-bedef6e7b570(kube-system/kindnet-mk6pv) is in the cache, so can't be assumed" pod="kube-system/kindnet-mk6pv"
E0919 22:24:10.002334 1 schedule_one.go:1079] "Error scheduling pod; retrying" err="pod 71a20992-8279-4040-9edc-bedef6e7b570(kube-system/kindnet-mk6pv) is in the cache, so can't be assumed" logger="UnhandledError" pod="kube-system/kindnet-mk6pv"
I0919 22:24:10.002368 1 schedule_one.go:1092] "Pod has been assigned to node. Abort adding it back to queue." pod="kube-system/kindnet-mk6pv" node="ha-326307-m02"
E0919 22:24:31.751287 1 framework.go:1400] "Plugin Failed" err="Operation cannot be fulfilled on pods/binding \"kindnet-pnj9r\": pod kindnet-pnj9r is already assigned to node \"ha-326307-m03\"" plugin="DefaultBinder" pod="kube-system/kindnet-pnj9r" node="ha-326307-m03"
E0919 22:24:31.751375 1 schedule_one.go:1079] "Error scheduling pod; retrying" err="running Bind plugin \"DefaultBinder\": Operation cannot be fulfilled on pods/binding \"kindnet-pnj9r\": pod kindnet-pnj9r is already assigned to node \"ha-326307-m03\"" logger="UnhandledError" pod="kube-system/kindnet-pnj9r"
E0919 22:24:31.887089 1 framework.go:1400] "Plugin Failed" err="Operation cannot be fulfilled on pods/binding \"kindnet-qxwpq\": pod kindnet-qxwpq is already assigned to node \"ha-326307-m03\"" plugin="DefaultBinder" pod="kube-system/kindnet-qxwpq" node="ha-326307-m03"
E0919 22:24:31.887576 1 schedule_one.go:379] "scheduler cache ForgetPod failed" err="pod 173e48ec-ef56-4824-9f55-a04b199b7943(kube-system/kindnet-qxwpq) wasn't assumed so cannot be forgotten" logger="UnhandledError" pod="kube-system/kindnet-qxwpq"
E0919 22:24:31.887605 1 schedule_one.go:1079] "Error scheduling pod; retrying" err="running Bind plugin \"DefaultBinder\": Operation cannot be fulfilled on pods/binding \"kindnet-qxwpq\": pod kindnet-qxwpq is already assigned to node \"ha-326307-m03\"" logger="UnhandledError" pod="kube-system/kindnet-qxwpq"
I0919 22:24:31.888969 1 schedule_one.go:1092] "Pod has been assigned to node. Abort adding it back to queue." pod="kube-system/kindnet-qxwpq" node="ha-326307-m03"
E0919 22:24:35.828083 1 framework.go:1400] "Plugin Failed" err="Operation cannot be fulfilled on pods/binding \"kindnet-nzzlb\": pod kindnet-nzzlb is already assigned to node \"ha-326307-m03\"" plugin="DefaultBinder" pod="kube-system/kindnet-nzzlb" node="ha-326307-m03"
E0919 22:24:35.828187 1 schedule_one.go:1079] "Error scheduling pod; retrying" err="running Bind plugin \"DefaultBinder\": Operation cannot be fulfilled on pods/binding \"kindnet-nzzlb\": pod kindnet-nzzlb is already assigned to node \"ha-326307-m03\"" logger="UnhandledError" pod="kube-system/kindnet-nzzlb"
E0919 22:24:35.839864 1 framework.go:1400] "Plugin Failed" err="Operation cannot be fulfilled on pods/binding \"kindnet-dmxl8\": pod kindnet-dmxl8 is already assigned to node \"ha-326307-m03\"" plugin="DefaultBinder" pod="kube-system/kindnet-dmxl8" node="ha-326307-m03"
E0919 22:24:35.839940 1 schedule_one.go:379] "scheduler cache ForgetPod failed" err="pod ba4fd407-2e93-4324-ab2d-4f192d79fdf5(kube-system/kindnet-dmxl8) wasn't assumed so cannot be forgotten" logger="UnhandledError" pod="kube-system/kindnet-dmxl8"
E0919 22:24:35.839964 1 schedule_one.go:1079] "Error scheduling pod; retrying" err="running Bind plugin \"DefaultBinder\": Operation cannot be fulfilled on pods/binding \"kindnet-dmxl8\": pod kindnet-dmxl8 is already assigned to node \"ha-326307-m03\"" logger="UnhandledError" pod="kube-system/kindnet-dmxl8"
I0919 22:24:35.841757 1 schedule_one.go:1092] "Pod has been assigned to node. Abort adding it back to queue." pod="kube-system/kindnet-dmxl8" node="ha-326307-m03"
==> kubelet <==
Sep 19 22:23:39 ha-326307 kubelet[1670]: I0919 22:23:39.638035 1670 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-cfg\" (UniqueName: \"kubernetes.io/host-path/4fa827fc-0ba7-49b7-a225-e36d76241d92-cni-cfg\") pod \"kindnet-gxnzs\" (UID: \"4fa827fc-0ba7-49b7-a225-e36d76241d92\") " pod="kube-system/kindnet-gxnzs"
Sep 19 22:23:39 ha-326307 kubelet[1670]: I0919 22:23:39.638087 1670 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/4fa827fc-0ba7-49b7-a225-e36d76241d92-xtables-lock\") pod \"kindnet-gxnzs\" (UID: \"4fa827fc-0ba7-49b7-a225-e36d76241d92\") " pod="kube-system/kindnet-gxnzs"
Sep 19 22:23:39 ha-326307 kubelet[1670]: I0919 22:23:39.638115 1670 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/70be5fcc-7ab6-4eb1-870d-988fee1a01bb-kube-proxy\") pod \"kube-proxy-8kxtv\" (UID: \"70be5fcc-7ab6-4eb1-870d-988fee1a01bb\") " pod="kube-system/kube-proxy-8kxtv"
Sep 19 22:23:40 ha-326307 kubelet[1670]: I0919 22:23:40.140870 1670 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/64376c4d-1b82-490d-887d-7f628b134014-config-volume\") pod \"coredns-66bc5c9577-wqvzd\" (UID: \"64376c4d-1b82-490d-887d-7f628b134014\") " pod="kube-system/coredns-66bc5c9577-wqvzd"
Sep 19 22:23:40 ha-326307 kubelet[1670]: I0919 22:23:40.140945 1670 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7d073e38-b63e-494d-bda0-3dde372a950b-config-volume\") pod \"coredns-66bc5c9577-9j5pw\" (UID: \"7d073e38-b63e-494d-bda0-3dde372a950b\") " pod="kube-system/coredns-66bc5c9577-9j5pw"
Sep 19 22:23:40 ha-326307 kubelet[1670]: I0919 22:23:40.140976 1670 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8tkhk\" (UniqueName: \"kubernetes.io/projected/64376c4d-1b82-490d-887d-7f628b134014-kube-api-access-8tkhk\") pod \"coredns-66bc5c9577-wqvzd\" (UID: \"64376c4d-1b82-490d-887d-7f628b134014\") " pod="kube-system/coredns-66bc5c9577-wqvzd"
Sep 19 22:23:40 ha-326307 kubelet[1670]: I0919 22:23:40.141004 1670 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8gmbw\" (UniqueName: \"kubernetes.io/projected/7d073e38-b63e-494d-bda0-3dde372a950b-kube-api-access-8gmbw\") pod \"coredns-66bc5c9577-9j5pw\" (UID: \"7d073e38-b63e-494d-bda0-3dde372a950b\") " pod="kube-system/coredns-66bc5c9577-9j5pw"
Sep 19 22:23:40 ha-326307 kubelet[1670]: E0919 22:23:40.319752 1670 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"af2200130e8f39c8e1d2909ad486622b06624f2f496c35a15b1e5e3e0886ef65\": failed to find network info for sandbox \"af2200130e8f39c8e1d2909ad486622b06624f2f496c35a15b1e5e3e0886ef65\""
Sep 19 22:23:40 ha-326307 kubelet[1670]: E0919 22:23:40.319858 1670 kuberuntime_sandbox.go:71] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"af2200130e8f39c8e1d2909ad486622b06624f2f496c35a15b1e5e3e0886ef65\": failed to find network info for sandbox \"af2200130e8f39c8e1d2909ad486622b06624f2f496c35a15b1e5e3e0886ef65\"" pod="kube-system/coredns-66bc5c9577-wqvzd"
Sep 19 22:23:40 ha-326307 kubelet[1670]: E0919 22:23:40.319884 1670 kuberuntime_manager.go:1343] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"af2200130e8f39c8e1d2909ad486622b06624f2f496c35a15b1e5e3e0886ef65\": failed to find network info for sandbox \"af2200130e8f39c8e1d2909ad486622b06624f2f496c35a15b1e5e3e0886ef65\"" pod="kube-system/coredns-66bc5c9577-wqvzd"
Sep 19 22:23:40 ha-326307 kubelet[1670]: E0919 22:23:40.319966 1670 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"coredns-66bc5c9577-wqvzd_kube-system(64376c4d-1b82-490d-887d-7f628b134014)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"coredns-66bc5c9577-wqvzd_kube-system(64376c4d-1b82-490d-887d-7f628b134014)\\\": rpc error: code = Unknown desc = failed to setup network for sandbox \\\"af2200130e8f39c8e1d2909ad486622b06624f2f496c35a15b1e5e3e0886ef65\\\": failed to find network info for sandbox \\\"af2200130e8f39c8e1d2909ad486622b06624f2f496c35a15b1e5e3e0886ef65\\\"\"" pod="kube-system/coredns-66bc5c9577-wqvzd" podUID="64376c4d-1b82-490d-887d-7f628b134014"
Sep 19 22:23:40 ha-326307 kubelet[1670]: E0919 22:23:40.332044 1670 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"533bf94488ad0e7905bcfea90e12375383188d3f9f0d630583575f8855eecb9d\": failed to find network info for sandbox \"533bf94488ad0e7905bcfea90e12375383188d3f9f0d630583575f8855eecb9d\""
Sep 19 22:23:40 ha-326307 kubelet[1670]: E0919 22:23:40.332130 1670 kuberuntime_sandbox.go:71] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"533bf94488ad0e7905bcfea90e12375383188d3f9f0d630583575f8855eecb9d\": failed to find network info for sandbox \"533bf94488ad0e7905bcfea90e12375383188d3f9f0d630583575f8855eecb9d\"" pod="kube-system/coredns-66bc5c9577-9j5pw"
Sep 19 22:23:40 ha-326307 kubelet[1670]: E0919 22:23:40.332205 1670 kuberuntime_manager.go:1343] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"533bf94488ad0e7905bcfea90e12375383188d3f9f0d630583575f8855eecb9d\": failed to find network info for sandbox \"533bf94488ad0e7905bcfea90e12375383188d3f9f0d630583575f8855eecb9d\"" pod="kube-system/coredns-66bc5c9577-9j5pw"
Sep 19 22:23:40 ha-326307 kubelet[1670]: E0919 22:23:40.332288 1670 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"coredns-66bc5c9577-9j5pw_kube-system(7d073e38-b63e-494d-bda0-3dde372a950b)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"coredns-66bc5c9577-9j5pw_kube-system(7d073e38-b63e-494d-bda0-3dde372a950b)\\\": rpc error: code = Unknown desc = failed to setup network for sandbox \\\"533bf94488ad0e7905bcfea90e12375383188d3f9f0d630583575f8855eecb9d\\\": failed to find network info for sandbox \\\"533bf94488ad0e7905bcfea90e12375383188d3f9f0d630583575f8855eecb9d\\\"\"" pod="kube-system/coredns-66bc5c9577-9j5pw" podUID="7d073e38-b63e-494d-bda0-3dde372a950b"
Sep 19 22:23:40 ha-326307 kubelet[1670]: I0919 22:23:40.543914 1670 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/cafe04c6-2dce-4b93-b6d1-205efc39b360-tmp\") pod \"storage-provisioner\" (UID: \"cafe04c6-2dce-4b93-b6d1-205efc39b360\") " pod="kube-system/storage-provisioner"
Sep 19 22:23:40 ha-326307 kubelet[1670]: I0919 22:23:40.543969 1670 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-47vqf\" (UniqueName: \"kubernetes.io/projected/cafe04c6-2dce-4b93-b6d1-205efc39b360-kube-api-access-47vqf\") pod \"storage-provisioner\" (UID: \"cafe04c6-2dce-4b93-b6d1-205efc39b360\") " pod="kube-system/storage-provisioner"
Sep 19 22:23:40 ha-326307 kubelet[1670]: I0919 22:23:40.684901 1670 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kindnet-gxnzs" podStartSLOduration=1.68487896 podStartE2EDuration="1.68487896s" podCreationTimestamp="2025-09-19 22:23:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-19 22:23:40.684630982 +0000 UTC m=+6.151051272" watchObservedRunningTime="2025-09-19 22:23:40.68487896 +0000 UTC m=+6.151299251"
Sep 19 22:23:40 ha-326307 kubelet[1670]: I0919 22:23:40.685802 1670 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-proxy-8kxtv" podStartSLOduration=1.685781067 podStartE2EDuration="1.685781067s" podCreationTimestamp="2025-09-19 22:23:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-19 22:23:40.670987608 +0000 UTC m=+6.137407898" watchObservedRunningTime="2025-09-19 22:23:40.685781067 +0000 UTC m=+6.152201360"
Sep 19 22:23:41 ha-326307 kubelet[1670]: I0919 22:23:41.676063 1670 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=1.676036489 podStartE2EDuration="1.676036489s" podCreationTimestamp="2025-09-19 22:23:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-19 22:23:41.675998333 +0000 UTC m=+7.142418624" watchObservedRunningTime="2025-09-19 22:23:41.676036489 +0000 UTC m=+7.142456778"
Sep 19 22:23:45 ha-326307 kubelet[1670]: I0919 22:23:45.164667 1670 kuberuntime_manager.go:1828] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/24"
Sep 19 22:23:45 ha-326307 kubelet[1670]: I0919 22:23:45.165981 1670 kubelet_network.go:47] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24"
Sep 19 22:23:52 ha-326307 kubelet[1670]: I0919 22:23:52.703916 1670 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/coredns-66bc5c9577-wqvzd" podStartSLOduration=13.703896267 podStartE2EDuration="13.703896267s" podCreationTimestamp="2025-09-19 22:23:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-19 22:23:52.703429297 +0000 UTC m=+18.169849612" watchObservedRunningTime="2025-09-19 22:23:52.703896267 +0000 UTC m=+18.170316558"
Sep 19 22:23:56 ha-326307 kubelet[1670]: I0919 22:23:56.724956 1670 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/coredns-66bc5c9577-9j5pw" podStartSLOduration=17.724936721 podStartE2EDuration="17.724936721s" podCreationTimestamp="2025-09-19 22:23:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-19 22:23:56.724564031 +0000 UTC m=+22.190984322" watchObservedRunningTime="2025-09-19 22:23:56.724936721 +0000 UTC m=+22.191357012"
Sep 19 22:25:18 ha-326307 kubelet[1670]: I0919 22:25:18.904730 1670 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rt2kb\" (UniqueName: \"kubernetes.io/projected/7533a5f9-7c6d-4476-9e03-eb8abe0aadbc-kube-api-access-rt2kb\") pod \"busybox-7b57f96db7-m8swj\" (UID: \"7533a5f9-7c6d-4476-9e03-eb8abe0aadbc\") " pod="default/busybox-7b57f96db7-m8swj"
-- /stdout --
helpers_test.go:262: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p ha-326307 -n ha-326307
helpers_test.go:269: (dbg) Run: kubectl --context ha-326307 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:280: non-running pods: busybox-7b57f96db7-jdczt
helpers_test.go:282: ======> post-mortem[TestMultiControlPlane/serial/DeployApp]: describe non-running pods <======
helpers_test.go:285: (dbg) Run: kubectl --context ha-326307 describe pod busybox-7b57f96db7-jdczt
helpers_test.go:290: (dbg) kubectl --context ha-326307 describe pod busybox-7b57f96db7-jdczt:
-- stdout --
Name: busybox-7b57f96db7-jdczt
Namespace: default
Priority: 0
Service Account: default
Node: ha-326307-m03/192.168.49.4
Start Time: Fri, 19 Sep 2025 22:25:18 +0000
Labels: app=busybox
pod-template-hash=7b57f96db7
Annotations: <none>
Status: Pending
IP:
IPs: <none>
Controlled By: ReplicaSet/busybox-7b57f96db7
Containers:
busybox:
Container ID:
Image: gcr.io/k8s-minikube/busybox:1.28
Image ID:
Port: <none>
Host Port: <none>
Command:
sleep
3600
State: Waiting
Reason: ContainerCreating
Ready: False
Restart Count: 0
Environment: <none>
Mounts:
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-xwg8l (ro)
Conditions:
Type                        Status
PodReadyToStartContainers   False
Initialized                 True
Ready                       False
ContainersReady             False
PodScheduled                True
Volumes:
kube-api-access-xwg8l:
Type: Projected (a volume that contains injected data from multiple sources)
TokenExpirationSeconds: 3607
ConfigMapName: kube-root-ca.crt
Optional: false
DownwardAPI: true
QoS Class: BestEffort
Node-Selectors: <none>
Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Warning FailedScheduling 12m default-scheduler running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "busybox-7b57f96db7-jdczt": pod busybox-7b57f96db7-jdczt is already assigned to node "ha-326307-m03"
Warning FailedScheduling 12m default-scheduler running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "busybox-7b57f96db7-jdczt": pod busybox-7b57f96db7-jdczt is already assigned to node "ha-326307-m03"
Normal Scheduled 12m default-scheduler Successfully assigned default/busybox-7b57f96db7-jdczt to ha-326307-m03
Warning FailedCreatePodSandBox 12m kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "f949ef20a496c1ef4510b9586bfdf0aa02ea1ca9948f762b5576ef36acab80c9": failed to find network info for sandbox "f949ef20a496c1ef4510b9586bfdf0aa02ea1ca9948f762b5576ef36acab80c9"
Warning FailedCreatePodSandBox 11m kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "306b20c8e47aaeb0b6ae068e406157020ceddab45da2b4f2ab7d80c0e47f4391": failed to find network info for sandbox "306b20c8e47aaeb0b6ae068e406157020ceddab45da2b4f2ab7d80c0e47f4391"
Warning FailedCreatePodSandBox 11m kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "e6c50e24733dc1514dd610f1c51f99bc1a57d10929036ae887a87c4b187b9ac1": failed to find network info for sandbox "e6c50e24733dc1514dd610f1c51f99bc1a57d10929036ae887a87c4b187b9ac1"
Warning FailedCreatePodSandBox 11m kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "9d4e7715d1c071862624112264db649229347a018044c9075df60fb9940c8e8a": failed to find network info for sandbox "9d4e7715d1c071862624112264db649229347a018044c9075df60fb9940c8e8a"
Warning FailedCreatePodSandBox 11m kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "d188384b76e1cf43ce05a368351c54023e455d3fd0fddf79dc0d717558b93ee6": failed to find network info for sandbox "d188384b76e1cf43ce05a368351c54023e455d3fd0fddf79dc0d717558b93ee6"
Warning FailedCreatePodSandBox 11m kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "726dbde7347664ddd373a329867d125e92a7173ca43b01448ce154579a81a0bb": failed to find network info for sandbox "726dbde7347664ddd373a329867d125e92a7173ca43b01448ce154579a81a0bb"
Warning FailedCreatePodSandBox 10m kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "e45f823e38e8e10ed14077a52e1750763b0c366d8a775bbf53f656c10861f185": failed to find network info for sandbox "e45f823e38e8e10ed14077a52e1750763b0c366d8a775bbf53f656c10861f185"
Warning FailedCreatePodSandBox 10m kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "92ae39dcfd89289f5a5fdc5ae0c23196a91b58214916aead7f95620a2697c009": failed to find network info for sandbox "92ae39dcfd89289f5a5fdc5ae0c23196a91b58214916aead7f95620a2697c009"
Warning FailedCreatePodSandBox 10m kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "98a14fefd702a6a8ff4d95d8bebac62053e439ee134d840d76e175ba4e8c45d6": failed to find network info for sandbox "98a14fefd702a6a8ff4d95d8bebac62053e439ee134d840d76e175ba4e8c45d6"
Warning FailedCreatePodSandBox 2m (x39 over 10m) kubelet (combined from similar events): Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "68cb483b1808e73ea325cca055c7a7f1bd2a591a81aa8b2bcb8cb96560fd08b2": failed to find network info for sandbox "68cb483b1808e73ea325cca055c7a7f1bd2a591a81aa8b2bcb8cb96560fd08b2"
-- /stdout --
helpers_test.go:293: <<< TestMultiControlPlane/serial/DeployApp FAILED: end of post-mortem logs <<<
helpers_test.go:294: ---------------------/post-mortem---------------------------------
--- FAIL: TestMultiControlPlane/serial/DeployApp (727.71s)