=== RUN TestMultiControlPlane/serial/DeployApp
ha_test.go:128: (dbg) Run: out/minikube-linux-amd64 -p ha-472903 kubectl -- apply -f ./testdata/ha/ha-pod-dns-test.yaml
ha_test.go:133: (dbg) Run: out/minikube-linux-amd64 -p ha-472903 kubectl -- rollout status deployment/busybox
E0917 00:00:37.165621 752707 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/addons-346612/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:00:49.959565 752707 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/functional-695580/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:00:49.965949 752707 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/functional-695580/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:00:49.977261 752707 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/functional-695580/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:00:49.998582 752707 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/functional-695580/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:00:50.039930 752707 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/functional-695580/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:00:50.121329 752707 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/functional-695580/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:00:50.282823 752707 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/functional-695580/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:00:50.604501 752707 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/functional-695580/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:00:51.246531 752707 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/functional-695580/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:00:52.528647 752707 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/functional-695580/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:00:55.091343 752707 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/functional-695580/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:01:00.212940 752707 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/functional-695580/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:01:04.871873 752707 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/addons-346612/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:01:10.454509 752707 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/functional-695580/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:01:30.936721 752707 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/functional-695580/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:02:11.899991 752707 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/functional-695580/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:03:33.824343 752707 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/functional-695580/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:05:37.165924 752707 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/addons-346612/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:05:49.960581 752707 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/functional-695580/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0917 00:06:17.665714 752707 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/functional-695580/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
ha_test.go:133: (dbg) Non-zero exit: out/minikube-linux-amd64 -p ha-472903 kubectl -- rollout status deployment/busybox: exit status 1 (10m6.101476737s)
-- stdout --
Waiting for deployment "busybox" rollout to finish: 0 of 6 updated replicas are available...
Waiting for deployment "busybox" rollout to finish: 0 out of 3 new replicas have been updated...
Waiting for deployment "busybox" rollout to finish: 0 of 6 updated replicas are available...
Waiting for deployment "busybox" rollout to finish: 0 of 3 updated replicas are available...
Waiting for deployment "busybox" rollout to finish: 1 of 3 updated replicas are available...
Waiting for deployment "busybox" rollout to finish: 2 of 3 updated replicas are available...
-- /stdout --
** stderr **
error: deployment "busybox" exceeded its progress deadline
** /stderr **
ha_test.go:135: failed to deploy busybox to ha (multi-control plane) cluster
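For reference, a rollout that exceeds its progress deadline like the one above is usually narrowed down by describing the Deployment and the Pods behind it; a minimal sketch using the same kubectl wrapper (the pod name is taken from later in this log, adjust as needed):

    # Rollout conditions and the reason the progress deadline was exceeded
    out/minikube-linux-amd64 -p ha-472903 kubectl -- describe deployment busybox
    # Which replica never became Ready, and on which node it was scheduled
    out/minikube-linux-amd64 -p ha-472903 kubectl -- get pods -o wide
    # Events for the stuck pod
    out/minikube-linux-amd64 -p ha-472903 kubectl -- describe pod busybox-7b57f96db7-mknzs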
ha_test.go:140: (dbg) Run: out/minikube-linux-amd64 -p ha-472903 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
I0917 00:08:44.133128 752707 retry.go:31] will retry after 1.232951173s: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
ha_test.go:140: (dbg) Run: out/minikube-linux-amd64 -p ha-472903 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
I0917 00:08:45.477139 752707 retry.go:31] will retry after 1.478633877s: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
ha_test.go:140: (dbg) Run: out/minikube-linux-amd64 -p ha-472903 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
I0917 00:08:47.066267 752707 retry.go:31] will retry after 2.434809372s: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
ha_test.go:140: (dbg) Run: out/minikube-linux-amd64 -p ha-472903 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
I0917 00:08:49.614564 752707 retry.go:31] will retry after 3.42692877s: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
ha_test.go:140: (dbg) Run: out/minikube-linux-amd64 -p ha-472903 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
I0917 00:08:53.156516 752707 retry.go:31] will retry after 2.581888882s: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
ha_test.go:140: (dbg) Run: out/minikube-linux-amd64 -p ha-472903 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
I0917 00:08:55.853873 752707 retry.go:31] will retry after 9.102938056s: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
ha_test.go:140: (dbg) Run: out/minikube-linux-amd64 -p ha-472903 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
I0917 00:09:05.075104 752707 retry.go:31] will retry after 8.755033071s: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
ha_test.go:140: (dbg) Run: out/minikube-linux-amd64 -p ha-472903 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
I0917 00:09:13.945883 752707 retry.go:31] will retry after 8.673554633s: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
ha_test.go:140: (dbg) Run: out/minikube-linux-amd64 -p ha-472903 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
I0917 00:09:22.733937 752707 retry.go:31] will retry after 33.880920566s: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
ha_test.go:140: (dbg) Run: out/minikube-linux-amd64 -p ha-472903 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
I0917 00:09:56.737430 752707 retry.go:31] will retry after 44.806125277s: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
E0917 00:10:37.165575 752707 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/addons-346612/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
ha_test.go:140: (dbg) Run: out/minikube-linux-amd64 -p ha-472903 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
ha_test.go:159: failed to resolve pod IPs: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
ha_test.go:163: (dbg) Run: out/minikube-linux-amd64 -p ha-472903 kubectl -- get pods -o jsonpath='{.items[*].metadata.name}'
ha_test.go:171: (dbg) Run: out/minikube-linux-amd64 -p ha-472903 kubectl -- exec busybox-7b57f96db7-4jfjt -- nslookup kubernetes.io
ha_test.go:171: (dbg) Run: out/minikube-linux-amd64 -p ha-472903 kubectl -- exec busybox-7b57f96db7-6hrm6 -- nslookup kubernetes.io
ha_test.go:171: (dbg) Run: out/minikube-linux-amd64 -p ha-472903 kubectl -- exec busybox-7b57f96db7-mknzs -- nslookup kubernetes.io
ha_test.go:171: (dbg) Non-zero exit: out/minikube-linux-amd64 -p ha-472903 kubectl -- exec busybox-7b57f96db7-mknzs -- nslookup kubernetes.io: exit status 1 (122.871883ms)
** stderr **
error: Internal error occurred: unable to upgrade connection: container not found ("busybox")
** /stderr **
ha_test.go:173: Pod busybox-7b57f96db7-mknzs could not resolve 'kubernetes.io': exit status 1
ha_test.go:181: (dbg) Run: out/minikube-linux-amd64 -p ha-472903 kubectl -- exec busybox-7b57f96db7-4jfjt -- nslookup kubernetes.default
ha_test.go:181: (dbg) Run: out/minikube-linux-amd64 -p ha-472903 kubectl -- exec busybox-7b57f96db7-6hrm6 -- nslookup kubernetes.default
ha_test.go:181: (dbg) Run: out/minikube-linux-amd64 -p ha-472903 kubectl -- exec busybox-7b57f96db7-mknzs -- nslookup kubernetes.default
ha_test.go:181: (dbg) Non-zero exit: out/minikube-linux-amd64 -p ha-472903 kubectl -- exec busybox-7b57f96db7-mknzs -- nslookup kubernetes.default: exit status 1 (120.513792ms)
** stderr **
error: Internal error occurred: unable to upgrade connection: container not found ("busybox")
** /stderr **
ha_test.go:183: Pod busybox-7b57f96db7-mknzs could not resolve 'kubernetes.default': exit status 1
ha_test.go:189: (dbg) Run: out/minikube-linux-amd64 -p ha-472903 kubectl -- exec busybox-7b57f96db7-4jfjt -- nslookup kubernetes.default.svc.cluster.local
ha_test.go:189: (dbg) Run: out/minikube-linux-amd64 -p ha-472903 kubectl -- exec busybox-7b57f96db7-6hrm6 -- nslookup kubernetes.default.svc.cluster.local
ha_test.go:189: (dbg) Run: out/minikube-linux-amd64 -p ha-472903 kubectl -- exec busybox-7b57f96db7-mknzs -- nslookup kubernetes.default.svc.cluster.local
ha_test.go:189: (dbg) Non-zero exit: out/minikube-linux-amd64 -p ha-472903 kubectl -- exec busybox-7b57f96db7-mknzs -- nslookup kubernetes.default.svc.cluster.local: exit status 1 (119.873118ms)
** stderr **
error: Internal error occurred: unable to upgrade connection: container not found ("busybox")
** /stderr **
ha_test.go:191: Pod busybox-7b57f96db7-mknzs could not resolve local service (kubernetes.default.svc.cluster.local): exit status 1
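"unable to upgrade connection: container not found" typically means the exec reached the kubelet while the pod's "busybox" container was not running. A hedged way to confirm that with the same wrapper (standard Pod status fields, nothing test-specific):

    # Per-container state (waiting/running/terminated) and restart count for the stuck pod
    out/minikube-linux-amd64 -p ha-472903 kubectl -- get pod busybox-7b57f96db7-mknzs -o jsonpath='{range .status.containerStatuses[*]}{.name}{" "}{.restartCount}{" "}{.state}{"\n"}{end}'
    # Recent events usually carry the pull/crash reason
    out/minikube-linux-amd64 -p ha-472903 kubectl -- get events --field-selector involvedObject.name=busybox-7b57f96db7-mknzs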
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestMultiControlPlane/serial/DeployApp]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======> post-mortem[TestMultiControlPlane/serial/DeployApp]: docker inspect <======
helpers_test.go:239: (dbg) Run: docker inspect ha-472903
helpers_test.go:243: (dbg) docker inspect ha-472903:
-- stdout --
[
{
"Id": "05f03528ecc5ba6a39041bcc2845d236679d61fa3752c15e7e068dac7d8c9047",
"Created": "2025-09-16T23:56:35.178831158Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 804802,
"ExitCode": 0,
"Error": "",
"StartedAt": "2025-09-16T23:56:35.209552026Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:c6b5532e987b5b4f5fc9cb0336e378ed49c0542bad8cbfc564b71e977a6269de",
"ResolvConfPath": "/var/lib/docker/containers/05f03528ecc5ba6a39041bcc2845d236679d61fa3752c15e7e068dac7d8c9047/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/05f03528ecc5ba6a39041bcc2845d236679d61fa3752c15e7e068dac7d8c9047/hostname",
"HostsPath": "/var/lib/docker/containers/05f03528ecc5ba6a39041bcc2845d236679d61fa3752c15e7e068dac7d8c9047/hosts",
"LogPath": "/var/lib/docker/containers/05f03528ecc5ba6a39041bcc2845d236679d61fa3752c15e7e068dac7d8c9047/05f03528ecc5ba6a39041bcc2845d236679d61fa3752c15e7e068dac7d8c9047-json.log",
"Name": "/ha-472903",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"ha-472903:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {
"max-size": "100m"
}
},
"NetworkMode": "ha-472903",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "private",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 3221225472,
"NanoCpus": 0,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 6442450944,
"MemorySwappiness": null,
"OomKillDisable": null,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"ID": "05f03528ecc5ba6a39041bcc2845d236679d61fa3752c15e7e068dac7d8c9047",
"LowerDir": "/var/lib/docker/overlay2/37229b42d46c992f89d690b880f5a9c43e154eecc2ad5aeee133e9eb30accccb-init/diff:/var/lib/docker/overlay2/949a3fbecd0c2c005aa419b7ddc214e7bf4333225d7b227e8b0d0ea188b945ec/diff",
"MergedDir": "/var/lib/docker/overlay2/37229b42d46c992f89d690b880f5a9c43e154eecc2ad5aeee133e9eb30accccb/merged",
"UpperDir": "/var/lib/docker/overlay2/37229b42d46c992f89d690b880f5a9c43e154eecc2ad5aeee133e9eb30accccb/diff",
"WorkDir": "/var/lib/docker/overlay2/37229b42d46c992f89d690b880f5a9c43e154eecc2ad5aeee133e9eb30accccb/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "ha-472903",
"Source": "/var/lib/docker/volumes/ha-472903/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "ha-472903",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "ha-472903",
"name.minikube.sigs.k8s.io": "ha-472903",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "abe382ce28757e80b5cdae91a64217d3672b21c23f3517480bd53105aeca147e",
"SandboxKey": "/var/run/docker/netns/abe382ce2875",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33544"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33545"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33548"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33546"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33547"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"ha-472903": {
"IPAMConfig": {
"IPv4Address": "192.168.49.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "7e:42:9f:f6:50:c2",
"DriverOpts": null,
"GwPriority": 0,
"NetworkID": "22d49b2f397dfabc2a3967bd54b05204a52976e683f65ff07bff00e793040bef",
"EndpointID": "4d4d83129a167c8183e8ef58cc6057f613d8d69adf59710ba6c623d1ff2970c6",
"Gateway": "192.168.49.1",
"IPAddress": "192.168.49.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"ha-472903",
"05f03528ecc5"
]
}
}
}
}
]
-- /stdout --
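The inspect output above shows each container port published on 127.0.0.1 with an ephemeral host port (e.g., 8443/tcp -> 33547). For reference, the same mapping can be read back from docker and the apiserver probed directly; a sketch, assuming the default anonymous access to /version is still enabled:

    # Host port bound for the apiserver port inside the container
    docker port ha-472903 8443/tcp
    # Probe the published apiserver endpoint (self-signed cert, hence -k)
    curl -k https://127.0.0.1:33547/version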
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p ha-472903 -n ha-472903
helpers_test.go:252: <<< TestMultiControlPlane/serial/DeployApp FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestMultiControlPlane/serial/DeployApp]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-amd64 -p ha-472903 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-amd64 -p ha-472903 logs -n 25: (1.093630099s)
helpers_test.go:260: TestMultiControlPlane/serial/DeployApp logs:
-- stdout --
==> Audit <==
┌─────────┬───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬───────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼───────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ delete │ -p functional-695580 │ functional-695580 │ jenkins │ v1.37.0 │ 16 Sep 25 23:56 UTC │ 16 Sep 25 23:56 UTC │
│ start │ ha-472903 start --ha --memory 3072 --wait true --alsologtostderr -v 5 --driver=docker --container-runtime=containerd │ ha-472903 │ jenkins │ v1.37.0 │ 16 Sep 25 23:56 UTC │ 16 Sep 25 23:58 UTC │
│ kubectl │ ha-472903 kubectl -- apply -f ./testdata/ha/ha-pod-dns-test.yaml │ ha-472903 │ jenkins │ v1.37.0 │ 16 Sep 25 23:58 UTC │ 16 Sep 25 23:58 UTC │
│ kubectl │ ha-472903 kubectl -- rollout status deployment/busybox │ ha-472903 │ jenkins │ v1.37.0 │ 16 Sep 25 23:58 UTC │ │
│ kubectl │ ha-472903 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}' │ ha-472903 │ jenkins │ v1.37.0 │ 17 Sep 25 00:08 UTC │ 17 Sep 25 00:08 UTC │
│ kubectl │ ha-472903 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}' │ ha-472903 │ jenkins │ v1.37.0 │ 17 Sep 25 00:08 UTC │ 17 Sep 25 00:08 UTC │
│ kubectl │ ha-472903 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}' │ ha-472903 │ jenkins │ v1.37.0 │ 17 Sep 25 00:08 UTC │ 17 Sep 25 00:08 UTC │
│ kubectl │ ha-472903 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}' │ ha-472903 │ jenkins │ v1.37.0 │ 17 Sep 25 00:08 UTC │ 17 Sep 25 00:08 UTC │
│ kubectl │ ha-472903 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}' │ ha-472903 │ jenkins │ v1.37.0 │ 17 Sep 25 00:08 UTC │ 17 Sep 25 00:08 UTC │
│ kubectl │ ha-472903 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}' │ ha-472903 │ jenkins │ v1.37.0 │ 17 Sep 25 00:08 UTC │ 17 Sep 25 00:08 UTC │
│ kubectl │ ha-472903 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}' │ ha-472903 │ jenkins │ v1.37.0 │ 17 Sep 25 00:09 UTC │ 17 Sep 25 00:09 UTC │
│ kubectl │ ha-472903 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}' │ ha-472903 │ jenkins │ v1.37.0 │ 17 Sep 25 00:09 UTC │ 17 Sep 25 00:09 UTC │
│ kubectl │ ha-472903 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}' │ ha-472903 │ jenkins │ v1.37.0 │ 17 Sep 25 00:09 UTC │ 17 Sep 25 00:09 UTC │
│ kubectl │ ha-472903 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}' │ ha-472903 │ jenkins │ v1.37.0 │ 17 Sep 25 00:09 UTC │ 17 Sep 25 00:09 UTC │
│ kubectl │ ha-472903 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}' │ ha-472903 │ jenkins │ v1.37.0 │ 17 Sep 25 00:10 UTC │ 17 Sep 25 00:10 UTC │
│ kubectl │ ha-472903 kubectl -- get pods -o jsonpath='{.items[*].metadata.name}' │ ha-472903 │ jenkins │ v1.37.0 │ 17 Sep 25 00:10 UTC │ 17 Sep 25 00:10 UTC │
│ kubectl │ ha-472903 kubectl -- exec busybox-7b57f96db7-4jfjt -- nslookup kubernetes.io │ ha-472903 │ jenkins │ v1.37.0 │ 17 Sep 25 00:10 UTC │ 17 Sep 25 00:10 UTC │
│ kubectl │ ha-472903 kubectl -- exec busybox-7b57f96db7-6hrm6 -- nslookup kubernetes.io │ ha-472903 │ jenkins │ v1.37.0 │ 17 Sep 25 00:10 UTC │ 17 Sep 25 00:10 UTC │
│ kubectl │ ha-472903 kubectl -- exec busybox-7b57f96db7-mknzs -- nslookup kubernetes.io │ ha-472903 │ jenkins │ v1.37.0 │ 17 Sep 25 00:10 UTC │ │
│ kubectl │ ha-472903 kubectl -- exec busybox-7b57f96db7-4jfjt -- nslookup kubernetes.default │ ha-472903 │ jenkins │ v1.37.0 │ 17 Sep 25 00:10 UTC │ 17 Sep 25 00:10 UTC │
│ kubectl │ ha-472903 kubectl -- exec busybox-7b57f96db7-6hrm6 -- nslookup kubernetes.default │ ha-472903 │ jenkins │ v1.37.0 │ 17 Sep 25 00:10 UTC │ 17 Sep 25 00:10 UTC │
│ kubectl │ ha-472903 kubectl -- exec busybox-7b57f96db7-mknzs -- nslookup kubernetes.default │ ha-472903 │ jenkins │ v1.37.0 │ 17 Sep 25 00:10 UTC │ │
│ kubectl │ ha-472903 kubectl -- exec busybox-7b57f96db7-4jfjt -- nslookup kubernetes.default.svc.cluster.local │ ha-472903 │ jenkins │ v1.37.0 │ 17 Sep 25 00:10 UTC │ 17 Sep 25 00:10 UTC │
│ kubectl │ ha-472903 kubectl -- exec busybox-7b57f96db7-6hrm6 -- nslookup kubernetes.default.svc.cluster.local │ ha-472903 │ jenkins │ v1.37.0 │ 17 Sep 25 00:10 UTC │ 17 Sep 25 00:10 UTC │
│ kubectl │ ha-472903 kubectl -- exec busybox-7b57f96db7-mknzs -- nslookup kubernetes.default.svc.cluster.local │ ha-472903 │ jenkins │ v1.37.0 │ 17 Sep 25 00:10 UTC │ │
└─────────┴───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴───────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/09/16 23:56:30
Running on machine: ubuntu-20-agent-6
Binary: Built with gc go1.24.6 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0916 23:56:30.301112 804231 out.go:360] Setting OutFile to fd 1 ...
I0916 23:56:30.301322 804231 out.go:408] TERM=,COLORTERM=, which probably does not support color
I0916 23:56:30.301330 804231 out.go:374] Setting ErrFile to fd 2...
I0916 23:56:30.301335 804231 out.go:408] TERM=,COLORTERM=, which probably does not support color
I0916 23:56:30.301535 804231 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21550-749120/.minikube/bin
I0916 23:56:30.302024 804231 out.go:368] Setting JSON to false
I0916 23:56:30.302925 804231 start.go:130] hostinfo: {"hostname":"ubuntu-20-agent-6","uptime":9532,"bootTime":1758057458,"procs":201,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1037-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I0916 23:56:30.303027 804231 start.go:140] virtualization: kvm guest
I0916 23:56:30.304965 804231 out.go:179] * [ha-472903] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
I0916 23:56:30.306181 804231 out.go:179] - MINIKUBE_LOCATION=21550
I0916 23:56:30.306189 804231 notify.go:220] Checking for updates...
I0916 23:56:30.308309 804231 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0916 23:56:30.309530 804231 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21550-749120/kubeconfig
I0916 23:56:30.310577 804231 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21550-749120/.minikube
I0916 23:56:30.311523 804231 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64
I0916 23:56:30.312490 804231 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I0916 23:56:30.313634 804231 driver.go:421] Setting default libvirt URI to qemu:///system
I0916 23:56:30.336203 804231 docker.go:123] docker version: linux-28.4.0:Docker Engine - Community
I0916 23:56:30.336330 804231 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0916 23:56:30.390690 804231 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:24 OomKillDisable:false NGoroutines:46 SystemTime:2025-09-16 23:56:30.380521507 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:6.8.0-1037-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:
x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652178944 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-6 Labels:[] ExperimentalBuild:false ServerVersion:28.4.0 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[m
ap[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.27.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.39.2] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner (EXPERIMENTAL) Vendor:Docker Inc. Version:v0.1.39] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0916 23:56:30.390801 804231 docker.go:318] overlay module found
I0916 23:56:30.392435 804231 out.go:179] * Using the docker driver based on user configuration
I0916 23:56:30.393493 804231 start.go:304] selected driver: docker
I0916 23:56:30.393505 804231 start.go:918] validating driver "docker" against <nil>
I0916 23:56:30.393517 804231 start.go:929] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0916 23:56:30.394092 804231 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0916 23:56:30.448140 804231 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:24 OomKillDisable:false NGoroutines:46 SystemTime:2025-09-16 23:56:30.438500908 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:6.8.0-1037-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:
x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652178944 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-6 Labels:[] ExperimentalBuild:false ServerVersion:28.4.0 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[m
ap[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.27.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.39.2] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner (EXPERIMENTAL) Vendor:Docker Inc. Version:v0.1.39] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0916 23:56:30.448302 804231 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I0916 23:56:30.448529 804231 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0916 23:56:30.450143 804231 out.go:179] * Using Docker driver with root privileges
I0916 23:56:30.451156 804231 cni.go:84] Creating CNI manager for ""
I0916 23:56:30.451216 804231 cni.go:136] multinode detected (0 nodes found), recommending kindnet
I0916 23:56:30.451226 804231 start_flags.go:336] Found "CNI" CNI - setting NetworkPlugin=cni
I0916 23:56:30.451301 804231 start.go:348] cluster config:
{Name:ha-472903 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:ha-472903 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPl
ugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m
0s}
I0916 23:56:30.452491 804231 out.go:179] * Starting "ha-472903" primary control-plane node in "ha-472903" cluster
I0916 23:56:30.453469 804231 cache.go:123] Beginning downloading kic base image for docker with containerd
I0916 23:56:30.454617 804231 out.go:179] * Pulling base image v0.0.48 ...
I0916 23:56:30.455626 804231 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime containerd
I0916 23:56:30.455658 804231 preload.go:146] Found local preload: /home/jenkins/minikube-integration/21550-749120/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-containerd-overlay2-amd64.tar.lz4
I0916 23:56:30.455669 804231 cache.go:58] Caching tarball of preloaded images
I0916 23:56:30.455737 804231 image.go:81] Checking for gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon
I0916 23:56:30.455747 804231 preload.go:172] Found /home/jenkins/minikube-integration/21550-749120/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-containerd-overlay2-amd64.tar.lz4 in cache, skipping download
I0916 23:56:30.455875 804231 cache.go:61] Finished verifying existence of preloaded tar for v1.34.0 on containerd
I0916 23:56:30.456208 804231 profile.go:143] Saving config to /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/config.json ...
I0916 23:56:30.456245 804231 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/config.json: {Name:mkb16495f6ef626fa58a9600f3b4a943b5aaf14d Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 23:56:30.475568 804231 image.go:100] Found gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon, skipping pull
I0916 23:56:30.475587 804231 cache.go:147] gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 exists in daemon, skipping load
I0916 23:56:30.475611 804231 cache.go:232] Successfully downloaded all kic artifacts
I0916 23:56:30.475644 804231 start.go:360] acquireMachinesLock for ha-472903: {Name:mk994658ce3314f2aed1dec341debc49d36a4326 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0916 23:56:30.475759 804231 start.go:364] duration metric: took 97.738µs to acquireMachinesLock for "ha-472903"
I0916 23:56:30.475786 804231 start.go:93] Provisioning new machine with config: &{Name:ha-472903 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:ha-472903 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APISer
verIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath
: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I0916 23:56:30.475881 804231 start.go:125] createHost starting for "" (driver="docker")
I0916 23:56:30.477680 804231 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I0916 23:56:30.477953 804231 start.go:159] libmachine.API.Create for "ha-472903" (driver="docker")
I0916 23:56:30.477986 804231 client.go:168] LocalClient.Create starting
I0916 23:56:30.478060 804231 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21550-749120/.minikube/certs/ca.pem
I0916 23:56:30.478097 804231 main.go:141] libmachine: Decoding PEM data...
I0916 23:56:30.478118 804231 main.go:141] libmachine: Parsing certificate...
I0916 23:56:30.478203 804231 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21550-749120/.minikube/certs/cert.pem
I0916 23:56:30.478234 804231 main.go:141] libmachine: Decoding PEM data...
I0916 23:56:30.478247 804231 main.go:141] libmachine: Parsing certificate...
I0916 23:56:30.478706 804231 cli_runner.go:164] Run: docker network inspect ha-472903 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W0916 23:56:30.494743 804231 cli_runner.go:211] docker network inspect ha-472903 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I0916 23:56:30.494806 804231 network_create.go:284] running [docker network inspect ha-472903] to gather additional debugging logs...
I0916 23:56:30.494829 804231 cli_runner.go:164] Run: docker network inspect ha-472903
W0916 23:56:30.510851 804231 cli_runner.go:211] docker network inspect ha-472903 returned with exit code 1
I0916 23:56:30.510886 804231 network_create.go:287] error running [docker network inspect ha-472903]: docker network inspect ha-472903: exit status 1
stdout:
[]
stderr:
Error response from daemon: network ha-472903 not found
I0916 23:56:30.510919 804231 network_create.go:289] output of [docker network inspect ha-472903]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network ha-472903 not found
** /stderr **
I0916 23:56:30.511007 804231 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0916 23:56:30.527272 804231 network.go:206] using free private subnet 192.168.49.0/24: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001b12870}
I0916 23:56:30.527312 804231 network_create.go:124] attempt to create docker network ha-472903 192.168.49.0/24 with gateway 192.168.49.1 and MTU of 1500 ...
I0916 23:56:30.527357 804231 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.49.0/24 --gateway=192.168.49.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=ha-472903 ha-472903
I0916 23:56:30.581246 804231 network_create.go:108] docker network ha-472903 192.168.49.0/24 created
I0916 23:56:30.581278 804231 kic.go:121] calculated static IP "192.168.49.2" for the "ha-472903" container
I0916 23:56:30.581331 804231 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0916 23:56:30.597113 804231 cli_runner.go:164] Run: docker volume create ha-472903 --label name.minikube.sigs.k8s.io=ha-472903 --label created_by.minikube.sigs.k8s.io=true
I0916 23:56:30.614615 804231 oci.go:103] Successfully created a docker volume ha-472903
I0916 23:56:30.614694 804231 cli_runner.go:164] Run: docker run --rm --name ha-472903-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=ha-472903 --entrypoint /usr/bin/test -v ha-472903:/var gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -d /var/lib
I0916 23:56:30.983301 804231 oci.go:107] Successfully prepared a docker volume ha-472903
I0916 23:56:30.983346 804231 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime containerd
I0916 23:56:30.983369 804231 kic.go:194] Starting extracting preloaded images to volume ...
I0916 23:56:30.983457 804231 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21550-749120/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v ha-472903:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -I lz4 -xf /preloaded.tar -C /extractDir
I0916 23:56:35.109877 804231 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21550-749120/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v ha-472903:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -I lz4 -xf /preloaded.tar -C /extractDir: (4.126378793s)
I0916 23:56:35.109930 804231 kic.go:203] duration metric: took 4.126557088s to extract preloaded images to volume ...
W0916 23:56:35.110010 804231 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
W0916 23:56:35.110041 804231 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.
I0916 23:56:35.110081 804231 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0916 23:56:35.162423 804231 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname ha-472903 --name ha-472903 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=ha-472903 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=ha-472903 --network ha-472903 --ip 192.168.49.2 --volume ha-472903:/var --security-opt apparmor=unconfined --memory=3072mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1
I0916 23:56:35.411448 804231 cli_runner.go:164] Run: docker container inspect ha-472903 --format={{.State.Running}}
I0916 23:56:35.428877 804231 cli_runner.go:164] Run: docker container inspect ha-472903 --format={{.State.Status}}
I0916 23:56:35.447492 804231 cli_runner.go:164] Run: docker exec ha-472903 stat /var/lib/dpkg/alternatives/iptables
I0916 23:56:35.490145 804231 oci.go:144] the created container "ha-472903" has a running status.
I0916 23:56:35.490177 804231 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21550-749120/.minikube/machines/ha-472903/id_rsa...
I0916 23:56:35.748917 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/machines/ha-472903/id_rsa.pub -> /home/docker/.ssh/authorized_keys
I0916 23:56:35.748974 804231 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21550-749120/.minikube/machines/ha-472903/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0916 23:56:35.776040 804231 cli_runner.go:164] Run: docker container inspect ha-472903 --format={{.State.Status}}
I0916 23:56:35.795374 804231 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0916 23:56:35.795403 804231 kic_runner.go:114] Args: [docker exec --privileged ha-472903 chown docker:docker /home/docker/.ssh/authorized_keys]
I0916 23:56:35.841194 804231 cli_runner.go:164] Run: docker container inspect ha-472903 --format={{.State.Status}}
I0916 23:56:35.859165 804231 machine.go:93] provisionDockerMachine start ...
I0916 23:56:35.859278 804231 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-472903
I0916 23:56:35.877348 804231 main.go:141] libmachine: Using SSH client type: native
I0916 23:56:35.877637 804231 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 33544 <nil> <nil>}
I0916 23:56:35.877654 804231 main.go:141] libmachine: About to run SSH command:
hostname
I0916 23:56:36.014327 804231 main.go:141] libmachine: SSH cmd err, output: <nil>: ha-472903
I0916 23:56:36.014356 804231 ubuntu.go:182] provisioning hostname "ha-472903"
I0916 23:56:36.014430 804231 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-472903
I0916 23:56:36.033295 804231 main.go:141] libmachine: Using SSH client type: native
I0916 23:56:36.033543 804231 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 33544 <nil> <nil>}
I0916 23:56:36.033558 804231 main.go:141] libmachine: About to run SSH command:
sudo hostname ha-472903 && echo "ha-472903" | sudo tee /etc/hostname
I0916 23:56:36.178557 804231 main.go:141] libmachine: SSH cmd err, output: <nil>: ha-472903
I0916 23:56:36.178627 804231 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-472903
I0916 23:56:36.196584 804231 main.go:141] libmachine: Using SSH client type: native
I0916 23:56:36.196791 804231 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 33544 <nil> <nil>}
I0916 23:56:36.196814 804231 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\sha-472903' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 ha-472903/g' /etc/hosts;
else
echo '127.0.1.1 ha-472903' | sudo tee -a /etc/hosts;
fi
fi
I0916 23:56:36.331895 804231 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0916 23:56:36.331954 804231 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21550-749120/.minikube CaCertPath:/home/jenkins/minikube-integration/21550-749120/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21550-749120/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21550-749120/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21550-749120/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21550-749120/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21550-749120/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21550-749120/.minikube}
I0916 23:56:36.331987 804231 ubuntu.go:190] setting up certificates
I0916 23:56:36.332000 804231 provision.go:84] configureAuth start
I0916 23:56:36.332062 804231 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-472903
I0916 23:56:36.350923 804231 provision.go:143] copyHostCerts
I0916 23:56:36.350968 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/certs/ca.pem -> /home/jenkins/minikube-integration/21550-749120/.minikube/ca.pem
I0916 23:56:36.351011 804231 exec_runner.go:144] found /home/jenkins/minikube-integration/21550-749120/.minikube/ca.pem, removing ...
I0916 23:56:36.351021 804231 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21550-749120/.minikube/ca.pem
I0916 23:56:36.351100 804231 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21550-749120/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21550-749120/.minikube/ca.pem (1078 bytes)
I0916 23:56:36.351216 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/certs/cert.pem -> /home/jenkins/minikube-integration/21550-749120/.minikube/cert.pem
I0916 23:56:36.351254 804231 exec_runner.go:144] found /home/jenkins/minikube-integration/21550-749120/.minikube/cert.pem, removing ...
I0916 23:56:36.351265 804231 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21550-749120/.minikube/cert.pem
I0916 23:56:36.351307 804231 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21550-749120/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21550-749120/.minikube/cert.pem (1123 bytes)
I0916 23:56:36.351374 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/certs/key.pem -> /home/jenkins/minikube-integration/21550-749120/.minikube/key.pem
I0916 23:56:36.351400 804231 exec_runner.go:144] found /home/jenkins/minikube-integration/21550-749120/.minikube/key.pem, removing ...
I0916 23:56:36.351409 804231 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21550-749120/.minikube/key.pem
I0916 23:56:36.351461 804231 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21550-749120/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21550-749120/.minikube/key.pem (1675 bytes)
I0916 23:56:36.351538 804231 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21550-749120/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21550-749120/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21550-749120/.minikube/certs/ca-key.pem org=jenkins.ha-472903 san=[127.0.0.1 192.168.49.2 ha-472903 localhost minikube]
I0916 23:56:36.406870 804231 provision.go:177] copyRemoteCerts
I0916 23:56:36.406927 804231 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0916 23:56:36.406977 804231 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-472903
I0916 23:56:36.424064 804231 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33544 SSHKeyPath:/home/jenkins/minikube-integration/21550-749120/.minikube/machines/ha-472903/id_rsa Username:docker}
I0916 23:56:36.520663 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/certs/ca.pem -> /etc/docker/ca.pem
I0916 23:56:36.520737 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I0916 23:56:36.546100 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/machines/server.pem -> /etc/docker/server.pem
I0916 23:56:36.546162 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/machines/server.pem --> /etc/docker/server.pem (1200 bytes)
I0916 23:56:36.569886 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/machines/server-key.pem -> /etc/docker/server-key.pem
I0916 23:56:36.569946 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I0916 23:56:36.593694 804231 provision.go:87] duration metric: took 261.676108ms to configureAuth
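Aside (sketch, not from the test run): the SANs that configureAuth just signed into server.pem can be double-checked with openssl; the path below is the machine directory shown in this log.
    openssl x509 -noout -text \
      -in /home/jenkins/minikube-integration/21550-749120/.minikube/machines/server.pem \
      | grep -A1 "Subject Alternative Name"
    # expected to list ha-472903, localhost, minikube, 127.0.0.1 and 192.168.49.2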
I0916 23:56:36.593725 804231 ubuntu.go:206] setting minikube options for container-runtime
I0916 23:56:36.593891 804231 config.go:182] Loaded profile config "ha-472903": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.0
I0916 23:56:36.593903 804231 machine.go:96] duration metric: took 734.71199ms to provisionDockerMachine
I0916 23:56:36.593911 804231 client.go:171] duration metric: took 6.115914604s to LocalClient.Create
I0916 23:56:36.593933 804231 start.go:167] duration metric: took 6.115991162s to libmachine.API.Create "ha-472903"
I0916 23:56:36.593942 804231 start.go:293] postStartSetup for "ha-472903" (driver="docker")
I0916 23:56:36.593950 804231 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0916 23:56:36.593994 804231 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0916 23:56:36.594038 804231 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-472903
I0916 23:56:36.611127 804231 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33544 SSHKeyPath:/home/jenkins/minikube-integration/21550-749120/.minikube/machines/ha-472903/id_rsa Username:docker}
I0916 23:56:36.708294 804231 ssh_runner.go:195] Run: cat /etc/os-release
I0916 23:56:36.711629 804231 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0916 23:56:36.711662 804231 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0916 23:56:36.711669 804231 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0916 23:56:36.711677 804231 info.go:137] Remote host: Ubuntu 22.04.5 LTS
I0916 23:56:36.711690 804231 filesync.go:126] Scanning /home/jenkins/minikube-integration/21550-749120/.minikube/addons for local assets ...
I0916 23:56:36.711734 804231 filesync.go:126] Scanning /home/jenkins/minikube-integration/21550-749120/.minikube/files for local assets ...
I0916 23:56:36.711817 804231 filesync.go:149] local asset: /home/jenkins/minikube-integration/21550-749120/.minikube/files/etc/ssl/certs/7527072.pem -> 7527072.pem in /etc/ssl/certs
I0916 23:56:36.711829 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/files/etc/ssl/certs/7527072.pem -> /etc/ssl/certs/7527072.pem
I0916 23:56:36.711917 804231 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I0916 23:56:36.720521 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/files/etc/ssl/certs/7527072.pem --> /etc/ssl/certs/7527072.pem (1708 bytes)
I0916 23:56:36.746614 804231 start.go:296] duration metric: took 152.657806ms for postStartSetup
I0916 23:56:36.746970 804231 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-472903
I0916 23:56:36.763912 804231 profile.go:143] Saving config to /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/config.json ...
I0916 23:56:36.764159 804231 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0916 23:56:36.764204 804231 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-472903
I0916 23:56:36.781099 804231 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33544 SSHKeyPath:/home/jenkins/minikube-integration/21550-749120/.minikube/machines/ha-472903/id_rsa Username:docker}
I0916 23:56:36.872372 804231 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0916 23:56:36.876670 804231 start.go:128] duration metric: took 6.400768235s to createHost
I0916 23:56:36.876701 804231 start.go:83] releasing machines lock for "ha-472903", held for 6.400928988s
I0916 23:56:36.876787 804231 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-472903
I0916 23:56:36.894080 804231 ssh_runner.go:195] Run: cat /version.json
I0916 23:56:36.894094 804231 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0916 23:56:36.894141 804231 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-472903
I0916 23:56:36.894182 804231 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-472903
I0916 23:56:36.912628 804231 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33544 SSHKeyPath:/home/jenkins/minikube-integration/21550-749120/.minikube/machines/ha-472903/id_rsa Username:docker}
I0916 23:56:36.913001 804231 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33544 SSHKeyPath:/home/jenkins/minikube-integration/21550-749120/.minikube/machines/ha-472903/id_rsa Username:docker}
I0916 23:56:37.079386 804231 ssh_runner.go:195] Run: systemctl --version
I0916 23:56:37.084104 804231 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0916 23:56:37.088563 804231 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0916 23:56:37.116786 804231 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0916 23:56:37.116846 804231 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0916 23:56:37.142716 804231 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
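Aside (sketch, not from the test run): the two find/sed commands above patch the loopback CNI config to carry a "name" field and cniVersion 1.0.0, and park any bridge/podman configs as *.mk_disabled so the kindnet CNI installed later can own pod networking. What remains active on disk can be checked with:
    ls /etc/cni/net.d/
    sudo cat /etc/cni/net.d/*loopback.conf*   # should now contain "name": "loopback" and "cniVersion": "1.0.0"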
I0916 23:56:37.142738 804231 start.go:495] detecting cgroup driver to use...
I0916 23:56:37.142772 804231 detect.go:190] detected "systemd" cgroup driver on host os
I0916 23:56:37.142832 804231 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I0916 23:56:37.154693 804231 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0916 23:56:37.165920 804231 docker.go:218] disabling cri-docker service (if available) ...
I0916 23:56:37.165978 804231 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I0916 23:56:37.179227 804231 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I0916 23:56:37.192751 804231 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I0916 23:56:37.255915 804231 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I0916 23:56:37.324761 804231 docker.go:234] disabling docker service ...
I0916 23:56:37.324836 804231 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I0916 23:56:37.342233 804231 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I0916 23:56:37.353324 804231 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I0916 23:56:37.420555 804231 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I0916 23:56:37.486396 804231 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I0916 23:56:37.497453 804231 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0916 23:56:37.513435 804231 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I0916 23:56:37.524399 804231 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0916 23:56:37.534072 804231 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I0916 23:56:37.534132 804231 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I0916 23:56:37.543872 804231 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0916 23:56:37.553478 804231 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0916 23:56:37.562918 804231 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0916 23:56:37.572431 804231 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0916 23:56:37.581176 804231 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0916 23:56:37.590540 804231 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0916 23:56:37.599825 804231 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I0916 23:56:37.609340 804231 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0916 23:56:37.617500 804231 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0916 23:56:37.625771 804231 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0916 23:56:37.685687 804231 ssh_runner.go:195] Run: sudo systemctl restart containerd
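Aside (sketch, not from the test run): the sed edits above switch containerd to the systemd cgroup driver and the pause:3.10.1 sandbox image; after the restart this can be confirmed with:
    sudo grep -E 'SystemdCgroup|sandbox_image' /etc/containerd/config.toml
    sudo crictl info | grep -i cgroup   # crictl reads the /etc/crictl.yaml written above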
I0916 23:56:37.787201 804231 start.go:542] Will wait 60s for socket path /run/containerd/containerd.sock
I0916 23:56:37.787275 804231 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I0916 23:56:37.791126 804231 start.go:563] Will wait 60s for crictl version
I0916 23:56:37.791200 804231 ssh_runner.go:195] Run: which crictl
I0916 23:56:37.794684 804231 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0916 23:56:37.828753 804231 start.go:579] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: 1.7.27
RuntimeApiVersion: v1
I0916 23:56:37.828806 804231 ssh_runner.go:195] Run: containerd --version
I0916 23:56:37.851610 804231 ssh_runner.go:195] Run: containerd --version
I0916 23:56:37.876577 804231 out.go:179] * Preparing Kubernetes v1.34.0 on containerd 1.7.27 ...
I0916 23:56:37.877711 804231 cli_runner.go:164] Run: docker network inspect ha-472903 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0916 23:56:37.894044 804231 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I0916 23:56:37.897995 804231 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0916 23:56:37.909702 804231 kubeadm.go:875] updating cluster {Name:ha-472903 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:ha-472903 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIP
s:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetCli
entPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I0916 23:56:37.909830 804231 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime containerd
I0916 23:56:37.909936 804231 ssh_runner.go:195] Run: sudo crictl images --output json
I0916 23:56:37.943964 804231 containerd.go:627] all images are preloaded for containerd runtime.
I0916 23:56:37.943985 804231 containerd.go:534] Images already preloaded, skipping extraction
I0916 23:56:37.944040 804231 ssh_runner.go:195] Run: sudo crictl images --output json
I0916 23:56:37.976374 804231 containerd.go:627] all images are preloaded for containerd runtime.
I0916 23:56:37.976397 804231 cache_images.go:85] Images are preloaded, skipping loading
I0916 23:56:37.976405 804231 kubeadm.go:926] updating node { 192.168.49.2 8443 v1.34.0 containerd true true} ...
I0916 23:56:37.976525 804231 kubeadm.go:938] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=ha-472903 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
[Install]
config:
{KubernetesVersion:v1.34.0 ClusterName:ha-472903 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I0916 23:56:37.976590 804231 ssh_runner.go:195] Run: sudo crictl info
I0916 23:56:38.009585 804231 cni.go:84] Creating CNI manager for ""
I0916 23:56:38.009608 804231 cni.go:136] multinode detected (1 nodes found), recommending kindnet
I0916 23:56:38.009620 804231 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I0916 23:56:38.009642 804231 kubeadm.go:189] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8443 KubernetesVersion:v1.34.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:ha-472903 NodeName:ha-472903 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernet
es/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I0916 23:56:38.009740 804231 kubeadm.go:195] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.49.2
  bindPort: 8443
bootstrapTokens:
  - groups:
      - system:bootstrappers:kubeadm:default-node-token
    ttl: 24h0m0s
    usages:
      - signing
      - authentication
nodeRegistration:
  criSocket: unix:///run/containerd/containerd.sock
  name: "ha-472903"
  kubeletExtraArgs:
    - name: "node-ip"
      value: "192.168.49.2"
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
  certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
  extraArgs:
    - name: "enable-admission-plugins"
      value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
  extraArgs:
    - name: "allocate-node-cidrs"
      value: "true"
    - name: "leader-elect"
      value: "false"
scheduler:
  extraArgs:
    - name: "leader-elect"
      value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.0
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  x509:
    clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
  maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
  tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
  tcpCloseWaitTimeout: 0s
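Aside (sketch, not from the test run): the rendered config above is copied to /var/tmp/minikube/kubeadm.yaml later in this log; a config like this can be sanity-checked without touching node state via kubeadm's dry-run mode:
    sudo kubeadm init --config /var/tmp/minikube/kubeadm.yaml --dry-run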
I0916 23:56:38.009763 804231 kube-vip.go:115] generating kube-vip config ...
I0916 23:56:38.009799 804231 ssh_runner.go:195] Run: sudo sh -c "lsmod | grep ip_vs"
I0916 23:56:38.022796 804231 kube-vip.go:163] giving up enabling control-plane load-balancing as ipvs kernel modules appears not to be available: sudo sh -c "lsmod | grep ip_vs": Process exited with status 1
stdout:
stderr:
I0916 23:56:38.022978 804231 kube-vip.go:137] kube-vip config:
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  name: kube-vip
  namespace: kube-system
spec:
  containers:
  - args:
    - manager
    env:
    - name: vip_arp
      value: "true"
    - name: port
      value: "8443"
    - name: vip_nodename
      valueFrom:
        fieldRef:
          fieldPath: spec.nodeName
    - name: vip_interface
      value: eth0
    - name: vip_cidr
      value: "32"
    - name: dns_mode
      value: first
    - name: cp_enable
      value: "true"
    - name: cp_namespace
      value: kube-system
    - name: vip_leaderelection
      value: "true"
    - name: vip_leasename
      value: plndr-cp-lock
    - name: vip_leaseduration
      value: "5"
    - name: vip_renewdeadline
      value: "3"
    - name: vip_retryperiod
      value: "1"
    - name: address
      value: 192.168.49.254
    - name: prometheus_server
      value: :2112
    image: ghcr.io/kube-vip/kube-vip:v1.0.0
    imagePullPolicy: IfNotPresent
    name: kube-vip
    resources: {}
    securityContext:
      capabilities:
        add:
        - NET_ADMIN
        - NET_RAW
    volumeMounts:
    - mountPath: /etc/kubernetes/admin.conf
      name: kubeconfig
  hostAliases:
  - hostnames:
    - kubernetes
    ip: 127.0.0.1
  hostNetwork: true
  volumes:
  - hostPath:
      path: "/etc/kubernetes/super-admin.conf"
    name: kubeconfig
status: {}
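Aside (sketch, not from the test run): kube-vip runs as a static pod on each control-plane node, takes the plndr-cp-lock lease, and answers ARP for 192.168.49.254 on eth0. Once the cluster is up, holding of the VIP can be checked with:
    docker exec ha-472903 ip addr show eth0 | grep 192.168.49.254
    kubectl -n kube-system get lease plndr-cp-lock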
I0916 23:56:38.023041 804231 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.0
I0916 23:56:38.032162 804231 binaries.go:44] Found k8s binaries, skipping transfer
I0916 23:56:38.032241 804231 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube /etc/kubernetes/manifests
I0916 23:56:38.040936 804231 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (313 bytes)
I0916 23:56:38.058672 804231 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0916 23:56:38.079097 804231 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2221 bytes)
I0916 23:56:38.097183 804231 ssh_runner.go:362] scp memory --> /etc/kubernetes/manifests/kube-vip.yaml (1364 bytes)
I0916 23:56:38.116629 804231 ssh_runner.go:195] Run: grep 192.168.49.254 control-plane.minikube.internal$ /etc/hosts
I0916 23:56:38.120221 804231 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.254 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
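Aside (sketch, not from the test run): the bash one-liner above rewrites /etc/hosts by dropping any stale control-plane.minikube.internal line and appending the VIP mapping, so a single entry should remain:
    grep control-plane.minikube.internal /etc/hosts   # expected: 192.168.49.254 control-plane.minikube.internal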
I0916 23:56:38.131205 804231 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0916 23:56:38.195735 804231 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0916 23:56:38.216649 804231 certs.go:68] Setting up /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903 for IP: 192.168.49.2
I0916 23:56:38.216671 804231 certs.go:194] generating shared ca certs ...
I0916 23:56:38.216692 804231 certs.go:226] acquiring lock for ca certs: {Name:mk87d179b4a631193bd9c86db8034ccf19400cde Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 23:56:38.216854 804231 certs.go:235] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21550-749120/.minikube/ca.key
I0916 23:56:38.216907 804231 certs.go:235] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21550-749120/.minikube/proxy-client-ca.key
I0916 23:56:38.216920 804231 certs.go:256] generating profile certs ...
I0916 23:56:38.216989 804231 certs.go:363] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/client.key
I0916 23:56:38.217007 804231 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/client.crt with IP's: []
I0916 23:56:38.286683 804231 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/client.crt ...
I0916 23:56:38.286713 804231 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/client.crt: {Name:mk764ef4ac73429cea14d799835f3822d8afb254 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 23:56:38.286876 804231 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/client.key ...
I0916 23:56:38.286887 804231 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/client.key: {Name:mk988f40b7ad20c61b4ffc19afd15eea50787a6c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 23:56:38.286965 804231 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.key.ef70afe8
I0916 23:56:38.286981 804231 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.crt.ef70afe8 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2 192.168.49.254]
I0916 23:56:38.411782 804231 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.crt.ef70afe8 ...
I0916 23:56:38.411812 804231 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.crt.ef70afe8: {Name:mkbca9fcc4cd73eb913b43ef67240975ba048601 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 23:56:38.411977 804231 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.key.ef70afe8 ...
I0916 23:56:38.411990 804231 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.key.ef70afe8: {Name:mk56f7fb29011c6372caaf96dfdbcab1b202e8b2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 23:56:38.412061 804231 certs.go:381] copying /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.crt.ef70afe8 -> /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.crt
I0916 23:56:38.412138 804231 certs.go:385] copying /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.key.ef70afe8 -> /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.key
I0916 23:56:38.412190 804231 certs.go:363] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/proxy-client.key
I0916 23:56:38.412204 804231 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/proxy-client.crt with IP's: []
I0916 23:56:38.735728 804231 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/proxy-client.crt ...
I0916 23:56:38.735759 804231 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/proxy-client.crt: {Name:mke25602938652bbe51197bb8e5738dfc5dca50b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 23:56:38.735935 804231 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/proxy-client.key ...
I0916 23:56:38.735947 804231 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/proxy-client.key: {Name:mkc7d616357a8be8181d43ca8cb33ab512ce94dd Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 23:56:38.736027 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/ca.crt -> /var/lib/minikube/certs/ca.crt
I0916 23:56:38.736044 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/ca.key -> /var/lib/minikube/certs/ca.key
I0916 23:56:38.736055 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/proxy-client-ca.crt -> /var/lib/minikube/certs/proxy-client-ca.crt
I0916 23:56:38.736068 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/proxy-client-ca.key -> /var/lib/minikube/certs/proxy-client-ca.key
I0916 23:56:38.736078 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.crt -> /var/lib/minikube/certs/apiserver.crt
I0916 23:56:38.736090 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.key -> /var/lib/minikube/certs/apiserver.key
I0916 23:56:38.736105 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/proxy-client.crt -> /var/lib/minikube/certs/proxy-client.crt
I0916 23:56:38.736115 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/proxy-client.key -> /var/lib/minikube/certs/proxy-client.key
I0916 23:56:38.736175 804231 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-749120/.minikube/certs/752707.pem (1338 bytes)
W0916 23:56:38.736210 804231 certs.go:480] ignoring /home/jenkins/minikube-integration/21550-749120/.minikube/certs/752707_empty.pem, impossibly tiny 0 bytes
I0916 23:56:38.736218 804231 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-749120/.minikube/certs/ca-key.pem (1675 bytes)
I0916 23:56:38.736242 804231 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-749120/.minikube/certs/ca.pem (1078 bytes)
I0916 23:56:38.736266 804231 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-749120/.minikube/certs/cert.pem (1123 bytes)
I0916 23:56:38.736284 804231 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-749120/.minikube/certs/key.pem (1675 bytes)
I0916 23:56:38.736322 804231 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-749120/.minikube/files/etc/ssl/certs/7527072.pem (1708 bytes)
I0916 23:56:38.736347 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/files/etc/ssl/certs/7527072.pem -> /usr/share/ca-certificates/7527072.pem
I0916 23:56:38.736360 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/ca.crt -> /usr/share/ca-certificates/minikubeCA.pem
I0916 23:56:38.736372 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/certs/752707.pem -> /usr/share/ca-certificates/752707.pem
I0916 23:56:38.736905 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0916 23:56:38.762142 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I0916 23:56:38.786590 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0916 23:56:38.810694 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I0916 23:56:38.834521 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
I0916 23:56:38.858677 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I0916 23:56:38.881975 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0916 23:56:38.906146 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I0916 23:56:38.929698 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/files/etc/ssl/certs/7527072.pem --> /usr/share/ca-certificates/7527072.pem (1708 bytes)
I0916 23:56:38.955154 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0916 23:56:38.978551 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/certs/752707.pem --> /usr/share/ca-certificates/752707.pem (1338 bytes)
I0916 23:56:39.001782 804231 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I0916 23:56:39.019405 804231 ssh_runner.go:195] Run: openssl version
I0916 23:56:39.024868 804231 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/752707.pem && ln -fs /usr/share/ca-certificates/752707.pem /etc/ssl/certs/752707.pem"
I0916 23:56:39.034165 804231 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/752707.pem
I0916 23:56:39.038348 804231 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Sep 16 23:54 /usr/share/ca-certificates/752707.pem
I0916 23:56:39.038407 804231 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/752707.pem
I0916 23:56:39.045172 804231 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/752707.pem /etc/ssl/certs/51391683.0"
I0916 23:56:39.054735 804231 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/7527072.pem && ln -fs /usr/share/ca-certificates/7527072.pem /etc/ssl/certs/7527072.pem"
I0916 23:56:39.065180 804231 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/7527072.pem
I0916 23:56:39.068976 804231 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Sep 16 23:54 /usr/share/ca-certificates/7527072.pem
I0916 23:56:39.069038 804231 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/7527072.pem
I0916 23:56:39.075920 804231 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/7527072.pem /etc/ssl/certs/3ec20f2e.0"
I0916 23:56:39.085838 804231 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0916 23:56:39.095394 804231 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0916 23:56:39.098966 804231 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Sep 16 23:48 /usr/share/ca-certificates/minikubeCA.pem
I0916 23:56:39.099019 804231 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0916 23:56:39.105643 804231 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
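Aside (sketch, not from the test run): the 51391683.0, 3ec20f2e.0 and b5213941.0 link names created above are OpenSSL subject hashes, which is why each ln -fs is preceded by an openssl x509 -hash run. For example:
    openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem   # prints b5213941
    ls -l /etc/ssl/certs/b5213941.0   # -> /usr/share/ca-certificates/minikubeCA.pem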
I0916 23:56:39.114800 804231 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0916 23:56:39.117988 804231 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I0916 23:56:39.118033 804231 kubeadm.go:392] StartCluster: {Name:ha-472903 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:ha-472903 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[
] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClient
Path: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0916 23:56:39.118097 804231 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I0916 23:56:39.118132 804231 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I0916 23:56:39.154291 804231 cri.go:89] found id: ""
I0916 23:56:39.154361 804231 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0916 23:56:39.163485 804231 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0916 23:56:39.172454 804231 kubeadm.go:214] ignoring SystemVerification for kubeadm because of docker driver
I0916 23:56:39.172499 804231 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0916 23:56:39.181066 804231 kubeadm.go:155] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0916 23:56:39.181098 804231 kubeadm.go:157] found existing configuration files:
I0916 23:56:39.181131 804231 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I0916 23:56:39.189824 804231 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I0916 23:56:39.189873 804231 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I0916 23:56:39.198165 804231 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I0916 23:56:39.206772 804231 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I0916 23:56:39.206819 804231 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I0916 23:56:39.215119 804231 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I0916 23:56:39.223660 804231 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I0916 23:56:39.223717 804231 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I0916 23:56:39.232099 804231 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I0916 23:56:39.240514 804231 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I0916 23:56:39.240559 804231 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I0916 23:56:39.248850 804231 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I0916 23:56:39.285897 804231 kubeadm.go:310] [init] Using Kubernetes version: v1.34.0
I0916 23:56:39.285950 804231 kubeadm.go:310] [preflight] Running pre-flight checks
I0916 23:56:39.300660 804231 kubeadm.go:310] [preflight] The system verification failed. Printing the output from the verification:
I0916 23:56:39.300727 804231 kubeadm.go:310] KERNEL_VERSION: 6.8.0-1037-gcp
I0916 23:56:39.300801 804231 kubeadm.go:310] OS: Linux
I0916 23:56:39.300901 804231 kubeadm.go:310] CGROUPS_CPU: enabled
I0916 23:56:39.300975 804231 kubeadm.go:310] CGROUPS_CPUSET: enabled
I0916 23:56:39.301037 804231 kubeadm.go:310] CGROUPS_DEVICES: enabled
I0916 23:56:39.301080 804231 kubeadm.go:310] CGROUPS_FREEZER: enabled
I0916 23:56:39.301127 804231 kubeadm.go:310] CGROUPS_MEMORY: enabled
I0916 23:56:39.301169 804231 kubeadm.go:310] CGROUPS_PIDS: enabled
I0916 23:56:39.301211 804231 kubeadm.go:310] CGROUPS_HUGETLB: enabled
I0916 23:56:39.301268 804231 kubeadm.go:310] CGROUPS_IO: enabled
I0916 23:56:39.351787 804231 kubeadm.go:310] [preflight] Pulling images required for setting up a Kubernetes cluster
I0916 23:56:39.351909 804231 kubeadm.go:310] [preflight] This might take a minute or two, depending on the speed of your internet connection
I0916 23:56:39.351995 804231 kubeadm.go:310] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I0916 23:56:39.358062 804231 kubeadm.go:310] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I0916 23:56:39.360794 804231 out.go:252] - Generating certificates and keys ...
I0916 23:56:39.360906 804231 kubeadm.go:310] [certs] Using existing ca certificate authority
I0916 23:56:39.360984 804231 kubeadm.go:310] [certs] Using existing apiserver certificate and key on disk
I0916 23:56:39.805287 804231 kubeadm.go:310] [certs] Generating "apiserver-kubelet-client" certificate and key
I0916 23:56:40.002708 804231 kubeadm.go:310] [certs] Generating "front-proxy-ca" certificate and key
I0916 23:56:40.279763 804231 kubeadm.go:310] [certs] Generating "front-proxy-client" certificate and key
I0916 23:56:40.813028 804231 kubeadm.go:310] [certs] Generating "etcd/ca" certificate and key
I0916 23:56:41.074848 804231 kubeadm.go:310] [certs] Generating "etcd/server" certificate and key
I0916 23:56:41.075343 804231 kubeadm.go:310] [certs] etcd/server serving cert is signed for DNS names [ha-472903 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0916 23:56:41.124880 804231 kubeadm.go:310] [certs] Generating "etcd/peer" certificate and key
I0916 23:56:41.125041 804231 kubeadm.go:310] [certs] etcd/peer serving cert is signed for DNS names [ha-472903 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0916 23:56:41.707716 804231 kubeadm.go:310] [certs] Generating "etcd/healthcheck-client" certificate and key
I0916 23:56:42.089212 804231 kubeadm.go:310] [certs] Generating "apiserver-etcd-client" certificate and key
I0916 23:56:42.627038 804231 kubeadm.go:310] [certs] Generating "sa" key and public key
I0916 23:56:42.627119 804231 kubeadm.go:310] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I0916 23:56:42.823901 804231 kubeadm.go:310] [kubeconfig] Writing "admin.conf" kubeconfig file
I0916 23:56:43.022989 804231 kubeadm.go:310] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I0916 23:56:43.163778 804231 kubeadm.go:310] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I0916 23:56:43.708743 804231 kubeadm.go:310] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I0916 23:56:44.024642 804231 kubeadm.go:310] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I0916 23:56:44.025130 804231 kubeadm.go:310] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I0916 23:56:44.027319 804231 kubeadm.go:310] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I0916 23:56:44.029599 804231 out.go:252] - Booting up control plane ...
I0916 23:56:44.029737 804231 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-apiserver"
I0916 23:56:44.029842 804231 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I0916 23:56:44.030181 804231 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-scheduler"
I0916 23:56:44.039957 804231 kubeadm.go:310] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I0916 23:56:44.040118 804231 kubeadm.go:310] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I0916 23:56:44.047794 804231 kubeadm.go:310] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I0916 23:56:44.048177 804231 kubeadm.go:310] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I0916 23:56:44.048269 804231 kubeadm.go:310] [kubelet-start] Starting the kubelet
I0916 23:56:44.122629 804231 kubeadm.go:310] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I0916 23:56:44.122739 804231 kubeadm.go:310] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I0916 23:56:45.124352 804231 kubeadm.go:310] [kubelet-check] The kubelet is healthy after 1.001822735s
I0916 23:56:45.127338 804231 kubeadm.go:310] [control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
I0916 23:56:45.127477 804231 kubeadm.go:310] [control-plane-check] Checking kube-apiserver at https://192.168.49.2:8443/livez
I0916 23:56:45.127582 804231 kubeadm.go:310] [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
I0916 23:56:45.127694 804231 kubeadm.go:310] [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
I0916 23:56:47.478256 804231 kubeadm.go:310] [control-plane-check] kube-scheduler is healthy after 2.350892202s
I0916 23:56:47.717698 804231 kubeadm.go:310] [control-plane-check] kube-controller-manager is healthy after 2.590223043s
I0916 23:56:49.129161 804231 kubeadm.go:310] [control-plane-check] kube-apiserver is healthy after 4.001748341s
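Aside (sketch, not from the test run): the three endpoints kubeadm polls above can also be probed by hand from the node, e.g.:
    curl -sk https://192.168.49.2:8443/livez
    curl -sk https://127.0.0.1:10257/healthz   # kube-controller-manager
    curl -sk https://127.0.0.1:10259/livez     # kube-scheduler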
I0916 23:56:49.140036 804231 kubeadm.go:310] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I0916 23:56:49.148779 804231 kubeadm.go:310] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I0916 23:56:49.158010 804231 kubeadm.go:310] [upload-certs] Skipping phase. Please see --upload-certs
I0916 23:56:49.158279 804231 kubeadm.go:310] [mark-control-plane] Marking the node ha-472903 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I0916 23:56:49.165085 804231 kubeadm.go:310] [bootstrap-token] Using token: 4apri1.yqe8ok7wc4ltba21
I0916 23:56:49.166180 804231 out.go:252] - Configuring RBAC rules ...
I0916 23:56:49.166328 804231 kubeadm.go:310] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I0916 23:56:49.169225 804231 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I0916 23:56:49.174527 804231 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I0916 23:56:49.176741 804231 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I0916 23:56:49.178892 804231 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I0916 23:56:49.181107 804231 kubeadm.go:310] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I0916 23:56:49.534440 804231 kubeadm.go:310] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I0916 23:56:49.948567 804231 kubeadm.go:310] [addons] Applied essential addon: CoreDNS
I0916 23:56:50.534581 804231 kubeadm.go:310] [addons] Applied essential addon: kube-proxy
I0916 23:56:50.535429 804231 kubeadm.go:310]
I0916 23:56:50.535529 804231 kubeadm.go:310] Your Kubernetes control-plane has initialized successfully!
I0916 23:56:50.535542 804231 kubeadm.go:310]
I0916 23:56:50.535650 804231 kubeadm.go:310] To start using your cluster, you need to run the following as a regular user:
I0916 23:56:50.535660 804231 kubeadm.go:310]
I0916 23:56:50.535696 804231 kubeadm.go:310] mkdir -p $HOME/.kube
I0916 23:56:50.535801 804231 kubeadm.go:310] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I0916 23:56:50.535858 804231 kubeadm.go:310] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I0916 23:56:50.535872 804231 kubeadm.go:310]
I0916 23:56:50.535940 804231 kubeadm.go:310] Alternatively, if you are the root user, you can run:
I0916 23:56:50.535949 804231 kubeadm.go:310]
I0916 23:56:50.536027 804231 kubeadm.go:310] export KUBECONFIG=/etc/kubernetes/admin.conf
I0916 23:56:50.536037 804231 kubeadm.go:310]
I0916 23:56:50.536125 804231 kubeadm.go:310] You should now deploy a pod network to the cluster.
I0916 23:56:50.536212 804231 kubeadm.go:310] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I0916 23:56:50.536280 804231 kubeadm.go:310] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I0916 23:56:50.536286 804231 kubeadm.go:310]
I0916 23:56:50.536356 804231 kubeadm.go:310] You can now join any number of control-plane nodes by copying certificate authorities
I0916 23:56:50.536441 804231 kubeadm.go:310] and service account keys on each node and then running the following as root:
I0916 23:56:50.536448 804231 kubeadm.go:310]
I0916 23:56:50.536543 804231 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token 4apri1.yqe8ok7wc4ltba21 \
I0916 23:56:50.536688 804231 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:52c78ec9ad9a2dc0941e43ce337b864c76ea573e452bc75ed737e69ad76deac1 \
I0916 23:56:50.536722 804231 kubeadm.go:310] --control-plane
I0916 23:56:50.536731 804231 kubeadm.go:310]
I0916 23:56:50.536842 804231 kubeadm.go:310] Then you can join any number of worker nodes by running the following on each as root:
I0916 23:56:50.536857 804231 kubeadm.go:310]
I0916 23:56:50.536947 804231 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token 4apri1.yqe8ok7wc4ltba21 \
I0916 23:56:50.537084 804231 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:52c78ec9ad9a2dc0941e43ce337b864c76ea573e452bc75ed737e69ad76deac1
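Aside (sketch, not from the test run): the --discovery-token-ca-cert-hash printed above is the SHA-256 of the cluster CA public key; it can be recomputed on the node with the standard kubeadm recipe to cross-check a join command:
    openssl x509 -pubkey -in /var/lib/minikube/certs/ca.crt \
      | openssl rsa -pubin -outform der 2>/dev/null \
      | openssl dgst -sha256 -hex | sed 's/^.* //'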
I0916 23:56:50.539097 804231 kubeadm.go:310] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1037-gcp\n", err: exit status 1
I0916 23:56:50.539238 804231 kubeadm.go:310] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I0916 23:56:50.539264 804231 cni.go:84] Creating CNI manager for ""
I0916 23:56:50.539274 804231 cni.go:136] multinode detected (1 nodes found), recommending kindnet
I0916 23:56:50.540523 804231 out.go:179] * Configuring CNI (Container Networking Interface) ...
I0916 23:56:50.541480 804231 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I0916 23:56:50.545518 804231 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.34.0/kubectl ...
I0916 23:56:50.545534 804231 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I0916 23:56:50.563251 804231 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I0916 23:56:50.762002 804231 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I0916 23:56:50.762092 804231 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I0916 23:56:50.762127 804231 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes ha-472903 minikube.k8s.io/updated_at=2025_09_16T23_56_50_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=9829f0bc17c523e4378d28e0c25741106f24f00a minikube.k8s.io/name=ha-472903 minikube.k8s.io/primary=true
I0916 23:56:50.771679 804231 ops.go:34] apiserver oom_adj: -16
I0916 23:56:50.843646 804231 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0916 23:56:51.344428 804231 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0916 23:56:51.844440 804231 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0916 23:56:52.344316 804231 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0916 23:56:52.844594 804231 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0916 23:56:53.343854 804231 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0916 23:56:53.844615 804231 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0916 23:56:54.344057 804231 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0916 23:56:54.844066 804231 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0916 23:56:55.344374 804231 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0916 23:56:55.844478 804231 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0916 23:56:55.927027 804231 kubeadm.go:1105] duration metric: took 5.165002596s to wait for elevateKubeSystemPrivileges
I0916 23:56:55.927062 804231 kubeadm.go:394] duration metric: took 16.809033965s to StartCluster
I0916 23:56:55.927081 804231 settings.go:142] acquiring lock: {Name:mk6c1a5bee23e141aad5180323c16c47ed580ab8 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 23:56:55.927146 804231 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21550-749120/kubeconfig
I0916 23:56:55.927785 804231 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-749120/kubeconfig: {Name:mk937123a8fee18625833b0bd778c4556f6787be Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 23:56:55.928026 804231 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I0916 23:56:55.928018 804231 start.go:233] HA (multi-control plane) cluster: will skip waiting for primary control-plane node &{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I0916 23:56:55.928038 804231 addons.go:511] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I0916 23:56:55.928103 804231 start.go:241] waiting for startup goroutines ...
I0916 23:56:55.928121 804231 addons.go:69] Setting default-storageclass=true in profile "ha-472903"
I0916 23:56:55.928148 804231 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "ha-472903"
I0916 23:56:55.928126 804231 addons.go:69] Setting storage-provisioner=true in profile "ha-472903"
I0916 23:56:55.928222 804231 addons.go:238] Setting addon storage-provisioner=true in "ha-472903"
I0916 23:56:55.928269 804231 host.go:66] Checking if "ha-472903" exists ...
I0916 23:56:55.928296 804231 config.go:182] Loaded profile config "ha-472903": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.0
I0916 23:56:55.928610 804231 cli_runner.go:164] Run: docker container inspect ha-472903 --format={{.State.Status}}
I0916 23:56:55.928740 804231 cli_runner.go:164] Run: docker container inspect ha-472903 --format={{.State.Status}}
I0916 23:56:55.954806 804231 kapi.go:59] client config for ha-472903: &rest.Config{Host:"https://192.168.49.254:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/client.crt", KeyFile:"/home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/client.key", CAFile:"/home/jenkins/minikube-integration/21550-749120/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(nil)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x27f4620), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), WarningHandlerWithContext:rest.WarningHandlerWithContext(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
I0916 23:56:55.955519 804231 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false
I0916 23:56:55.955545 804231 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false
I0916 23:56:55.955543 804231 cert_rotation.go:141] "Starting client certificate rotation controller" logger="tls-transport-cache"
I0916 23:56:55.955553 804231 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false
I0916 23:56:55.955611 804231 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true
I0916 23:56:55.955620 804231 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false
I0916 23:56:55.956096 804231 addons.go:238] Setting addon default-storageclass=true in "ha-472903"
I0916 23:56:55.956145 804231 host.go:66] Checking if "ha-472903" exists ...
I0916 23:56:55.956685 804231 cli_runner.go:164] Run: docker container inspect ha-472903 --format={{.State.Status}}
I0916 23:56:55.957279 804231 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I0916 23:56:55.961536 804231 addons.go:435] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0916 23:56:55.961557 804231 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I0916 23:56:55.961614 804231 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-472903
I0916 23:56:55.979896 804231 addons.go:435] installing /etc/kubernetes/addons/storageclass.yaml
I0916 23:56:55.979925 804231 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0916 23:56:55.979985 804231 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-472903
I0916 23:56:55.982838 804231 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33544 SSHKeyPath:/home/jenkins/minikube-integration/21550-749120/.minikube/machines/ha-472903/id_rsa Username:docker}
I0916 23:56:55.999402 804231 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33544 SSHKeyPath:/home/jenkins/minikube-integration/21550-749120/.minikube/machines/ha-472903/id_rsa Username:docker}
I0916 23:56:56.011618 804231 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I0916 23:56:56.095355 804231 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0916 23:56:56.110814 804231 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0916 23:56:56.153646 804231 start.go:976] {"host.minikube.internal": 192.168.49.1} host record injected into CoreDNS's ConfigMap
I0916 23:56:56.360175 804231 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
I0916 23:56:56.361116 804231 addons.go:514] duration metric: took 433.076562ms for enable addons: enabled=[storage-provisioner default-storageclass]
I0916 23:56:56.361149 804231 start.go:246] waiting for cluster config update ...
I0916 23:56:56.361163 804231 start.go:255] writing updated cluster config ...
I0916 23:56:56.362407 804231 out.go:203]
I0916 23:56:56.363527 804231 config.go:182] Loaded profile config "ha-472903": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.0
I0916 23:56:56.363621 804231 profile.go:143] Saving config to /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/config.json ...
I0916 23:56:56.364993 804231 out.go:179] * Starting "ha-472903-m02" control-plane node in "ha-472903" cluster
I0916 23:56:56.365873 804231 cache.go:123] Beginning downloading kic base image for docker with containerd
I0916 23:56:56.366751 804231 out.go:179] * Pulling base image v0.0.48 ...
I0916 23:56:56.367539 804231 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime containerd
I0916 23:56:56.367556 804231 cache.go:58] Caching tarball of preloaded images
I0916 23:56:56.367630 804231 image.go:81] Checking for gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon
I0916 23:56:56.367646 804231 preload.go:172] Found /home/jenkins/minikube-integration/21550-749120/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-containerd-overlay2-amd64.tar.lz4 in cache, skipping download
I0916 23:56:56.367654 804231 cache.go:61] Finished verifying existence of preloaded tar for v1.34.0 on containerd
I0916 23:56:56.367711 804231 profile.go:143] Saving config to /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/config.json ...
I0916 23:56:56.386547 804231 image.go:100] Found gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon, skipping pull
I0916 23:56:56.386565 804231 cache.go:147] gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 exists in daemon, skipping load
I0916 23:56:56.386580 804231 cache.go:232] Successfully downloaded all kic artifacts
I0916 23:56:56.386607 804231 start.go:360] acquireMachinesLock for ha-472903-m02: {Name:mk81d8c73856cf84ceff1767a1681f3f3cdab773 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0916 23:56:56.386700 804231 start.go:364] duration metric: took 70.184µs to acquireMachinesLock for "ha-472903-m02"
I0916 23:56:56.386738 804231 start.go:93] Provisioning new machine with config: &{Name:ha-472903 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:ha-472903 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true} {Name:m02 IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name:m02 IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I0916 23:56:56.386824 804231 start.go:125] createHost starting for "m02" (driver="docker")
I0916 23:56:56.388402 804231 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I0916 23:56:56.388536 804231 start.go:159] libmachine.API.Create for "ha-472903" (driver="docker")
I0916 23:56:56.388563 804231 client.go:168] LocalClient.Create starting
I0916 23:56:56.388626 804231 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21550-749120/.minikube/certs/ca.pem
I0916 23:56:56.388664 804231 main.go:141] libmachine: Decoding PEM data...
I0916 23:56:56.388687 804231 main.go:141] libmachine: Parsing certificate...
I0916 23:56:56.388757 804231 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21550-749120/.minikube/certs/cert.pem
I0916 23:56:56.388789 804231 main.go:141] libmachine: Decoding PEM data...
I0916 23:56:56.388804 804231 main.go:141] libmachine: Parsing certificate...
I0916 23:56:56.389042 804231 cli_runner.go:164] Run: docker network inspect ha-472903 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0916 23:56:56.404624 804231 network_create.go:77] Found existing network {name:ha-472903 subnet:0xc001d2d140 gateway:[0 0 0 0 0 0 0 0 0 0 255 255 192 168 49 1] mtu:1500}
I0916 23:56:56.404653 804231 kic.go:121] calculated static IP "192.168.49.3" for the "ha-472903-m02" container
I0916 23:56:56.404719 804231 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0916 23:56:56.420231 804231 cli_runner.go:164] Run: docker volume create ha-472903-m02 --label name.minikube.sigs.k8s.io=ha-472903-m02 --label created_by.minikube.sigs.k8s.io=true
I0916 23:56:56.436361 804231 oci.go:103] Successfully created a docker volume ha-472903-m02
I0916 23:56:56.436430 804231 cli_runner.go:164] Run: docker run --rm --name ha-472903-m02-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=ha-472903-m02 --entrypoint /usr/bin/test -v ha-472903-m02:/var gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -d /var/lib
I0916 23:56:56.943375 804231 oci.go:107] Successfully prepared a docker volume ha-472903-m02
I0916 23:56:56.943427 804231 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime containerd
I0916 23:56:56.943455 804231 kic.go:194] Starting extracting preloaded images to volume ...
I0916 23:56:56.943528 804231 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21550-749120/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v ha-472903-m02:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -I lz4 -xf /preloaded.tar -C /extractDir
I0916 23:57:01.091161 804231 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21550-749120/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v ha-472903-m02:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -I lz4 -xf /preloaded.tar -C /extractDir: (4.147592491s)
I0916 23:57:01.091197 804231 kic.go:203] duration metric: took 4.147738136s to extract preloaded images to volume ...
W0916 23:57:01.091312 804231 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
W0916 23:57:01.091355 804231 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.
I0916 23:57:01.091403 804231 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0916 23:57:01.142900 804231 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname ha-472903-m02 --name ha-472903-m02 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=ha-472903-m02 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=ha-472903-m02 --network ha-472903 --ip 192.168.49.3 --volume ha-472903-m02:/var --security-opt apparmor=unconfined --memory=3072mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1
I0916 23:57:01.378924 804231 cli_runner.go:164] Run: docker container inspect ha-472903-m02 --format={{.State.Running}}
I0916 23:57:01.396232 804231 cli_runner.go:164] Run: docker container inspect ha-472903-m02 --format={{.State.Status}}
I0916 23:57:01.412927 804231 cli_runner.go:164] Run: docker exec ha-472903-m02 stat /var/lib/dpkg/alternatives/iptables
I0916 23:57:01.469205 804231 oci.go:144] the created container "ha-472903-m02" has a running status.
I0916 23:57:01.469235 804231 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21550-749120/.minikube/machines/ha-472903-m02/id_rsa...
I0916 23:57:01.517570 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/machines/ha-472903-m02/id_rsa.pub -> /home/docker/.ssh/authorized_keys
I0916 23:57:01.517621 804231 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21550-749120/.minikube/machines/ha-472903-m02/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0916 23:57:01.540818 804231 cli_runner.go:164] Run: docker container inspect ha-472903-m02 --format={{.State.Status}}
I0916 23:57:01.560831 804231 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0916 23:57:01.560858 804231 kic_runner.go:114] Args: [docker exec --privileged ha-472903-m02 chown docker:docker /home/docker/.ssh/authorized_keys]
I0916 23:57:01.615037 804231 cli_runner.go:164] Run: docker container inspect ha-472903-m02 --format={{.State.Status}}
I0916 23:57:01.637921 804231 machine.go:93] provisionDockerMachine start ...
I0916 23:57:01.638030 804231 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-472903-m02
I0916 23:57:01.659741 804231 main.go:141] libmachine: Using SSH client type: native
I0916 23:57:01.660056 804231 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 33549 <nil> <nil>}
I0916 23:57:01.660078 804231 main.go:141] libmachine: About to run SSH command:
hostname
I0916 23:57:01.800716 804231 main.go:141] libmachine: SSH cmd err, output: <nil>: ha-472903-m02
I0916 23:57:01.800749 804231 ubuntu.go:182] provisioning hostname "ha-472903-m02"
I0916 23:57:01.800817 804231 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-472903-m02
I0916 23:57:01.819791 804231 main.go:141] libmachine: Using SSH client type: native
I0916 23:57:01.820013 804231 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 33549 <nil> <nil>}
I0916 23:57:01.820030 804231 main.go:141] libmachine: About to run SSH command:
sudo hostname ha-472903-m02 && echo "ha-472903-m02" | sudo tee /etc/hostname
I0916 23:57:01.967539 804231 main.go:141] libmachine: SSH cmd err, output: <nil>: ha-472903-m02
I0916 23:57:01.967631 804231 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-472903-m02
I0916 23:57:01.987814 804231 main.go:141] libmachine: Using SSH client type: native
I0916 23:57:01.988031 804231 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 33549 <nil> <nil>}
I0916 23:57:01.988047 804231 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\sha-472903-m02' /etc/hosts; then
  if grep -xq '127.0.1.1\s.*' /etc/hosts; then
    sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 ha-472903-m02/g' /etc/hosts;
  else
    echo '127.0.1.1 ha-472903-m02' | sudo tee -a /etc/hosts;
  fi
fi
I0916 23:57:02.121536 804231 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0916 23:57:02.121571 804231 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21550-749120/.minikube CaCertPath:/home/jenkins/minikube-integration/21550-749120/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21550-749120/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21550-749120/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21550-749120/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21550-749120/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21550-749120/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21550-749120/.minikube}
I0916 23:57:02.121588 804231 ubuntu.go:190] setting up certificates
I0916 23:57:02.121602 804231 provision.go:84] configureAuth start
I0916 23:57:02.121663 804231 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-472903-m02
I0916 23:57:02.139056 804231 provision.go:143] copyHostCerts
I0916 23:57:02.139098 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/certs/ca.pem -> /home/jenkins/minikube-integration/21550-749120/.minikube/ca.pem
I0916 23:57:02.139135 804231 exec_runner.go:144] found /home/jenkins/minikube-integration/21550-749120/.minikube/ca.pem, removing ...
I0916 23:57:02.139147 804231 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21550-749120/.minikube/ca.pem
I0916 23:57:02.139221 804231 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21550-749120/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21550-749120/.minikube/ca.pem (1078 bytes)
I0916 23:57:02.139329 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/certs/cert.pem -> /home/jenkins/minikube-integration/21550-749120/.minikube/cert.pem
I0916 23:57:02.139362 804231 exec_runner.go:144] found /home/jenkins/minikube-integration/21550-749120/.minikube/cert.pem, removing ...
I0916 23:57:02.139372 804231 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21550-749120/.minikube/cert.pem
I0916 23:57:02.139430 804231 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21550-749120/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21550-749120/.minikube/cert.pem (1123 bytes)
I0916 23:57:02.139521 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/certs/key.pem -> /home/jenkins/minikube-integration/21550-749120/.minikube/key.pem
I0916 23:57:02.139549 804231 exec_runner.go:144] found /home/jenkins/minikube-integration/21550-749120/.minikube/key.pem, removing ...
I0916 23:57:02.139559 804231 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21550-749120/.minikube/key.pem
I0916 23:57:02.139599 804231 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21550-749120/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21550-749120/.minikube/key.pem (1675 bytes)
I0916 23:57:02.139690 804231 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21550-749120/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21550-749120/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21550-749120/.minikube/certs/ca-key.pem org=jenkins.ha-472903-m02 san=[127.0.0.1 192.168.49.3 ha-472903-m02 localhost minikube]
I0916 23:57:02.262354 804231 provision.go:177] copyRemoteCerts
I0916 23:57:02.262428 804231 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0916 23:57:02.262491 804231 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-472903-m02
I0916 23:57:02.279792 804231 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33549 SSHKeyPath:/home/jenkins/minikube-integration/21550-749120/.minikube/machines/ha-472903-m02/id_rsa Username:docker}
I0916 23:57:02.375833 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/certs/ca.pem -> /etc/docker/ca.pem
I0916 23:57:02.375903 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I0916 23:57:02.400316 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/machines/server.pem -> /etc/docker/server.pem
I0916 23:57:02.400373 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/machines/server.pem --> /etc/docker/server.pem (1208 bytes)
I0916 23:57:02.422506 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/machines/server-key.pem -> /etc/docker/server-key.pem
I0916 23:57:02.422550 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I0916 23:57:02.445091 804231 provision.go:87] duration metric: took 323.464176ms to configureAuth
I0916 23:57:02.445121 804231 ubuntu.go:206] setting minikube options for container-runtime
I0916 23:57:02.445295 804231 config.go:182] Loaded profile config "ha-472903": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.0
I0916 23:57:02.445313 804231 machine.go:96] duration metric: took 807.372883ms to provisionDockerMachine
I0916 23:57:02.445320 804231 client.go:171] duration metric: took 6.056751196s to LocalClient.Create
I0916 23:57:02.445337 804231 start.go:167] duration metric: took 6.056804276s to libmachine.API.Create "ha-472903"
I0916 23:57:02.445346 804231 start.go:293] postStartSetup for "ha-472903-m02" (driver="docker")
I0916 23:57:02.445354 804231 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0916 23:57:02.445402 804231 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0916 23:57:02.445461 804231 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-472903-m02
I0916 23:57:02.463550 804231 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33549 SSHKeyPath:/home/jenkins/minikube-integration/21550-749120/.minikube/machines/ha-472903-m02/id_rsa Username:docker}
I0916 23:57:02.559528 804231 ssh_runner.go:195] Run: cat /etc/os-release
I0916 23:57:02.562755 804231 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0916 23:57:02.562780 804231 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0916 23:57:02.562787 804231 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0916 23:57:02.562793 804231 info.go:137] Remote host: Ubuntu 22.04.5 LTS
I0916 23:57:02.562803 804231 filesync.go:126] Scanning /home/jenkins/minikube-integration/21550-749120/.minikube/addons for local assets ...
I0916 23:57:02.562847 804231 filesync.go:126] Scanning /home/jenkins/minikube-integration/21550-749120/.minikube/files for local assets ...
I0916 23:57:02.562920 804231 filesync.go:149] local asset: /home/jenkins/minikube-integration/21550-749120/.minikube/files/etc/ssl/certs/7527072.pem -> 7527072.pem in /etc/ssl/certs
I0916 23:57:02.562930 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/files/etc/ssl/certs/7527072.pem -> /etc/ssl/certs/7527072.pem
I0916 23:57:02.563018 804231 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I0916 23:57:02.571142 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/files/etc/ssl/certs/7527072.pem --> /etc/ssl/certs/7527072.pem (1708 bytes)
I0916 23:57:02.596466 804231 start.go:296] duration metric: took 151.106324ms for postStartSetup
I0916 23:57:02.596768 804231 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-472903-m02
I0916 23:57:02.613316 804231 profile.go:143] Saving config to /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/config.json ...
I0916 23:57:02.613561 804231 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0916 23:57:02.613601 804231 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-472903-m02
I0916 23:57:02.632056 804231 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33549 SSHKeyPath:/home/jenkins/minikube-integration/21550-749120/.minikube/machines/ha-472903-m02/id_rsa Username:docker}
I0916 23:57:02.723085 804231 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0916 23:57:02.727430 804231 start.go:128] duration metric: took 6.340577447s to createHost
I0916 23:57:02.727453 804231 start.go:83] releasing machines lock for "ha-472903-m02", held for 6.34073897s
I0916 23:57:02.727519 804231 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-472903-m02
I0916 23:57:02.746152 804231 out.go:179] * Found network options:
I0916 23:57:02.747248 804231 out.go:179] - NO_PROXY=192.168.49.2
W0916 23:57:02.748187 804231 proxy.go:120] fail to check proxy env: Error ip not in block
W0916 23:57:02.748240 804231 proxy.go:120] fail to check proxy env: Error ip not in block
I0916 23:57:02.748311 804231 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0916 23:57:02.748360 804231 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-472903-m02
I0916 23:57:02.748367 804231 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0916 23:57:02.748427 804231 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-472903-m02
I0916 23:57:02.765286 804231 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33549 SSHKeyPath:/home/jenkins/minikube-integration/21550-749120/.minikube/machines/ha-472903-m02/id_rsa Username:docker}
I0916 23:57:02.766625 804231 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33549 SSHKeyPath:/home/jenkins/minikube-integration/21550-749120/.minikube/machines/ha-472903-m02/id_rsa Username:docker}
I0916 23:57:02.856922 804231 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0916 23:57:02.936692 804231 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0916 23:57:02.936761 804231 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0916 23:57:02.961822 804231 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
I0916 23:57:02.961845 804231 start.go:495] detecting cgroup driver to use...
I0916 23:57:02.961878 804231 detect.go:190] detected "systemd" cgroup driver on host os
I0916 23:57:02.961919 804231 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I0916 23:57:02.973318 804231 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0916 23:57:02.983927 804231 docker.go:218] disabling cri-docker service (if available) ...
I0916 23:57:02.983969 804231 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I0916 23:57:02.996091 804231 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I0916 23:57:03.009314 804231 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I0916 23:57:03.072565 804231 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I0916 23:57:03.140469 804231 docker.go:234] disabling docker service ...
I0916 23:57:03.140526 804231 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I0916 23:57:03.157179 804231 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I0916 23:57:03.167955 804231 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I0916 23:57:03.233386 804231 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I0916 23:57:03.296537 804231 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I0916 23:57:03.307574 804231 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0916 23:57:03.323754 804231 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I0916 23:57:03.334305 804231 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0916 23:57:03.343767 804231 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I0916 23:57:03.343826 804231 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I0916 23:57:03.353029 804231 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0916 23:57:03.361991 804231 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0916 23:57:03.371206 804231 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0916 23:57:03.380598 804231 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0916 23:57:03.389216 804231 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0916 23:57:03.398125 804231 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0916 23:57:03.407145 804231 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I0916 23:57:03.416183 804231 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0916 23:57:03.424123 804231 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0916 23:57:03.432185 804231 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0916 23:57:03.493561 804231 ssh_runner.go:195] Run: sudo systemctl restart containerd
I0916 23:57:03.591942 804231 start.go:542] Will wait 60s for socket path /run/containerd/containerd.sock
I0916 23:57:03.592010 804231 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I0916 23:57:03.595710 804231 start.go:563] Will wait 60s for crictl version
I0916 23:57:03.595768 804231 ssh_runner.go:195] Run: which crictl
I0916 23:57:03.599108 804231 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0916 23:57:03.633181 804231 start.go:579] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: 1.7.27
RuntimeApiVersion: v1
I0916 23:57:03.633231 804231 ssh_runner.go:195] Run: containerd --version
I0916 23:57:03.656364 804231 ssh_runner.go:195] Run: containerd --version
I0916 23:57:03.680150 804231 out.go:179] * Preparing Kubernetes v1.34.0 on containerd 1.7.27 ...
I0916 23:57:03.681177 804231 out.go:179] - env NO_PROXY=192.168.49.2
I0916 23:57:03.682053 804231 cli_runner.go:164] Run: docker network inspect ha-472903 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0916 23:57:03.699720 804231 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I0916 23:57:03.703306 804231 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0916 23:57:03.714275 804231 mustload.go:65] Loading cluster: ha-472903
I0916 23:57:03.714452 804231 config.go:182] Loaded profile config "ha-472903": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.0
I0916 23:57:03.714650 804231 cli_runner.go:164] Run: docker container inspect ha-472903 --format={{.State.Status}}
I0916 23:57:03.730631 804231 host.go:66] Checking if "ha-472903" exists ...
I0916 23:57:03.730849 804231 certs.go:68] Setting up /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903 for IP: 192.168.49.3
I0916 23:57:03.730859 804231 certs.go:194] generating shared ca certs ...
I0916 23:57:03.730877 804231 certs.go:226] acquiring lock for ca certs: {Name:mk87d179b4a631193bd9c86db8034ccf19400cde Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 23:57:03.730987 804231 certs.go:235] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21550-749120/.minikube/ca.key
I0916 23:57:03.731023 804231 certs.go:235] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21550-749120/.minikube/proxy-client-ca.key
I0916 23:57:03.731032 804231 certs.go:256] generating profile certs ...
I0916 23:57:03.731092 804231 certs.go:359] skipping valid signed profile cert regeneration for "minikube-user": /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/client.key
I0916 23:57:03.731114 804231 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.key.d67fba4a
I0916 23:57:03.731125 804231 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.crt.d67fba4a with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2 192.168.49.3 192.168.49.254]
I0916 23:57:03.830248 804231 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.crt.d67fba4a ...
I0916 23:57:03.830275 804231 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.crt.d67fba4a: {Name:mk3e97859392ca0d50685e4c31c19acd3c590753 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 23:57:03.830438 804231 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.key.d67fba4a ...
I0916 23:57:03.830453 804231 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.key.d67fba4a: {Name:mkd3ec6288ef831df369d4ec39839c410f5116ff Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 23:57:03.830530 804231 certs.go:381] copying /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.crt.d67fba4a -> /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.crt
I0916 23:57:03.830653 804231 certs.go:385] copying /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.key.d67fba4a -> /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.key
I0916 23:57:03.830779 804231 certs.go:359] skipping valid signed profile cert regeneration for "aggregator": /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/proxy-client.key
I0916 23:57:03.830794 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/ca.crt -> /var/lib/minikube/certs/ca.crt
I0916 23:57:03.830809 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/ca.key -> /var/lib/minikube/certs/ca.key
I0916 23:57:03.830823 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/proxy-client-ca.crt -> /var/lib/minikube/certs/proxy-client-ca.crt
I0916 23:57:03.830836 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/proxy-client-ca.key -> /var/lib/minikube/certs/proxy-client-ca.key
I0916 23:57:03.830846 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.crt -> /var/lib/minikube/certs/apiserver.crt
I0916 23:57:03.830855 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.key -> /var/lib/minikube/certs/apiserver.key
I0916 23:57:03.830864 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/proxy-client.crt -> /var/lib/minikube/certs/proxy-client.crt
I0916 23:57:03.830873 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/proxy-client.key -> /var/lib/minikube/certs/proxy-client.key
I0916 23:57:03.830920 804231 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-749120/.minikube/certs/752707.pem (1338 bytes)
W0916 23:57:03.830952 804231 certs.go:480] ignoring /home/jenkins/minikube-integration/21550-749120/.minikube/certs/752707_empty.pem, impossibly tiny 0 bytes
I0916 23:57:03.830962 804231 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-749120/.minikube/certs/ca-key.pem (1675 bytes)
I0916 23:57:03.830981 804231 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-749120/.minikube/certs/ca.pem (1078 bytes)
I0916 23:57:03.831001 804231 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-749120/.minikube/certs/cert.pem (1123 bytes)
I0916 23:57:03.831021 804231 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-749120/.minikube/certs/key.pem (1675 bytes)
I0916 23:57:03.831058 804231 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-749120/.minikube/files/etc/ssl/certs/7527072.pem (1708 bytes)
I0916 23:57:03.831081 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/certs/752707.pem -> /usr/share/ca-certificates/752707.pem
I0916 23:57:03.831094 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/files/etc/ssl/certs/7527072.pem -> /usr/share/ca-certificates/7527072.pem
I0916 23:57:03.831107 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/ca.crt -> /usr/share/ca-certificates/minikubeCA.pem
I0916 23:57:03.831156 804231 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-472903
I0916 23:57:03.847964 804231 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33544 SSHKeyPath:/home/jenkins/minikube-integration/21550-749120/.minikube/machines/ha-472903/id_rsa Username:docker}
I0916 23:57:03.934599 804231 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/sa.pub
I0916 23:57:03.938331 804231 ssh_runner.go:447] scp /var/lib/minikube/certs/sa.pub --> memory (451 bytes)
I0916 23:57:03.950286 804231 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/sa.key
I0916 23:57:03.953541 804231 ssh_runner.go:447] scp /var/lib/minikube/certs/sa.key --> memory (1675 bytes)
I0916 23:57:03.965169 804231 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/front-proxy-ca.crt
I0916 23:57:03.968351 804231 ssh_runner.go:447] scp /var/lib/minikube/certs/front-proxy-ca.crt --> memory (1123 bytes)
I0916 23:57:03.979814 804231 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/front-proxy-ca.key
I0916 23:57:03.982969 804231 ssh_runner.go:447] scp /var/lib/minikube/certs/front-proxy-ca.key --> memory (1675 bytes)
I0916 23:57:03.993972 804231 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/etcd/ca.crt
I0916 23:57:03.997171 804231 ssh_runner.go:447] scp /var/lib/minikube/certs/etcd/ca.crt --> memory (1094 bytes)
I0916 23:57:04.008607 804231 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/etcd/ca.key
I0916 23:57:04.011687 804231 ssh_runner.go:447] scp /var/lib/minikube/certs/etcd/ca.key --> memory (1679 bytes)
I0916 23:57:04.023019 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0916 23:57:04.046509 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I0916 23:57:04.069781 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0916 23:57:04.092702 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I0916 23:57:04.114933 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1436 bytes)
I0916 23:57:04.137173 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1671 bytes)
I0916 23:57:04.159280 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0916 23:57:04.181367 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I0916 23:57:04.203980 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/certs/752707.pem --> /usr/share/ca-certificates/752707.pem (1338 bytes)
I0916 23:57:04.230248 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/files/etc/ssl/certs/7527072.pem --> /usr/share/ca-certificates/7527072.pem (1708 bytes)
I0916 23:57:04.253628 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0916 23:57:04.276223 804231 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/sa.pub (451 bytes)
I0916 23:57:04.293552 804231 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/sa.key (1675 bytes)
I0916 23:57:04.309978 804231 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/front-proxy-ca.crt (1123 bytes)
I0916 23:57:04.326237 804231 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/front-proxy-ca.key (1675 bytes)
I0916 23:57:04.342704 804231 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/etcd/ca.crt (1094 bytes)
I0916 23:57:04.359099 804231 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/etcd/ca.key (1679 bytes)
I0916 23:57:04.375242 804231 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (744 bytes)
I0916 23:57:04.391611 804231 ssh_runner.go:195] Run: openssl version
I0916 23:57:04.396637 804231 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/752707.pem && ln -fs /usr/share/ca-certificates/752707.pem /etc/ssl/certs/752707.pem"
I0916 23:57:04.405389 804231 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/752707.pem
I0916 23:57:04.408604 804231 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Sep 16 23:54 /usr/share/ca-certificates/752707.pem
I0916 23:57:04.408651 804231 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/752707.pem
I0916 23:57:04.414862 804231 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/752707.pem /etc/ssl/certs/51391683.0"
I0916 23:57:04.423583 804231 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/7527072.pem && ln -fs /usr/share/ca-certificates/7527072.pem /etc/ssl/certs/7527072.pem"
I0916 23:57:04.432421 804231 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/7527072.pem
I0916 23:57:04.435706 804231 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Sep 16 23:54 /usr/share/ca-certificates/7527072.pem
I0916 23:57:04.435752 804231 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/7527072.pem
I0916 23:57:04.441863 804231 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/7527072.pem /etc/ssl/certs/3ec20f2e.0"
I0916 23:57:04.450595 804231 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0916 23:57:04.459588 804231 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0916 23:57:04.462866 804231 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Sep 16 23:48 /usr/share/ca-certificates/minikubeCA.pem
I0916 23:57:04.462907 804231 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0916 23:57:04.469279 804231 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I0916 23:57:04.478135 804231 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0916 23:57:04.481236 804231 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I0916 23:57:04.481288 804231 kubeadm.go:926] updating node {m02 192.168.49.3 8443 v1.34.0 containerd true true} ...
I0916 23:57:04.481383 804231 kubeadm.go:938] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=ha-472903-m02 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.3
[Install]
config:
{KubernetesVersion:v1.34.0 ClusterName:ha-472903 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I0916 23:57:04.481425 804231 kube-vip.go:115] generating kube-vip config ...
I0916 23:57:04.481462 804231 ssh_runner.go:195] Run: sudo sh -c "lsmod | grep ip_vs"
I0916 23:57:04.492937 804231 kube-vip.go:163] giving up enabling control-plane load-balancing as ipvs kernel modules appear not to be available: sudo sh -c "lsmod | grep ip_vs": Process exited with status 1
stdout:
stderr:
I0916 23:57:04.492999 804231 kube-vip.go:137] kube-vip config:
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  name: kube-vip
  namespace: kube-system
spec:
  containers:
  - args:
    - manager
    env:
    - name: vip_arp
      value: "true"
    - name: port
      value: "8443"
    - name: vip_nodename
      valueFrom:
        fieldRef:
          fieldPath: spec.nodeName
    - name: vip_interface
      value: eth0
    - name: vip_cidr
      value: "32"
    - name: dns_mode
      value: first
    - name: cp_enable
      value: "true"
    - name: cp_namespace
      value: kube-system
    - name: vip_leaderelection
      value: "true"
    - name: vip_leasename
      value: plndr-cp-lock
    - name: vip_leaseduration
      value: "5"
    - name: vip_renewdeadline
      value: "3"
    - name: vip_retryperiod
      value: "1"
    - name: address
      value: 192.168.49.254
    - name: prometheus_server
      value: :2112
    image: ghcr.io/kube-vip/kube-vip:v1.0.0
    imagePullPolicy: IfNotPresent
    name: kube-vip
    resources: {}
    securityContext:
      capabilities:
        add:
        - NET_ADMIN
        - NET_RAW
    volumeMounts:
    - mountPath: /etc/kubernetes/admin.conf
      name: kubeconfig
  hostAliases:
  - hostnames:
    - kubernetes
    ip: 127.0.0.1
  hostNetwork: true
  volumes:
  - hostPath:
      path: "/etc/kubernetes/admin.conf"
    name: kubeconfig
status: {}
I0916 23:57:04.493041 804231 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.0
I0916 23:57:04.501084 804231 binaries.go:44] Found k8s binaries, skipping transfer
I0916 23:57:04.501123 804231 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /etc/kubernetes/manifests
I0916 23:57:04.509217 804231 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (317 bytes)
I0916 23:57:04.525587 804231 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0916 23:57:04.544042 804231 ssh_runner.go:362] scp memory --> /etc/kubernetes/manifests/kube-vip.yaml (1358 bytes)
I0916 23:57:04.561542 804231 ssh_runner.go:195] Run: grep 192.168.49.254 control-plane.minikube.internal$ /etc/hosts
I0916 23:57:04.564725 804231 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.254 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0916 23:57:04.574819 804231 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0916 23:57:04.638378 804231 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0916 23:57:04.659569 804231 host.go:66] Checking if "ha-472903" exists ...
I0916 23:57:04.659878 804231 start.go:317] joinCluster: &{Name:ha-472903 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:ha-472903 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true} {Name:m02 IP:192.168.49.3 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0916 23:57:04.659986 804231 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm token create --print-join-command --ttl=0"
I0916 23:57:04.660033 804231 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-472903
I0916 23:57:04.678136 804231 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33544 SSHKeyPath:/home/jenkins/minikube-integration/21550-749120/.minikube/machines/ha-472903/id_rsa Username:docker}
I0916 23:57:04.817608 804231 start.go:343] trying to join control-plane node "m02" to cluster: &{Name:m02 IP:192.168.49.3 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I0916 23:57:04.817663 804231 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm join control-plane.minikube.internal:8443 --token 79akng.11lpa8n1ba4yh5m1 --discovery-token-ca-cert-hash sha256:52c78ec9ad9a2dc0941e43ce337b864c76ea573e452bc75ed737e69ad76deac1 --ignore-preflight-errors=all --cri-socket unix:///run/containerd/containerd.sock --node-name=ha-472903-m02 --control-plane --apiserver-advertise-address=192.168.49.3 --apiserver-bind-port=8443"
I0916 23:57:23.327384 804231 ssh_runner.go:235] Completed: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm join control-plane.minikube.internal:8443 --token 79akng.11lpa8n1ba4yh5m1 --discovery-token-ca-cert-hash sha256:52c78ec9ad9a2dc0941e43ce337b864c76ea573e452bc75ed737e69ad76deac1 --ignore-preflight-errors=all --cri-socket unix:///run/containerd/containerd.sock --node-name=ha-472903-m02 --control-plane --apiserver-advertise-address=192.168.49.3 --apiserver-bind-port=8443": (18.509693377s)
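Note: the --discovery-token-ca-cert-hash passed to the kubeadm join above is the SHA-256 digest of the cluster CA's DER-encoded public key (SubjectPublicKeyInfo). A minimal Go sketch that recomputes it from the CA certificate is shown below; the path is taken from the cert copy steps later in this log, and this is illustrative only, not minikube's own code.

// cahash.go: recompute the kubeadm discovery-token-ca-cert-hash from the cluster CA.
package main

import (
	"crypto/sha256"
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"log"
	"os"
)

func main() {
	pemBytes, err := os.ReadFile("/var/lib/minikube/certs/ca.crt")
	if err != nil {
		log.Fatal(err)
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		log.Fatal("no PEM block found in ca.crt")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		log.Fatal(err)
	}
	// kubeadm hashes the DER-encoded SubjectPublicKeyInfo of the CA public key.
	spki, err := x509.MarshalPKIXPublicKey(cert.PublicKey)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("sha256:%x\n", sha256.Sum256(spki))
}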
I0916 23:57:23.327447 804231 ssh_runner.go:195] Run: /bin/bash -c "sudo systemctl daemon-reload && sudo systemctl enable kubelet && sudo systemctl start kubelet"
I0916 23:57:23.521334 804231 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes ha-472903-m02 minikube.k8s.io/updated_at=2025_09_16T23_57_23_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=9829f0bc17c523e4378d28e0c25741106f24f00a minikube.k8s.io/name=ha-472903 minikube.k8s.io/primary=false
I0916 23:57:23.592991 804231 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig taint nodes ha-472903-m02 node-role.kubernetes.io/control-plane:NoSchedule-
I0916 23:57:23.664899 804231 start.go:319] duration metric: took 19.005017018s to joinCluster
I0916 23:57:23.664975 804231 start.go:235] Will wait 6m0s for node &{Name:m02 IP:192.168.49.3 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I0916 23:57:23.665223 804231 config.go:182] Loaded profile config "ha-472903": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.0
I0916 23:57:23.665877 804231 out.go:179] * Verifying Kubernetes components...
I0916 23:57:23.666680 804231 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0916 23:57:23.766393 804231 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0916 23:57:23.779164 804231 kapi.go:59] client config for ha-472903: &rest.Config{Host:"https://192.168.49.254:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/client.crt", KeyFile:"/home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/client.key", CAFile:"/home/jenkins/minikube-integration/21550-749120/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(n
il)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x27f4620), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), WarningHandlerWithContext:rest.WarningHandlerWithContext(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
W0916 23:57:23.779228 804231 kubeadm.go:483] Overriding stale ClientConfig host https://192.168.49.254:8443 with https://192.168.49.2:8443
I0916 23:57:23.779511 804231 node_ready.go:35] waiting up to 6m0s for node "ha-472903-m02" to be "Ready" ...
I0916 23:57:24.283593 804231 node_ready.go:49] node "ha-472903-m02" is "Ready"
I0916 23:57:24.283628 804231 node_ready.go:38] duration metric: took 504.097895ms for node "ha-472903-m02" to be "Ready" ...
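The node_ready wait above amounts to polling the node object's Ready condition through the API server. A rough client-go equivalent follows; it is a hypothetical helper, and the kubeconfig path, node name, and timeout are assumptions mirrored from this log.

// nodeready.go: poll a node's Ready condition via client-go.
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/var/lib/minikube/kubeconfig")
	if err != nil {
		log.Fatal(err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}
	deadline := time.Now().Add(6 * time.Minute)
	for time.Now().Before(deadline) {
		node, err := client.CoreV1().Nodes().Get(context.TODO(), "ha-472903-m02", metav1.GetOptions{})
		if err == nil {
			for _, c := range node.Status.Conditions {
				if c.Type == corev1.NodeReady && c.Status == corev1.ConditionTrue {
					fmt.Println("node is Ready")
					return
				}
			}
		}
		time.Sleep(2 * time.Second)
	}
	log.Fatal("timed out waiting for node to become Ready")
}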
I0916 23:57:24.283648 804231 api_server.go:52] waiting for apiserver process to appear ...
I0916 23:57:24.283699 804231 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0916 23:57:24.295735 804231 api_server.go:72] duration metric: took 630.723924ms to wait for apiserver process to appear ...
I0916 23:57:24.295758 804231 api_server.go:88] waiting for apiserver healthz status ...
I0916 23:57:24.295774 804231 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
I0916 23:57:24.299650 804231 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
ok
I0916 23:57:24.300537 804231 api_server.go:141] control plane version: v1.34.0
I0916 23:57:24.300558 804231 api_server.go:131] duration metric: took 4.795429ms to wait for apiserver health ...
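The healthz probe above is a plain HTTPS GET against the apiserver, authenticated with the profile's client certificate and the minikube CA (paths as logged in the client config). A self-contained sketch, assuming those files are readable on the host:

// healthz.go: call the apiserver /healthz endpoint with the profile's client cert.
package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io"
	"log"
	"net/http"
	"os"
)

func main() {
	cert, err := tls.LoadX509KeyPair(
		"/home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/client.crt",
		"/home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/client.key")
	if err != nil {
		log.Fatal(err)
	}
	caPEM, err := os.ReadFile("/home/jenkins/minikube-integration/21550-749120/.minikube/ca.crt")
	if err != nil {
		log.Fatal(err)
	}
	pool := x509.NewCertPool()
	pool.AppendCertsFromPEM(caPEM)
	client := &http.Client{Transport: &http.Transport{
		TLSClientConfig: &tls.Config{Certificates: []tls.Certificate{cert}, RootCAs: pool},
	}}
	resp, err := client.Get("https://192.168.49.2:8443/healthz")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(body))
}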
I0916 23:57:24.300566 804231 system_pods.go:43] waiting for kube-system pods to appear ...
I0916 23:57:24.304572 804231 system_pods.go:59] 19 kube-system pods found
I0916 23:57:24.304598 804231 system_pods.go:61] "coredns-66bc5c9577-c94hz" [774f1c0f-9759-44c2-957d-5a97670f951b] Running
I0916 23:57:24.304604 804231 system_pods.go:61] "coredns-66bc5c9577-qn8m7" [1c58205e-e865-42fc-8282-23e3d779ee97] Running
I0916 23:57:24.304608 804231 system_pods.go:61] "etcd-ha-472903" [e333577b-838c-41c5-ba86-ce3d7de57077] Running
I0916 23:57:24.304611 804231 system_pods.go:61] "etcd-ha-472903-m02" [8a478117-c53d-4621-aa09-be3c16d386c0] Pending
I0916 23:57:24.304615 804231 system_pods.go:61] "kindnet-lh7dv" [1da43ca7-9af7-4573-9cdc-fd21b098ca2c] Running
I0916 23:57:24.304621 804231 system_pods.go:61] "kindnet-mwf8l" [8c9533d3-defe-487b-a9b4-0502fb8f2d2a] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kindnet-mwf8l": pod kindnet-mwf8l is being deleted, cannot be assigned to a host)
I0916 23:57:24.304628 804231 system_pods.go:61] "kindnet-q7c7s" [85db5b30-8ace-4bb0-8886-32b9ca032b2b] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kindnet-q7c7s": pod kindnet-q7c7s is already assigned to node "ha-472903-m02")
I0916 23:57:24.304639 804231 system_pods.go:61] "kube-apiserver-ha-472903" [e2844751-3962-4753-8b63-79c124dd5fd7] Running
I0916 23:57:24.304643 804231 system_pods.go:61] "kube-apiserver-ha-472903-m02" [6675419c-7693-4970-b73c-8415bcda1684] Pending
I0916 23:57:24.304646 804231 system_pods.go:61] "kube-controller-manager-ha-472903" [be5cfd0b-a3b9-44cf-8cde-74e9eb89c738] Running
I0916 23:57:24.304650 804231 system_pods.go:61] "kube-controller-manager-ha-472903-m02" [54f6e7e0-0a78-4651-b24f-f902c6bf7efb] Pending
I0916 23:57:24.304657 804231 system_pods.go:61] "kube-proxy-58lkb" [32fed88c-ce9e-4536-8e96-04ab5b4f5d42] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kube-proxy-58lkb": pod kube-proxy-58lkb is already assigned to node "ha-472903-m02")
I0916 23:57:24.304662 804231 system_pods.go:61] "kube-proxy-d4m8f" [d4a70eec-48a7-4ea6-871a-1b5ed2beca9a] Running
I0916 23:57:24.304666 804231 system_pods.go:61] "kube-proxy-mf26q" [34502b32-75c1-4078-abd2-4e4d625252d8] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kube-proxy-mf26q": pod kube-proxy-mf26q is already assigned to node "ha-472903-m02")
I0916 23:57:24.304670 804231 system_pods.go:61] "kube-scheduler-ha-472903" [e949de65-b218-45cb-abe7-79b704aae473] Running
I0916 23:57:24.304677 804231 system_pods.go:61] "kube-scheduler-ha-472903-m02" [08b5a4f0-3aa6-4a82-b171-afc1eafcd4c2] Pending
I0916 23:57:24.304679 804231 system_pods.go:61] "kube-vip-ha-472903" [ccdab212-cf0c-4bf0-958b-173e1008f7bc] Running
I0916 23:57:24.304682 804231 system_pods.go:61] "kube-vip-ha-472903-m02" [748f096f-bec6-4de8-92f0-128db827bdd6] Pending
I0916 23:57:24.304687 804231 system_pods.go:61] "storage-provisioner" [ac7f283e-4d28-46cf-a519-bd227237d5e7] Running
I0916 23:57:24.304694 804231 system_pods.go:74] duration metric: took 4.122792ms to wait for pod list to return data ...
I0916 23:57:24.304700 804231 default_sa.go:34] waiting for default service account to be created ...
I0916 23:57:24.307165 804231 default_sa.go:45] found service account: "default"
I0916 23:57:24.307183 804231 default_sa.go:55] duration metric: took 2.474442ms for default service account to be created ...
I0916 23:57:24.307190 804231 system_pods.go:116] waiting for k8s-apps to be running ...
I0916 23:57:24.310491 804231 system_pods.go:86] 19 kube-system pods found
I0916 23:57:24.310512 804231 system_pods.go:89] "coredns-66bc5c9577-c94hz" [774f1c0f-9759-44c2-957d-5a97670f951b] Running
I0916 23:57:24.310517 804231 system_pods.go:89] "coredns-66bc5c9577-qn8m7" [1c58205e-e865-42fc-8282-23e3d779ee97] Running
I0916 23:57:24.310520 804231 system_pods.go:89] "etcd-ha-472903" [e333577b-838c-41c5-ba86-ce3d7de57077] Running
I0916 23:57:24.310524 804231 system_pods.go:89] "etcd-ha-472903-m02" [8a478117-c53d-4621-aa09-be3c16d386c0] Pending
I0916 23:57:24.310527 804231 system_pods.go:89] "kindnet-lh7dv" [1da43ca7-9af7-4573-9cdc-fd21b098ca2c] Running
I0916 23:57:24.310532 804231 system_pods.go:89] "kindnet-mwf8l" [8c9533d3-defe-487b-a9b4-0502fb8f2d2a] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kindnet-mwf8l": pod kindnet-mwf8l is being deleted, cannot be assigned to a host)
I0916 23:57:24.310556 804231 system_pods.go:89] "kindnet-q7c7s" [85db5b30-8ace-4bb0-8886-32b9ca032b2b] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kindnet-q7c7s": pod kindnet-q7c7s is already assigned to node "ha-472903-m02")
I0916 23:57:24.310566 804231 system_pods.go:89] "kube-apiserver-ha-472903" [e2844751-3962-4753-8b63-79c124dd5fd7] Running
I0916 23:57:24.310571 804231 system_pods.go:89] "kube-apiserver-ha-472903-m02" [6675419c-7693-4970-b73c-8415bcda1684] Pending
I0916 23:57:24.310576 804231 system_pods.go:89] "kube-controller-manager-ha-472903" [be5cfd0b-a3b9-44cf-8cde-74e9eb89c738] Running
I0916 23:57:24.310580 804231 system_pods.go:89] "kube-controller-manager-ha-472903-m02" [54f6e7e0-0a78-4651-b24f-f902c6bf7efb] Pending
I0916 23:57:24.310588 804231 system_pods.go:89] "kube-proxy-58lkb" [32fed88c-ce9e-4536-8e96-04ab5b4f5d42] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kube-proxy-58lkb": pod kube-proxy-58lkb is already assigned to node "ha-472903-m02")
I0916 23:57:24.310591 804231 system_pods.go:89] "kube-proxy-d4m8f" [d4a70eec-48a7-4ea6-871a-1b5ed2beca9a] Running
I0916 23:57:24.310596 804231 system_pods.go:89] "kube-proxy-mf26q" [34502b32-75c1-4078-abd2-4e4d625252d8] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kube-proxy-mf26q": pod kube-proxy-mf26q is already assigned to node "ha-472903-m02")
I0916 23:57:24.310600 804231 system_pods.go:89] "kube-scheduler-ha-472903" [e949de65-b218-45cb-abe7-79b704aae473] Running
I0916 23:57:24.310603 804231 system_pods.go:89] "kube-scheduler-ha-472903-m02" [08b5a4f0-3aa6-4a82-b171-afc1eafcd4c2] Pending
I0916 23:57:24.310608 804231 system_pods.go:89] "kube-vip-ha-472903" [ccdab212-cf0c-4bf0-958b-173e1008f7bc] Running
I0916 23:57:24.310611 804231 system_pods.go:89] "kube-vip-ha-472903-m02" [748f096f-bec6-4de8-92f0-128db827bdd6] Pending
I0916 23:57:24.310614 804231 system_pods.go:89] "storage-provisioner" [ac7f283e-4d28-46cf-a519-bd227237d5e7] Running
I0916 23:57:24.310621 804231 system_pods.go:126] duration metric: took 3.426124ms to wait for k8s-apps to be running ...
I0916 23:57:24.310629 804231 system_svc.go:44] waiting for kubelet service to be running ....
I0916 23:57:24.310666 804231 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0916 23:57:24.322152 804231 system_svc.go:56] duration metric: took 11.515834ms WaitForService to wait for kubelet
I0916 23:57:24.322176 804231 kubeadm.go:578] duration metric: took 657.167547ms to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0916 23:57:24.322199 804231 node_conditions.go:102] verifying NodePressure condition ...
I0916 23:57:24.327707 804231 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0916 23:57:24.327734 804231 node_conditions.go:123] node cpu capacity is 8
I0916 23:57:24.327748 804231 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0916 23:57:24.327754 804231 node_conditions.go:123] node cpu capacity is 8
I0916 23:57:24.327759 804231 node_conditions.go:105] duration metric: took 5.554046ms to run NodePressure ...
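The NodePressure step reads each node's reported capacity; the two nodes above show 304681132Ki of ephemeral storage and 8 CPUs apiece. A short client-go sketch that lists the same figures (illustrative; the kubeconfig path is an assumption):

// nodecapacity.go: print per-node ephemeral-storage and CPU capacity.
package main

import (
	"context"
	"fmt"
	"log"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/var/lib/minikube/kubeconfig")
	if err != nil {
		log.Fatal(err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}
	nodes, err := client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		log.Fatal(err)
	}
	for _, n := range nodes.Items {
		storage := n.Status.Capacity[corev1.ResourceEphemeralStorage]
		cpu := n.Status.Capacity[corev1.ResourceCPU]
		fmt.Printf("%s: ephemeral-storage=%s cpu=%s\n", n.Name, storage.String(), cpu.String())
	}
}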
I0916 23:57:24.327772 804231 start.go:241] waiting for startup goroutines ...
I0916 23:57:24.327803 804231 start.go:255] writing updated cluster config ...
I0916 23:57:24.329316 804231 out.go:203]
I0916 23:57:24.330356 804231 config.go:182] Loaded profile config "ha-472903": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.0
I0916 23:57:24.330485 804231 profile.go:143] Saving config to /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/config.json ...
I0916 23:57:24.331956 804231 out.go:179] * Starting "ha-472903-m03" control-plane node in "ha-472903" cluster
I0916 23:57:24.332973 804231 cache.go:123] Beginning downloading kic base image for docker with containerd
I0916 23:57:24.333962 804231 out.go:179] * Pulling base image v0.0.48 ...
I0916 23:57:24.334852 804231 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime containerd
I0916 23:57:24.334875 804231 cache.go:58] Caching tarball of preloaded images
I0916 23:57:24.334942 804231 image.go:81] Checking for gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon
I0916 23:57:24.334986 804231 preload.go:172] Found /home/jenkins/minikube-integration/21550-749120/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-containerd-overlay2-amd64.tar.lz4 in cache, skipping download
I0916 23:57:24.334997 804231 cache.go:61] Finished verifying existence of preloaded tar for v1.34.0 on containerd
I0916 23:57:24.335117 804231 profile.go:143] Saving config to /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/config.json ...
I0916 23:57:24.357217 804231 image.go:100] Found gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon, skipping pull
I0916 23:57:24.357233 804231 cache.go:147] gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 exists in daemon, skipping load
I0916 23:57:24.357242 804231 cache.go:232] Successfully downloaded all kic artifacts
I0916 23:57:24.357267 804231 start.go:360] acquireMachinesLock for ha-472903-m03: {Name:mk61000bb8e4699ca3310a7fc257e30a156b69de Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0916 23:57:24.357354 804231 start.go:364] duration metric: took 71.354µs to acquireMachinesLock for "ha-472903-m03"
I0916 23:57:24.357375 804231 start.go:93] Provisioning new machine with config: &{Name:ha-472903 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:ha-472903 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerN
ames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true} {Name:m02 IP:192.168.49.3 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true} {Name:m03 IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:false efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:fal
se kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:false storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPa
th: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name:m03 IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I0916 23:57:24.357498 804231 start.go:125] createHost starting for "m03" (driver="docker")
I0916 23:57:24.358917 804231 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I0916 23:57:24.358994 804231 start.go:159] libmachine.API.Create for "ha-472903" (driver="docker")
I0916 23:57:24.359023 804231 client.go:168] LocalClient.Create starting
I0916 23:57:24.359071 804231 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21550-749120/.minikube/certs/ca.pem
I0916 23:57:24.359103 804231 main.go:141] libmachine: Decoding PEM data...
I0916 23:57:24.359116 804231 main.go:141] libmachine: Parsing certificate...
I0916 23:57:24.359164 804231 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21550-749120/.minikube/certs/cert.pem
I0916 23:57:24.359182 804231 main.go:141] libmachine: Decoding PEM data...
I0916 23:57:24.359192 804231 main.go:141] libmachine: Parsing certificate...
I0916 23:57:24.359366 804231 cli_runner.go:164] Run: docker network inspect ha-472903 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0916 23:57:24.375654 804231 network_create.go:77] Found existing network {name:ha-472903 subnet:0xc001b33bf0 gateway:[0 0 0 0 0 0 0 0 0 0 255 255 192 168 49 1] mtu:1500}
I0916 23:57:24.375684 804231 kic.go:121] calculated static IP "192.168.49.4" for the "ha-472903-m03" container
I0916 23:57:24.375740 804231 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0916 23:57:24.392165 804231 cli_runner.go:164] Run: docker volume create ha-472903-m03 --label name.minikube.sigs.k8s.io=ha-472903-m03 --label created_by.minikube.sigs.k8s.io=true
I0916 23:57:24.408273 804231 oci.go:103] Successfully created a docker volume ha-472903-m03
I0916 23:57:24.408342 804231 cli_runner.go:164] Run: docker run --rm --name ha-472903-m03-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=ha-472903-m03 --entrypoint /usr/bin/test -v ha-472903-m03:/var gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -d /var/lib
I0916 23:57:24.957699 804231 oci.go:107] Successfully prepared a docker volume ha-472903-m03
I0916 23:57:24.957748 804231 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime containerd
I0916 23:57:24.957783 804231 kic.go:194] Starting extracting preloaded images to volume ...
I0916 23:57:24.957856 804231 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21550-749120/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v ha-472903-m03:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -I lz4 -xf /preloaded.tar -C /extractDir
I0916 23:57:29.095091 804231 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21550-749120/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v ha-472903-m03:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -I lz4 -xf /preloaded.tar -C /extractDir: (4.13717471s)
I0916 23:57:29.095123 804231 kic.go:203] duration metric: took 4.137337977s to extract preloaded images to volume ...
W0916 23:57:29.095214 804231 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
W0916 23:57:29.095253 804231 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.
I0916 23:57:29.095300 804231 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0916 23:57:29.145859 804231 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname ha-472903-m03 --name ha-472903-m03 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=ha-472903-m03 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=ha-472903-m03 --network ha-472903 --ip 192.168.49.4 --volume ha-472903-m03:/var --security-opt apparmor=unconfined --memory=3072mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1
I0916 23:57:29.392873 804231 cli_runner.go:164] Run: docker container inspect ha-472903-m03 --format={{.State.Running}}
I0916 23:57:29.412389 804231 cli_runner.go:164] Run: docker container inspect ha-472903-m03 --format={{.State.Status}}
I0916 23:57:29.430593 804231 cli_runner.go:164] Run: docker exec ha-472903-m03 stat /var/lib/dpkg/alternatives/iptables
I0916 23:57:29.476672 804231 oci.go:144] the created container "ha-472903-m03" has a running status.
I0916 23:57:29.476707 804231 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21550-749120/.minikube/machines/ha-472903-m03/id_rsa...
I0916 23:57:29.927926 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/machines/ha-472903-m03/id_rsa.pub -> /home/docker/.ssh/authorized_keys
I0916 23:57:29.927968 804231 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21550-749120/.minikube/machines/ha-472903-m03/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0916 23:57:29.954518 804231 cli_runner.go:164] Run: docker container inspect ha-472903-m03 --format={{.State.Status}}
I0916 23:57:29.975503 804231 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0916 23:57:29.975522 804231 kic_runner.go:114] Args: [docker exec --privileged ha-472903-m03 chown docker:docker /home/docker/.ssh/authorized_keys]
I0916 23:57:30.023965 804231 cli_runner.go:164] Run: docker container inspect ha-472903-m03 --format={{.State.Status}}
I0916 23:57:30.040966 804231 machine.go:93] provisionDockerMachine start ...
I0916 23:57:30.041051 804231 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-472903-m03
I0916 23:57:30.058157 804231 main.go:141] libmachine: Using SSH client type: native
I0916 23:57:30.058388 804231 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 33554 <nil> <nil>}
I0916 23:57:30.058400 804231 main.go:141] libmachine: About to run SSH command:
hostname
I0916 23:57:30.190964 804231 main.go:141] libmachine: SSH cmd err, output: <nil>: ha-472903-m03
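provisionDockerMachine drives the new container over SSH on the forwarded host port (33554 here), authenticating with the machine's generated id_rsa. Below is a sketch of the same hostname round-trip using golang.org/x/crypto/ssh; the test-only host-key handling is an assumption, and this is not minikube's implementation.

// sshhostname.go: run `hostname` on the new node over the forwarded SSH port.
package main

import (
	"bytes"
	"fmt"
	"log"
	"os"

	"golang.org/x/crypto/ssh"
)

func main() {
	keyBytes, err := os.ReadFile("/home/jenkins/minikube-integration/21550-749120/.minikube/machines/ha-472903-m03/id_rsa")
	if err != nil {
		log.Fatal(err)
	}
	signer, err := ssh.ParsePrivateKey(keyBytes)
	if err != nil {
		log.Fatal(err)
	}
	cfg := &ssh.ClientConfig{
		User:            "docker",
		Auth:            []ssh.AuthMethod{ssh.PublicKeys(signer)},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // acceptable only for a local test container
	}
	client, err := ssh.Dial("tcp", "127.0.0.1:33554", cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	session, err := client.NewSession()
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()
	var out bytes.Buffer
	session.Stdout = &out
	if err := session.Run("hostname"); err != nil {
		log.Fatal(err)
	}
	fmt.Print(out.String())
}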
I0916 23:57:30.190995 804231 ubuntu.go:182] provisioning hostname "ha-472903-m03"
I0916 23:57:30.191059 804231 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-472903-m03
I0916 23:57:30.208862 804231 main.go:141] libmachine: Using SSH client type: native
I0916 23:57:30.209123 804231 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 33554 <nil> <nil>}
I0916 23:57:30.209144 804231 main.go:141] libmachine: About to run SSH command:
sudo hostname ha-472903-m03 && echo "ha-472903-m03" | sudo tee /etc/hostname
I0916 23:57:30.354363 804231 main.go:141] libmachine: SSH cmd err, output: <nil>: ha-472903-m03
I0916 23:57:30.354466 804231 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-472903-m03
I0916 23:57:30.372285 804231 main.go:141] libmachine: Using SSH client type: native
I0916 23:57:30.372570 804231 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 33554 <nil> <nil>}
I0916 23:57:30.372590 804231 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\sha-472903-m03' /etc/hosts; then
  if grep -xq '127.0.1.1\s.*' /etc/hosts; then
    sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 ha-472903-m03/g' /etc/hosts;
  else
    echo '127.0.1.1 ha-472903-m03' | sudo tee -a /etc/hosts;
  fi
fi
I0916 23:57:30.504861 804231 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0916 23:57:30.504898 804231 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21550-749120/.minikube CaCertPath:/home/jenkins/minikube-integration/21550-749120/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21550-749120/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21550-749120/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21550-749120/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21550-749120/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21550-749120/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21550-749120/.minikube}
I0916 23:57:30.504920 804231 ubuntu.go:190] setting up certificates
I0916 23:57:30.504933 804231 provision.go:84] configureAuth start
I0916 23:57:30.504996 804231 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-472903-m03
I0916 23:57:30.522218 804231 provision.go:143] copyHostCerts
I0916 23:57:30.522259 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/certs/ca.pem -> /home/jenkins/minikube-integration/21550-749120/.minikube/ca.pem
I0916 23:57:30.522297 804231 exec_runner.go:144] found /home/jenkins/minikube-integration/21550-749120/.minikube/ca.pem, removing ...
I0916 23:57:30.522306 804231 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21550-749120/.minikube/ca.pem
I0916 23:57:30.522369 804231 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21550-749120/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21550-749120/.minikube/ca.pem (1078 bytes)
I0916 23:57:30.522483 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/certs/cert.pem -> /home/jenkins/minikube-integration/21550-749120/.minikube/cert.pem
I0916 23:57:30.522506 804231 exec_runner.go:144] found /home/jenkins/minikube-integration/21550-749120/.minikube/cert.pem, removing ...
I0916 23:57:30.522510 804231 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21550-749120/.minikube/cert.pem
I0916 23:57:30.522547 804231 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21550-749120/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21550-749120/.minikube/cert.pem (1123 bytes)
I0916 23:57:30.522650 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/certs/key.pem -> /home/jenkins/minikube-integration/21550-749120/.minikube/key.pem
I0916 23:57:30.522673 804231 exec_runner.go:144] found /home/jenkins/minikube-integration/21550-749120/.minikube/key.pem, removing ...
I0916 23:57:30.522678 804231 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21550-749120/.minikube/key.pem
I0916 23:57:30.522703 804231 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21550-749120/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21550-749120/.minikube/key.pem (1675 bytes)
I0916 23:57:30.522769 804231 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21550-749120/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21550-749120/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21550-749120/.minikube/certs/ca-key.pem org=jenkins.ha-472903-m03 san=[127.0.0.1 192.168.49.4 ha-472903-m03 localhost minikube]
I0916 23:57:30.644066 804231 provision.go:177] copyRemoteCerts
I0916 23:57:30.644118 804231 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0916 23:57:30.644153 804231 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-472903-m03
I0916 23:57:30.661612 804231 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33554 SSHKeyPath:/home/jenkins/minikube-integration/21550-749120/.minikube/machines/ha-472903-m03/id_rsa Username:docker}
I0916 23:57:30.757452 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/certs/ca.pem -> /etc/docker/ca.pem
I0916 23:57:30.757504 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I0916 23:57:30.782942 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/machines/server.pem -> /etc/docker/server.pem
I0916 23:57:30.782994 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/machines/server.pem --> /etc/docker/server.pem (1208 bytes)
I0916 23:57:30.806508 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/machines/server-key.pem -> /etc/docker/server-key.pem
I0916 23:57:30.806562 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I0916 23:57:30.829686 804231 provision.go:87] duration metric: took 324.735799ms to configureAuth
I0916 23:57:30.829709 804231 ubuntu.go:206] setting minikube options for container-runtime
I0916 23:57:30.829902 804231 config.go:182] Loaded profile config "ha-472903": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.0
I0916 23:57:30.829916 804231 machine.go:96] duration metric: took 788.930334ms to provisionDockerMachine
I0916 23:57:30.829925 804231 client.go:171] duration metric: took 6.470893656s to LocalClient.Create
I0916 23:57:30.829958 804231 start.go:167] duration metric: took 6.470963089s to libmachine.API.Create "ha-472903"
I0916 23:57:30.829971 804231 start.go:293] postStartSetup for "ha-472903-m03" (driver="docker")
I0916 23:57:30.829982 804231 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0916 23:57:30.830042 804231 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0916 23:57:30.830092 804231 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-472903-m03
I0916 23:57:30.847215 804231 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33554 SSHKeyPath:/home/jenkins/minikube-integration/21550-749120/.minikube/machines/ha-472903-m03/id_rsa Username:docker}
I0916 23:57:30.945849 804231 ssh_runner.go:195] Run: cat /etc/os-release
I0916 23:57:30.949055 804231 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0916 23:57:30.949086 804231 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0916 23:57:30.949098 804231 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0916 23:57:30.949107 804231 info.go:137] Remote host: Ubuntu 22.04.5 LTS
I0916 23:57:30.949120 804231 filesync.go:126] Scanning /home/jenkins/minikube-integration/21550-749120/.minikube/addons for local assets ...
I0916 23:57:30.949174 804231 filesync.go:126] Scanning /home/jenkins/minikube-integration/21550-749120/.minikube/files for local assets ...
I0916 23:57:30.949274 804231 filesync.go:149] local asset: /home/jenkins/minikube-integration/21550-749120/.minikube/files/etc/ssl/certs/7527072.pem -> 7527072.pem in /etc/ssl/certs
I0916 23:57:30.949286 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/files/etc/ssl/certs/7527072.pem -> /etc/ssl/certs/7527072.pem
I0916 23:57:30.949392 804231 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I0916 23:57:30.957998 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/files/etc/ssl/certs/7527072.pem --> /etc/ssl/certs/7527072.pem (1708 bytes)
I0916 23:57:30.983779 804231 start.go:296] duration metric: took 153.794843ms for postStartSetup
I0916 23:57:30.984109 804231 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-472903-m03
I0916 23:57:31.001367 804231 profile.go:143] Saving config to /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/config.json ...
I0916 23:57:31.001618 804231 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0916 23:57:31.001659 804231 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-472903-m03
I0916 23:57:31.019034 804231 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33554 SSHKeyPath:/home/jenkins/minikube-integration/21550-749120/.minikube/machines/ha-472903-m03/id_rsa Username:docker}
I0916 23:57:31.110814 804231 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0916 23:57:31.115046 804231 start.go:128] duration metric: took 6.757532739s to createHost
I0916 23:57:31.115072 804231 start.go:83] releasing machines lock for "ha-472903-m03", held for 6.757707303s
I0916 23:57:31.115154 804231 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-472903-m03
I0916 23:57:31.133371 804231 out.go:179] * Found network options:
I0916 23:57:31.134481 804231 out.go:179] - NO_PROXY=192.168.49.2,192.168.49.3
W0916 23:57:31.135570 804231 proxy.go:120] fail to check proxy env: Error ip not in block
W0916 23:57:31.135598 804231 proxy.go:120] fail to check proxy env: Error ip not in block
W0916 23:57:31.135626 804231 proxy.go:120] fail to check proxy env: Error ip not in block
W0916 23:57:31.135644 804231 proxy.go:120] fail to check proxy env: Error ip not in block
I0916 23:57:31.135714 804231 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0916 23:57:31.135763 804231 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-472903-m03
I0916 23:57:31.135778 804231 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0916 23:57:31.135845 804231 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-472903-m03
I0916 23:57:31.152320 804231 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33554 SSHKeyPath:/home/jenkins/minikube-integration/21550-749120/.minikube/machines/ha-472903-m03/id_rsa Username:docker}
I0916 23:57:31.153909 804231 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33554 SSHKeyPath:/home/jenkins/minikube-integration/21550-749120/.minikube/machines/ha-472903-m03/id_rsa Username:docker}
I0916 23:57:31.320495 804231 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0916 23:57:31.348141 804231 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0916 23:57:31.348214 804231 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0916 23:57:31.373693 804231 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
I0916 23:57:31.373720 804231 start.go:495] detecting cgroup driver to use...
I0916 23:57:31.373748 804231 detect.go:190] detected "systemd" cgroup driver on host os
I0916 23:57:31.373802 804231 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I0916 23:57:31.385560 804231 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0916 23:57:31.396165 804231 docker.go:218] disabling cri-docker service (if available) ...
I0916 23:57:31.396214 804231 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I0916 23:57:31.409119 804231 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I0916 23:57:31.422244 804231 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I0916 23:57:31.489491 804231 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I0916 23:57:31.557098 804231 docker.go:234] disabling docker service ...
I0916 23:57:31.557149 804231 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I0916 23:57:31.574601 804231 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I0916 23:57:31.585773 804231 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I0916 23:57:31.649988 804231 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I0916 23:57:31.717070 804231 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I0916 23:57:31.727904 804231 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0916 23:57:31.743685 804231 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I0916 23:57:31.755962 804231 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0916 23:57:31.766072 804231 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I0916 23:57:31.766138 804231 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I0916 23:57:31.775522 804231 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0916 23:57:31.785914 804231 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0916 23:57:31.795134 804231 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0916 23:57:31.804565 804231 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0916 23:57:31.813319 804231 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0916 23:57:31.822500 804231 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0916 23:57:31.831597 804231 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I0916 23:57:31.840887 804231 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0916 23:57:31.848842 804231 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0916 23:57:31.857026 804231 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0916 23:57:31.920521 804231 ssh_runner.go:195] Run: sudo systemctl restart containerd
I0916 23:57:32.022746 804231 start.go:542] Will wait 60s for socket path /run/containerd/containerd.sock
I0916 23:57:32.022804 804231 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I0916 23:57:32.026838 804231 start.go:563] Will wait 60s for crictl version
I0916 23:57:32.026888 804231 ssh_runner.go:195] Run: which crictl
I0916 23:57:32.030295 804231 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0916 23:57:32.064100 804231 start.go:579] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: 1.7.27
RuntimeApiVersion: v1
I0916 23:57:32.064158 804231 ssh_runner.go:195] Run: containerd --version
I0916 23:57:32.088276 804231 ssh_runner.go:195] Run: containerd --version
I0916 23:57:32.114182 804231 out.go:179] * Preparing Kubernetes v1.34.0 on containerd 1.7.27 ...
I0916 23:57:32.115194 804231 out.go:179] - env NO_PROXY=192.168.49.2
I0916 23:57:32.116236 804231 out.go:179] - env NO_PROXY=192.168.49.2,192.168.49.3
I0916 23:57:32.117151 804231 cli_runner.go:164] Run: docker network inspect ha-472903 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0916 23:57:32.133290 804231 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I0916 23:57:32.136901 804231 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0916 23:57:32.147860 804231 mustload.go:65] Loading cluster: ha-472903
I0916 23:57:32.148060 804231 config.go:182] Loaded profile config "ha-472903": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.0
I0916 23:57:32.148275 804231 cli_runner.go:164] Run: docker container inspect ha-472903 --format={{.State.Status}}
I0916 23:57:32.164278 804231 host.go:66] Checking if "ha-472903" exists ...
I0916 23:57:32.164570 804231 certs.go:68] Setting up /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903 for IP: 192.168.49.4
I0916 23:57:32.164584 804231 certs.go:194] generating shared ca certs ...
I0916 23:57:32.164601 804231 certs.go:226] acquiring lock for ca certs: {Name:mk87d179b4a631193bd9c86db8034ccf19400cde Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 23:57:32.164751 804231 certs.go:235] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21550-749120/.minikube/ca.key
I0916 23:57:32.164800 804231 certs.go:235] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21550-749120/.minikube/proxy-client-ca.key
I0916 23:57:32.164814 804231 certs.go:256] generating profile certs ...
I0916 23:57:32.164911 804231 certs.go:359] skipping valid signed profile cert regeneration for "minikube-user": /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/client.key
I0916 23:57:32.164940 804231 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.key.14b885b8
I0916 23:57:32.164958 804231 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.crt.14b885b8 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2 192.168.49.3 192.168.49.4 192.168.49.254]
I0916 23:57:32.342596 804231 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.crt.14b885b8 ...
I0916 23:57:32.342623 804231 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.crt.14b885b8: {Name:mk455c3f0ae4544ddcdf75c25cbd1b87a24e61a6 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 23:57:32.342787 804231 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.key.14b885b8 ...
I0916 23:57:32.342799 804231 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.key.14b885b8: {Name:mkbd551bf9ae23c129f7e263550d20b4aac5d095 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 23:57:32.342871 804231 certs.go:381] copying /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.crt.14b885b8 -> /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.crt
I0916 23:57:32.343007 804231 certs.go:385] copying /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.key.14b885b8 -> /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.key
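The profile's apiserver certificate is re-issued here so its SANs cover every control-plane IP, the HA VIP (192.168.49.254), and the in-cluster service IP (10.96.0.1). A rough crypto/x509 sketch of signing such a certificate against the cluster CA follows; it is hypothetical, and the RSA PKCS#1 key type, file names, and validity period are assumptions rather than minikube's generator.

// apiservercert.go: issue a server cert whose IP SANs mirror the list logged above.
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"log"
	"math/big"
	"net"
	"os"
	"time"
)

// mustPEM reads a file and returns the DER bytes of its first PEM block.
func mustPEM(path string) []byte {
	data, err := os.ReadFile(path)
	if err != nil {
		log.Fatal(err)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		log.Fatalf("no PEM data in %s", path)
	}
	return block.Bytes
}

func main() {
	caCert, err := x509.ParseCertificate(mustPEM("ca.crt"))
	if err != nil {
		log.Fatal(err)
	}
	caKey, err := x509.ParsePKCS1PrivateKey(mustPEM("ca.key")) // assumes an RSA PKCS#1 CA key
	if err != nil {
		log.Fatal(err)
	}
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(time.Now().UnixNano()),
		Subject:      pkix.Name{CommonName: "minikube"},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(3 * 365 * 24 * time.Hour),
		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		// IP SAN list copied from the log line above.
		IPAddresses: []net.IP{
			net.ParseIP("10.96.0.1"), net.ParseIP("127.0.0.1"), net.ParseIP("10.0.0.1"),
			net.ParseIP("192.168.49.2"), net.ParseIP("192.168.49.3"),
			net.ParseIP("192.168.49.4"), net.ParseIP("192.168.49.254"),
		},
	}
	der, err := x509.CreateCertificate(rand.Reader, tmpl, caCert, &priv.PublicKey, caKey)
	if err != nil {
		log.Fatal(err)
	}
	if err := pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: der}); err != nil {
		log.Fatal(err)
	}
}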
I0916 23:57:32.343136 804231 certs.go:359] skipping valid signed profile cert regeneration for "aggregator": /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/proxy-client.key
I0916 23:57:32.343152 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/ca.crt -> /var/lib/minikube/certs/ca.crt
I0916 23:57:32.343165 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/ca.key -> /var/lib/minikube/certs/ca.key
I0916 23:57:32.343178 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/proxy-client-ca.crt -> /var/lib/minikube/certs/proxy-client-ca.crt
I0916 23:57:32.343191 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/proxy-client-ca.key -> /var/lib/minikube/certs/proxy-client-ca.key
I0916 23:57:32.343204 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.crt -> /var/lib/minikube/certs/apiserver.crt
I0916 23:57:32.343214 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.key -> /var/lib/minikube/certs/apiserver.key
I0916 23:57:32.343229 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/proxy-client.crt -> /var/lib/minikube/certs/proxy-client.crt
I0916 23:57:32.343247 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/proxy-client.key -> /var/lib/minikube/certs/proxy-client.key
I0916 23:57:32.343299 804231 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-749120/.minikube/certs/752707.pem (1338 bytes)
W0916 23:57:32.343327 804231 certs.go:480] ignoring /home/jenkins/minikube-integration/21550-749120/.minikube/certs/752707_empty.pem, impossibly tiny 0 bytes
I0916 23:57:32.343337 804231 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-749120/.minikube/certs/ca-key.pem (1675 bytes)
I0916 23:57:32.343357 804231 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-749120/.minikube/certs/ca.pem (1078 bytes)
I0916 23:57:32.343379 804231 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-749120/.minikube/certs/cert.pem (1123 bytes)
I0916 23:57:32.343400 804231 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-749120/.minikube/certs/key.pem (1675 bytes)
I0916 23:57:32.343464 804231 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-749120/.minikube/files/etc/ssl/certs/7527072.pem (1708 bytes)
I0916 23:57:32.343501 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/ca.crt -> /usr/share/ca-certificates/minikubeCA.pem
I0916 23:57:32.343521 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/certs/752707.pem -> /usr/share/ca-certificates/752707.pem
I0916 23:57:32.343534 804231 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-749120/.minikube/files/etc/ssl/certs/7527072.pem -> /usr/share/ca-certificates/7527072.pem
I0916 23:57:32.343588 804231 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-472903
I0916 23:57:32.360782 804231 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33544 SSHKeyPath:/home/jenkins/minikube-integration/21550-749120/.minikube/machines/ha-472903/id_rsa Username:docker}
I0916 23:57:32.447595 804231 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/sa.pub
I0916 23:57:32.451217 804231 ssh_runner.go:447] scp /var/lib/minikube/certs/sa.pub --> memory (451 bytes)
I0916 23:57:32.464033 804231 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/sa.key
I0916 23:57:32.467273 804231 ssh_runner.go:447] scp /var/lib/minikube/certs/sa.key --> memory (1675 bytes)
I0916 23:57:32.478860 804231 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/front-proxy-ca.crt
I0916 23:57:32.482180 804231 ssh_runner.go:447] scp /var/lib/minikube/certs/front-proxy-ca.crt --> memory (1123 bytes)
I0916 23:57:32.493717 804231 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/front-proxy-ca.key
I0916 23:57:32.496761 804231 ssh_runner.go:447] scp /var/lib/minikube/certs/front-proxy-ca.key --> memory (1675 bytes)
I0916 23:57:32.507849 804231 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/etcd/ca.crt
I0916 23:57:32.511054 804231 ssh_runner.go:447] scp /var/lib/minikube/certs/etcd/ca.crt --> memory (1094 bytes)
I0916 23:57:32.523733 804231 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/etcd/ca.key
I0916 23:57:32.526954 804231 ssh_runner.go:447] scp /var/lib/minikube/certs/etcd/ca.key --> memory (1679 bytes)
I0916 23:57:32.538314 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0916 23:57:32.561866 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I0916 23:57:32.585900 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0916 23:57:32.610048 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I0916 23:57:32.634812 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1444 bytes)
I0916 23:57:32.659163 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I0916 23:57:32.682157 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0916 23:57:32.704663 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I0916 23:57:32.727856 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0916 23:57:32.752740 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/certs/752707.pem --> /usr/share/ca-certificates/752707.pem (1338 bytes)
I0916 23:57:32.775900 804231 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-749120/.minikube/files/etc/ssl/certs/7527072.pem --> /usr/share/ca-certificates/7527072.pem (1708 bytes)
I0916 23:57:32.798720 804231 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/sa.pub (451 bytes)
I0916 23:57:32.815542 804231 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/sa.key (1675 bytes)
I0916 23:57:32.832241 804231 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/front-proxy-ca.crt (1123 bytes)
I0916 23:57:32.848964 804231 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/front-proxy-ca.key (1675 bytes)
I0916 23:57:32.865780 804231 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/etcd/ca.crt (1094 bytes)
I0916 23:57:32.882614 804231 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/etcd/ca.key (1679 bytes)
I0916 23:57:32.899296 804231 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (744 bytes)
I0916 23:57:32.916516 804231 ssh_runner.go:195] Run: openssl version
I0916 23:57:32.921611 804231 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/7527072.pem && ln -fs /usr/share/ca-certificates/7527072.pem /etc/ssl/certs/7527072.pem"
I0916 23:57:32.930917 804231 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/7527072.pem
I0916 23:57:32.934241 804231 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Sep 16 23:54 /usr/share/ca-certificates/7527072.pem
I0916 23:57:32.934283 804231 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/7527072.pem
I0916 23:57:32.941354 804231 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/7527072.pem /etc/ssl/certs/3ec20f2e.0"
I0916 23:57:32.950335 804231 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0916 23:57:32.959292 804231 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0916 23:57:32.962576 804231 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Sep 16 23:48 /usr/share/ca-certificates/minikubeCA.pem
I0916 23:57:32.962623 804231 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0916 23:57:32.968989 804231 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I0916 23:57:32.978331 804231 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/752707.pem && ln -fs /usr/share/ca-certificates/752707.pem /etc/ssl/certs/752707.pem"
I0916 23:57:32.987188 804231 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/752707.pem
I0916 23:57:32.990463 804231 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Sep 16 23:54 /usr/share/ca-certificates/752707.pem
I0916 23:57:32.990497 804231 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/752707.pem
I0916 23:57:32.996813 804231 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/752707.pem /etc/ssl/certs/51391683.0"
I0916 23:57:33.005924 804231 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0916 23:57:33.009122 804231 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
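Note: the exit status 1 from stat above is what certs.go interprets as "cert doesn't exist, likely first start" rather than as a failure. A minimal Go sketch of that pattern, with an assumed helper name and run locally instead of over minikube's ssh_runner:

// Sketch only: treat a non-zero exit from stat(1) as "file absent",
// the same heuristic the log line above applies. Not minikube's code.
package main

import (
	"errors"
	"fmt"
	"os/exec"
)

func existsViaStat(path string) (bool, error) {
	err := exec.Command("stat", path).Run()
	if err == nil {
		return true, nil
	}
	var exitErr *exec.ExitError
	if errors.As(err, &exitErr) {
		return false, nil // stat ran and reported the path missing
	}
	return false, err // stat itself could not be started
}

func main() {
	ok, err := existsViaStat("/var/lib/minikube/certs/apiserver-kubelet-client.crt")
	fmt.Println(ok, err)
}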
I0916 23:57:33.009183 804231 kubeadm.go:926] updating node {m03 192.168.49.4 8443 v1.34.0 containerd true true} ...
I0916 23:57:33.009266 804231 kubeadm.go:938] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=ha-472903-m03 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.4
[Install]
config:
{KubernetesVersion:v1.34.0 ClusterName:ha-472903 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I0916 23:57:33.009291 804231 kube-vip.go:115] generating kube-vip config ...
I0916 23:57:33.009319 804231 ssh_runner.go:195] Run: sudo sh -c "lsmod | grep ip_vs"
I0916 23:57:33.021189 804231 kube-vip.go:163] giving up enabling control-plane load-balancing as ipvs kernel modules appears not to be available: sudo sh -c "lsmod | grep ip_vs": Process exited with status 1
stdout:
stderr:
I0916 23:57:33.021246 804231 kube-vip.go:137] kube-vip config:
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
name: kube-vip
namespace: kube-system
spec:
containers:
- args:
- manager
env:
- name: vip_arp
value: "true"
- name: port
value: "8443"
- name: vip_nodename
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: vip_interface
value: eth0
- name: vip_cidr
value: "32"
- name: dns_mode
value: first
- name: cp_enable
value: "true"
- name: cp_namespace
value: kube-system
- name: vip_leaderelection
value: "true"
- name: vip_leasename
value: plndr-cp-lock
- name: vip_leaseduration
value: "5"
- name: vip_renewdeadline
value: "3"
- name: vip_retryperiod
value: "1"
- name: address
value: 192.168.49.254
- name: prometheus_server
value: :2112
image: ghcr.io/kube-vip/kube-vip:v1.0.0
imagePullPolicy: IfNotPresent
name: kube-vip
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
- NET_RAW
volumeMounts:
- mountPath: /etc/kubernetes/admin.conf
name: kubeconfig
hostAliases:
- hostnames:
- kubernetes
ip: 127.0.0.1
hostNetwork: true
volumes:
- hostPath:
path: "/etc/kubernetes/admin.conf"
name: kubeconfig
status: {}
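The lsmod probe above is what decides whether the generated kube-vip manifest enables control-plane load-balancing; because ip_vs is not loaded on this node, the manifest carries only the ARP/leader-election settings and no IPVS load-balancer options. A rough Go sketch of an equivalent module check that reads /proc/modules instead of shelling out to lsmod (illustrative only, not minikube's kube-vip.go):

// Sketch: detect the ip_vs kernel module by scanning /proc/modules,
// the same information `lsmod | grep ip_vs` reports.
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

func ipvsLoaded() (bool, error) {
	f, err := os.Open("/proc/modules")
	if err != nil {
		return false, err
	}
	defer f.Close()
	sc := bufio.NewScanner(f)
	for sc.Scan() {
		// matches "ip_vs" and its helpers such as "ip_vs_rr"
		if strings.HasPrefix(sc.Text(), "ip_vs") {
			return true, nil
		}
	}
	return false, sc.Err()
}

func main() {
	ok, err := ipvsLoaded()
	fmt.Println("ip_vs loaded:", ok, err) // false on this node, hence no LB mode above
}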
I0916 23:57:33.021293 804231 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.0
I0916 23:57:33.029533 804231 binaries.go:44] Found k8s binaries, skipping transfer
I0916 23:57:33.029576 804231 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /etc/kubernetes/manifests
I0916 23:57:33.038861 804231 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (317 bytes)
I0916 23:57:33.056092 804231 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0916 23:57:33.075506 804231 ssh_runner.go:362] scp memory --> /etc/kubernetes/manifests/kube-vip.yaml (1358 bytes)
I0916 23:57:33.093918 804231 ssh_runner.go:195] Run: grep 192.168.49.254 control-plane.minikube.internal$ /etc/hosts
I0916 23:57:33.097171 804231 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.254 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0916 23:57:33.107668 804231 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0916 23:57:33.167706 804231 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0916 23:57:33.188453 804231 host.go:66] Checking if "ha-472903" exists ...
I0916 23:57:33.188671 804231 start.go:317] joinCluster: &{Name:ha-472903 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:ha-472903 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[]
DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true} {Name:m02 IP:192.168.49.3 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true} {Name:m03 IP:192.168.49.4 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:false efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:
false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:false storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVM
netPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0916 23:57:33.188781 804231 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm token create --print-join-command --ttl=0"
I0916 23:57:33.188819 804231 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-472903
I0916 23:57:33.210165 804231 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33544 SSHKeyPath:/home/jenkins/minikube-integration/21550-749120/.minikube/machines/ha-472903/id_rsa Username:docker}
I0916 23:57:33.351871 804231 start.go:343] trying to join control-plane node "m03" to cluster: &{Name:m03 IP:192.168.49.4 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I0916 23:57:33.351930 804231 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm join control-plane.minikube.internal:8443 --token uj456s.97hymgg3kmg6owuv --discovery-token-ca-cert-hash sha256:52c78ec9ad9a2dc0941e43ce337b864c76ea573e452bc75ed737e69ad76deac1 --ignore-preflight-errors=all --cri-socket unix:///run/containerd/containerd.sock --node-name=ha-472903-m03 --control-plane --apiserver-advertise-address=192.168.49.4 --apiserver-bind-port=8443"
I0916 23:57:51.860237 804231 ssh_runner.go:235] Completed: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm join control-plane.minikube.internal:8443 --token uj456s.97hymgg3kmg6owuv --discovery-token-ca-cert-hash sha256:52c78ec9ad9a2dc0941e43ce337b864c76ea573e452bc75ed737e69ad76deac1 --ignore-preflight-errors=all --cri-socket unix:///run/containerd/containerd.sock --node-name=ha-472903-m03 --control-plane --apiserver-advertise-address=192.168.49.4 --apiserver-bind-port=8443": (18.508258539s)
I0916 23:57:51.860308 804231 ssh_runner.go:195] Run: /bin/bash -c "sudo systemctl daemon-reload && sudo systemctl enable kubelet && sudo systemctl start kubelet"
I0916 23:57:52.080986 804231 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes ha-472903-m03 minikube.k8s.io/updated_at=2025_09_16T23_57_52_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=9829f0bc17c523e4378d28e0c25741106f24f00a minikube.k8s.io/name=ha-472903 minikube.k8s.io/primary=false
I0916 23:57:52.152525 804231 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig taint nodes ha-472903-m03 node-role.kubernetes.io/control-plane:NoSchedule-
I0916 23:57:52.226560 804231 start.go:319] duration metric: took 19.037884553s to joinCluster
I0916 23:57:52.226624 804231 start.go:235] Will wait 6m0s for node &{Name:m03 IP:192.168.49.4 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I0916 23:57:52.226912 804231 config.go:182] Loaded profile config "ha-472903": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.0
I0916 23:57:52.227744 804231 out.go:179] * Verifying Kubernetes components...
I0916 23:57:52.228620 804231 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0916 23:57:52.334638 804231 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0916 23:57:52.349036 804231 kapi.go:59] client config for ha-472903: &rest.Config{Host:"https://192.168.49.254:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/client.crt", KeyFile:"/home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/client.key", CAFile:"/home/jenkins/minikube-integration/21550-749120/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(n
il)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x27f4620), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), WarningHandlerWithContext:rest.WarningHandlerWithContext(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
W0916 23:57:52.349105 804231 kubeadm.go:483] Overriding stale ClientConfig host https://192.168.49.254:8443 with https://192.168.49.2:8443
I0916 23:57:52.349317 804231 node_ready.go:35] waiting up to 6m0s for node "ha-472903-m03" to be "Ready" ...
I0916 23:57:54.352346 804231 node_ready.go:49] node "ha-472903-m03" is "Ready"
I0916 23:57:54.352374 804231 node_ready.go:38] duration metric: took 2.003044453s for node "ha-472903-m03" to be "Ready" ...
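The node_ready wait above amounts to reading the node's NodeReady condition through the API server until it reports True. A compact client-go sketch of that single check (kubeconfig path and node name taken from this run; the helper itself is an illustration, not minikube's node_ready.go):

// Sketch: report whether a node's Ready condition is True.
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func nodeReady(client kubernetes.Interface, name string) (bool, error) {
	node, err := client.CoreV1().Nodes().Get(context.Background(), name, metav1.GetOptions{})
	if err != nil {
		return false, err
	}
	for _, c := range node.Status.Conditions {
		if c.Type == corev1.NodeReady {
			return c.Status == corev1.ConditionTrue, nil
		}
	}
	return false, nil
}

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/var/lib/minikube/kubeconfig")
	if err != nil {
		panic(err)
	}
	ready, err := nodeReady(kubernetes.NewForConfigOrDie(cfg), "ha-472903-m03")
	fmt.Println(ready, err)
}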
I0916 23:57:54.352389 804231 api_server.go:52] waiting for apiserver process to appear ...
I0916 23:57:54.352476 804231 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0916 23:57:54.365259 804231 api_server.go:72] duration metric: took 2.138606454s to wait for apiserver process to appear ...
I0916 23:57:54.365280 804231 api_server.go:88] waiting for apiserver healthz status ...
I0916 23:57:54.365298 804231 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
I0916 23:57:54.370985 804231 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
ok
I0916 23:57:54.371831 804231 api_server.go:141] control plane version: v1.34.0
I0916 23:57:54.371850 804231 api_server.go:131] duration metric: took 6.564025ms to wait for apiserver health ...
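The healthz check above is a plain HTTPS GET against the apiserver, trusting the cluster CA that was copied to the node earlier in this log. A minimal sketch of such a probe (endpoint and CA path from this run; assumes the endpoint is reachable without client credentials, as the 200 above suggests):

// Sketch: GET https://<apiserver>/healthz using the cluster CA.
package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	ca, err := os.ReadFile("/var/lib/minikube/certs/ca.crt") // CA copied earlier in this log
	if err != nil {
		panic(err)
	}
	pool := x509.NewCertPool()
	pool.AppendCertsFromPEM(ca)
	client := &http.Client{Transport: &http.Transport{TLSClientConfig: &tls.Config{RootCAs: pool}}}
	resp, err := client.Get("https://192.168.49.2:8443/healthz")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(body)) // expect 200 and "ok"
}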
I0916 23:57:54.371858 804231 system_pods.go:43] waiting for kube-system pods to appear ...
I0916 23:57:54.376785 804231 system_pods.go:59] 27 kube-system pods found
I0916 23:57:54.376811 804231 system_pods.go:61] "coredns-66bc5c9577-c94hz" [774f1c0f-9759-44c2-957d-5a97670f951b] Running
I0916 23:57:54.376815 804231 system_pods.go:61] "coredns-66bc5c9577-qn8m7" [1c58205e-e865-42fc-8282-23e3d779ee97] Running
I0916 23:57:54.376818 804231 system_pods.go:61] "etcd-ha-472903" [e333577b-838c-41c5-ba86-ce3d7de57077] Running
I0916 23:57:54.376822 804231 system_pods.go:61] "etcd-ha-472903-m02" [8a478117-c53d-4621-aa09-be3c16d386c0] Running
I0916 23:57:54.376824 804231 system_pods.go:61] "etcd-ha-472903-m03" [73e10c6a-306a-4c7e-b816-1b8d6b815292] Pending
I0916 23:57:54.376830 804231 system_pods.go:61] "kindnet-2dqnn" [f5c4164d-0d88-4b7b-bc52-18a7e211fe98] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kindnet-2dqnn": pod kindnet-2dqnn is already assigned to node "ha-472903-m03")
I0916 23:57:54.376833 804231 system_pods.go:61] "kindnet-lh7dv" [1da43ca7-9af7-4573-9cdc-fd21b098ca2c] Running
I0916 23:57:54.376838 804231 system_pods.go:61] "kindnet-q7c7s" [85db5b30-8ace-4bb0-8886-32b9ca032b2b] Running
I0916 23:57:54.376842 804231 system_pods.go:61] "kindnet-wwdfr" [e86a6e30-712e-4d39-a235-87489d16c0f3] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kindnet-wwdfr": pod kindnet-wwdfr is already assigned to node "ha-472903-m03")
I0916 23:57:54.376849 804231 system_pods.go:61] "kindnet-x6twd" [f2346479-1adb-4bc7-af07-971525be2b05] Pending: PodScheduled:SchedulerError (pod f2346479-1adb-4bc7-af07-971525be2b05(kube-system/kindnet-x6twd) is in the cache, so can't be assumed)
I0916 23:57:54.376853 804231 system_pods.go:61] "kube-apiserver-ha-472903" [e2844751-3962-4753-8b63-79c124dd5fd7] Running
I0916 23:57:54.376858 804231 system_pods.go:61] "kube-apiserver-ha-472903-m02" [6675419c-7693-4970-b73c-8415bcda1684] Running
I0916 23:57:54.376861 804231 system_pods.go:61] "kube-apiserver-ha-472903-m03" [8c79e747-e193-4471-a4be-ab4d604998ad] Pending
I0916 23:57:54.376867 804231 system_pods.go:61] "kube-controller-manager-ha-472903" [be5cfd0b-a3b9-44cf-8cde-74e9eb89c738] Running
I0916 23:57:54.376870 804231 system_pods.go:61] "kube-controller-manager-ha-472903-m02" [54f6e7e0-0a78-4651-b24f-f902c6bf7efb] Running
I0916 23:57:54.376873 804231 system_pods.go:61] "kube-controller-manager-ha-472903-m03" [d48b1c84-6653-43e8-9322-ae2c64471dde] Pending
I0916 23:57:54.376876 804231 system_pods.go:61] "kube-proxy-58lkb" [32fed88c-ce9e-4536-8e96-04ab5b4f5d42] Running
I0916 23:57:54.376881 804231 system_pods.go:61] "kube-proxy-d4m8f" [d4a70eec-48a7-4ea6-871a-1b5ed2beca9a] Running
I0916 23:57:54.376885 804231 system_pods.go:61] "kube-proxy-kn6nb" [53644856-9fda-4556-bbb5-12254c4b00a3] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kube-proxy-kn6nb": pod kube-proxy-kn6nb is already assigned to node "ha-472903-m03")
I0916 23:57:54.376889 804231 system_pods.go:61] "kube-proxy-xhlnz" [1967fed1-7529-46d0-accd-ab74751b47fa] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kube-proxy-xhlnz": pod kube-proxy-xhlnz is already assigned to node "ha-472903-m03")
I0916 23:57:54.376894 804231 system_pods.go:61] "kube-scheduler-ha-472903" [e949de65-b218-45cb-abe7-79b704aae473] Running
I0916 23:57:54.376897 804231 system_pods.go:61] "kube-scheduler-ha-472903-m02" [08b5a4f0-3aa6-4a82-b171-afc1eafcd4c2] Running
I0916 23:57:54.376900 804231 system_pods.go:61] "kube-scheduler-ha-472903-m03" [7b954c46-3b8c-47e7-b10c-a20dd936d45c] Pending
I0916 23:57:54.376904 804231 system_pods.go:61] "kube-vip-ha-472903" [ccdab212-cf0c-4bf0-958b-173e1008f7bc] Running
I0916 23:57:54.376907 804231 system_pods.go:61] "kube-vip-ha-472903-m02" [748f096f-bec6-4de8-92f0-128db827bdd6] Running
I0916 23:57:54.376910 804231 system_pods.go:61] "kube-vip-ha-472903-m03" [62b1f237-95c3-4a40-b3b7-a519f7c80ad4] Pending
I0916 23:57:54.376913 804231 system_pods.go:61] "storage-provisioner" [ac7f283e-4d28-46cf-a519-bd227237d5e7] Running
I0916 23:57:54.376918 804231 system_pods.go:74] duration metric: took 5.052009ms to wait for pod list to return data ...
I0916 23:57:54.376925 804231 default_sa.go:34] waiting for default service account to be created ...
I0916 23:57:54.378969 804231 default_sa.go:45] found service account: "default"
I0916 23:57:54.378989 804231 default_sa.go:55] duration metric: took 2.056584ms for default service account to be created ...
I0916 23:57:54.378999 804231 system_pods.go:116] waiting for k8s-apps to be running ...
I0916 23:57:54.383753 804231 system_pods.go:86] 27 kube-system pods found
I0916 23:57:54.383781 804231 system_pods.go:89] "coredns-66bc5c9577-c94hz" [774f1c0f-9759-44c2-957d-5a97670f951b] Running
I0916 23:57:54.383790 804231 system_pods.go:89] "coredns-66bc5c9577-qn8m7" [1c58205e-e865-42fc-8282-23e3d779ee97] Running
I0916 23:57:54.383796 804231 system_pods.go:89] "etcd-ha-472903" [e333577b-838c-41c5-ba86-ce3d7de57077] Running
I0916 23:57:54.383802 804231 system_pods.go:89] "etcd-ha-472903-m02" [8a478117-c53d-4621-aa09-be3c16d386c0] Running
I0916 23:57:54.383812 804231 system_pods.go:89] "etcd-ha-472903-m03" [73e10c6a-306a-4c7e-b816-1b8d6b815292] Pending
I0916 23:57:54.383821 804231 system_pods.go:89] "kindnet-2dqnn" [f5c4164d-0d88-4b7b-bc52-18a7e211fe98] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kindnet-2dqnn": pod kindnet-2dqnn is already assigned to node "ha-472903-m03")
I0916 23:57:54.383829 804231 system_pods.go:89] "kindnet-lh7dv" [1da43ca7-9af7-4573-9cdc-fd21b098ca2c] Running
I0916 23:57:54.383837 804231 system_pods.go:89] "kindnet-q7c7s" [85db5b30-8ace-4bb0-8886-32b9ca032b2b] Running
I0916 23:57:54.383842 804231 system_pods.go:89] "kindnet-wwdfr" [e86a6e30-712e-4d39-a235-87489d16c0f3] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kindnet-wwdfr": pod kindnet-wwdfr is already assigned to node "ha-472903-m03")
I0916 23:57:54.383852 804231 system_pods.go:89] "kindnet-x6twd" [f2346479-1adb-4bc7-af07-971525be2b05] Pending: PodScheduled:SchedulerError (pod f2346479-1adb-4bc7-af07-971525be2b05(kube-system/kindnet-x6twd) is in the cache, so can't be assumed)
I0916 23:57:54.383863 804231 system_pods.go:89] "kube-apiserver-ha-472903" [e2844751-3962-4753-8b63-79c124dd5fd7] Running
I0916 23:57:54.383874 804231 system_pods.go:89] "kube-apiserver-ha-472903-m02" [6675419c-7693-4970-b73c-8415bcda1684] Running
I0916 23:57:54.383881 804231 system_pods.go:89] "kube-apiserver-ha-472903-m03" [8c79e747-e193-4471-a4be-ab4d604998ad] Pending
I0916 23:57:54.383887 804231 system_pods.go:89] "kube-controller-manager-ha-472903" [be5cfd0b-a3b9-44cf-8cde-74e9eb89c738] Running
I0916 23:57:54.383895 804231 system_pods.go:89] "kube-controller-manager-ha-472903-m02" [54f6e7e0-0a78-4651-b24f-f902c6bf7efb] Running
I0916 23:57:54.383900 804231 system_pods.go:89] "kube-controller-manager-ha-472903-m03" [d48b1c84-6653-43e8-9322-ae2c64471dde] Pending
I0916 23:57:54.383908 804231 system_pods.go:89] "kube-proxy-58lkb" [32fed88c-ce9e-4536-8e96-04ab5b4f5d42] Running
I0916 23:57:54.383913 804231 system_pods.go:89] "kube-proxy-d4m8f" [d4a70eec-48a7-4ea6-871a-1b5ed2beca9a] Running
I0916 23:57:54.383921 804231 system_pods.go:89] "kube-proxy-kn6nb" [53644856-9fda-4556-bbb5-12254c4b00a3] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kube-proxy-kn6nb": pod kube-proxy-kn6nb is already assigned to node "ha-472903-m03")
I0916 23:57:54.383930 804231 system_pods.go:89] "kube-proxy-xhlnz" [1967fed1-7529-46d0-accd-ab74751b47fa] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kube-proxy-xhlnz": pod kube-proxy-xhlnz is already assigned to node "ha-472903-m03")
I0916 23:57:54.383939 804231 system_pods.go:89] "kube-scheduler-ha-472903" [e949de65-b218-45cb-abe7-79b704aae473] Running
I0916 23:57:54.383946 804231 system_pods.go:89] "kube-scheduler-ha-472903-m02" [08b5a4f0-3aa6-4a82-b171-afc1eafcd4c2] Running
I0916 23:57:54.383955 804231 system_pods.go:89] "kube-scheduler-ha-472903-m03" [7b954c46-3b8c-47e7-b10c-a20dd936d45c] Pending
I0916 23:57:54.383962 804231 system_pods.go:89] "kube-vip-ha-472903" [ccdab212-cf0c-4bf0-958b-173e1008f7bc] Running
I0916 23:57:54.383967 804231 system_pods.go:89] "kube-vip-ha-472903-m02" [748f096f-bec6-4de8-92f0-128db827bdd6] Running
I0916 23:57:54.383975 804231 system_pods.go:89] "kube-vip-ha-472903-m03" [62b1f237-95c3-4a40-b3b7-a519f7c80ad4] Pending
I0916 23:57:54.383980 804231 system_pods.go:89] "storage-provisioner" [ac7f283e-4d28-46cf-a519-bd227237d5e7] Running
I0916 23:57:54.383991 804231 system_pods.go:126] duration metric: took 4.985254ms to wait for k8s-apps to be running ...
I0916 23:57:54.384002 804231 system_svc.go:44] waiting for kubelet service to be running ....
I0916 23:57:54.384056 804231 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0916 23:57:54.395540 804231 system_svc.go:56] duration metric: took 11.532177ms WaitForService to wait for kubelet
I0916 23:57:54.395557 804231 kubeadm.go:578] duration metric: took 2.168909422s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0916 23:57:54.395577 804231 node_conditions.go:102] verifying NodePressure condition ...
I0916 23:57:54.398165 804231 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0916 23:57:54.398183 804231 node_conditions.go:123] node cpu capacity is 8
I0916 23:57:54.398194 804231 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0916 23:57:54.398197 804231 node_conditions.go:123] node cpu capacity is 8
I0916 23:57:54.398201 804231 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0916 23:57:54.398205 804231 node_conditions.go:123] node cpu capacity is 8
I0916 23:57:54.398209 804231 node_conditions.go:105] duration metric: took 2.627179ms to run NodePressure ...
I0916 23:57:54.398219 804231 start.go:241] waiting for startup goroutines ...
I0916 23:57:54.398248 804231 start.go:255] writing updated cluster config ...
I0916 23:57:54.398554 804231 ssh_runner.go:195] Run: rm -f paused
I0916 23:57:54.402187 804231 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I0916 23:57:54.402686 804231 kapi.go:59] client config for ha-472903: &rest.Config{Host:"https://192.168.49.254:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/client.crt", KeyFile:"/home/jenkins/minikube-integration/21550-749120/.minikube/profiles/ha-472903/client.key", CAFile:"/home/jenkins/minikube-integration/21550-749120/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(n
il)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x27f4620), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), WarningHandlerWithContext:rest.WarningHandlerWithContext(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
I0916 23:57:54.405144 804231 pod_ready.go:83] waiting for pod "coredns-66bc5c9577-c94hz" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:57:54.409401 804231 pod_ready.go:94] pod "coredns-66bc5c9577-c94hz" is "Ready"
I0916 23:57:54.409438 804231 pod_ready.go:86] duration metric: took 4.271645ms for pod "coredns-66bc5c9577-c94hz" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:57:54.409448 804231 pod_ready.go:83] waiting for pod "coredns-66bc5c9577-qn8m7" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:57:54.413536 804231 pod_ready.go:94] pod "coredns-66bc5c9577-qn8m7" is "Ready"
I0916 23:57:54.413553 804231 pod_ready.go:86] duration metric: took 4.095453ms for pod "coredns-66bc5c9577-qn8m7" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:57:54.415699 804231 pod_ready.go:83] waiting for pod "etcd-ha-472903" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:57:54.419599 804231 pod_ready.go:94] pod "etcd-ha-472903" is "Ready"
I0916 23:57:54.419618 804231 pod_ready.go:86] duration metric: took 3.899664ms for pod "etcd-ha-472903" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:57:54.419627 804231 pod_ready.go:83] waiting for pod "etcd-ha-472903-m02" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:57:54.423363 804231 pod_ready.go:94] pod "etcd-ha-472903-m02" is "Ready"
I0916 23:57:54.423380 804231 pod_ready.go:86] duration metric: took 3.746731ms for pod "etcd-ha-472903-m02" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:57:54.423386 804231 pod_ready.go:83] waiting for pod "etcd-ha-472903-m03" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:57:54.603706 804231 request.go:683] "Waited before sending request" delay="180.227617ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/etcd-ha-472903-m03"
I0916 23:57:54.803902 804231 request.go:683] "Waited before sending request" delay="197.349252ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-472903-m03"
I0916 23:57:55.003954 804231 request.go:683] "Waited before sending request" delay="80.206914ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/etcd-ha-472903-m03"
I0916 23:57:55.203362 804231 request.go:683] "Waited before sending request" delay="196.197515ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-472903-m03"
I0916 23:57:55.206052 804231 pod_ready.go:94] pod "etcd-ha-472903-m03" is "Ready"
I0916 23:57:55.206075 804231 pod_ready.go:86] duration metric: took 782.683771ms for pod "etcd-ha-472903-m03" in "kube-system" namespace to be "Ready" or be gone ...
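The recurring "Waited before sending request ... client-side throttling" lines come from client-go's built-in rate limiter: with QPS and Burst left at zero in the rest.Config dumped above, the client falls back to its defaults (roughly 5 requests/second, burst 10), so bursts of GETs get spaced out. A caller wanting fewer of these waits could raise the limits, for example (values illustrative, not what minikube uses):

// Sketch: raise client-go's client-side rate limits on a rest.Config.
package main

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/var/lib/minikube/kubeconfig")
	if err != nil {
		panic(err)
	}
	cfg.QPS = 50 // zero means client-go's default of 5
	cfg.Burst = 100
	_ = kubernetes.NewForConfigOrDie(cfg)
}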
I0916 23:57:55.403450 804231 request.go:683] "Waited before sending request" delay="197.254129ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods?labelSelector=component%3Dkube-apiserver"
I0916 23:57:55.406629 804231 pod_ready.go:83] waiting for pod "kube-apiserver-ha-472903" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:57:55.604081 804231 request.go:683] "Waited before sending request" delay="197.327981ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-apiserver-ha-472903"
I0916 23:57:55.803277 804231 request.go:683] "Waited before sending request" delay="196.28238ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-472903"
I0916 23:57:55.806023 804231 pod_ready.go:94] pod "kube-apiserver-ha-472903" is "Ready"
I0916 23:57:55.806053 804231 pod_ready.go:86] duration metric: took 399.400731ms for pod "kube-apiserver-ha-472903" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:57:55.806064 804231 pod_ready.go:83] waiting for pod "kube-apiserver-ha-472903-m02" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:57:56.003360 804231 request.go:683] "Waited before sending request" delay="197.181089ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-apiserver-ha-472903-m02"
I0916 23:57:56.203591 804231 request.go:683] "Waited before sending request" delay="197.334062ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-472903-m02"
I0916 23:57:56.206593 804231 pod_ready.go:94] pod "kube-apiserver-ha-472903-m02" is "Ready"
I0916 23:57:56.206619 804231 pod_ready.go:86] duration metric: took 400.548564ms for pod "kube-apiserver-ha-472903-m02" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:57:56.206627 804231 pod_ready.go:83] waiting for pod "kube-apiserver-ha-472903-m03" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:57:56.404053 804231 request.go:683] "Waited before sending request" delay="197.330591ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-apiserver-ha-472903-m03"
I0916 23:57:56.603366 804231 request.go:683] "Waited before sending request" delay="196.334008ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-472903-m03"
I0916 23:57:56.606216 804231 pod_ready.go:94] pod "kube-apiserver-ha-472903-m03" is "Ready"
I0916 23:57:56.606240 804231 pod_ready.go:86] duration metric: took 399.60823ms for pod "kube-apiserver-ha-472903-m03" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:57:56.803696 804231 request.go:683] "Waited before sending request" delay="197.341894ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods?labelSelector=component%3Dkube-controller-manager"
I0916 23:57:56.806878 804231 pod_ready.go:83] waiting for pod "kube-controller-manager-ha-472903" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:57:57.003237 804231 request.go:683] "Waited before sending request" delay="196.261492ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-472903"
I0916 23:57:57.203189 804231 request.go:683] "Waited before sending request" delay="197.16206ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-472903"
I0916 23:57:57.205847 804231 pod_ready.go:94] pod "kube-controller-manager-ha-472903" is "Ready"
I0916 23:57:57.205870 804231 pod_ready.go:86] duration metric: took 398.97003ms for pod "kube-controller-manager-ha-472903" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:57:57.205878 804231 pod_ready.go:83] waiting for pod "kube-controller-manager-ha-472903-m02" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:57:57.403223 804231 request.go:683] "Waited before sending request" delay="197.233762ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-472903-m02"
I0916 23:57:57.603503 804231 request.go:683] "Waited before sending request" delay="197.308924ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-472903-m02"
I0916 23:57:57.606309 804231 pod_ready.go:94] pod "kube-controller-manager-ha-472903-m02" is "Ready"
I0916 23:57:57.606331 804231 pod_ready.go:86] duration metric: took 400.447455ms for pod "kube-controller-manager-ha-472903-m02" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:57:57.606339 804231 pod_ready.go:83] waiting for pod "kube-controller-manager-ha-472903-m03" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:57:57.803572 804231 request.go:683] "Waited before sending request" delay="197.156861ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-472903-m03"
I0916 23:57:58.003564 804231 request.go:683] "Waited before sending request" delay="197.308739ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-472903-m03"
I0916 23:57:58.006495 804231 pod_ready.go:94] pod "kube-controller-manager-ha-472903-m03" is "Ready"
I0916 23:57:58.006527 804231 pod_ready.go:86] duration metric: took 400.177209ms for pod "kube-controller-manager-ha-472903-m03" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:57:58.203971 804231 request.go:683] "Waited before sending request" delay="197.330656ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods?labelSelector=k8s-app%3Dkube-proxy"
I0916 23:57:58.207087 804231 pod_ready.go:83] waiting for pod "kube-proxy-58lkb" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:57:58.403484 804231 request.go:683] "Waited before sending request" delay="196.298118ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-proxy-58lkb"
I0916 23:57:58.603727 804231 request.go:683] "Waited before sending request" delay="197.238459ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-472903-m02"
I0916 23:57:58.606561 804231 pod_ready.go:94] pod "kube-proxy-58lkb" is "Ready"
I0916 23:57:58.606586 804231 pod_ready.go:86] duration metric: took 399.476011ms for pod "kube-proxy-58lkb" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:57:58.606593 804231 pod_ready.go:83] waiting for pod "kube-proxy-d4m8f" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:57:58.804003 804231 request.go:683] "Waited before sending request" delay="197.323847ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-proxy-d4m8f"
I0916 23:57:59.003937 804231 request.go:683] "Waited before sending request" delay="197.340178ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-472903"
I0916 23:57:59.006899 804231 pod_ready.go:94] pod "kube-proxy-d4m8f" is "Ready"
I0916 23:57:59.006927 804231 pod_ready.go:86] duration metric: took 400.327971ms for pod "kube-proxy-d4m8f" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:57:59.006938 804231 pod_ready.go:83] waiting for pod "kube-proxy-kn6nb" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:57:59.203366 804231 request.go:683] "Waited before sending request" delay="196.341882ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-proxy-kn6nb"
I0916 23:57:59.403608 804231 request.go:683] "Waited before sending request" delay="197.193431ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-472903-m03"
I0916 23:57:59.604047 804231 request.go:683] "Waited before sending request" delay="96.244025ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-proxy-kn6nb"
I0916 23:57:59.803112 804231 request.go:683] "Waited before sending request" delay="196.282766ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-472903-m03"
I0916 23:58:00.203120 804231 request.go:683] "Waited before sending request" delay="192.276334ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-472903-m03"
I0916 23:58:00.603459 804231 request.go:683] "Waited before sending request" delay="93.218157ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-472903-m03"
W0916 23:58:01.014543 804231 pod_ready.go:104] pod "kube-proxy-kn6nb" is not "Ready", error: <nil>
W0916 23:58:03.512871 804231 pod_ready.go:104] pod "kube-proxy-kn6nb" is not "Ready", error: <nil>
W0916 23:58:06.012965 804231 pod_ready.go:104] pod "kube-proxy-kn6nb" is not "Ready", error: <nil>
W0916 23:58:08.512763 804231 pod_ready.go:104] pod "kube-proxy-kn6nb" is not "Ready", error: <nil>
W0916 23:58:11.012966 804231 pod_ready.go:104] pod "kube-proxy-kn6nb" is not "Ready", error: <nil>
W0916 23:58:13.013166 804231 pod_ready.go:104] pod "kube-proxy-kn6nb" is not "Ready", error: <nil>
W0916 23:58:15.512655 804231 pod_ready.go:104] pod "kube-proxy-kn6nb" is not "Ready", error: <nil>
W0916 23:58:18.012615 804231 pod_ready.go:104] pod "kube-proxy-kn6nb" is not "Ready", error: <nil>
W0916 23:58:20.513188 804231 pod_ready.go:104] pod "kube-proxy-kn6nb" is not "Ready", error: <nil>
W0916 23:58:23.012908 804231 pod_ready.go:104] pod "kube-proxy-kn6nb" is not "Ready", error: <nil>
W0916 23:58:25.013240 804231 pod_ready.go:104] pod "kube-proxy-kn6nb" is not "Ready", error: <nil>
W0916 23:58:27.512733 804231 pod_ready.go:104] pod "kube-proxy-kn6nb" is not "Ready", error: <nil>
W0916 23:58:30.012142 804231 pod_ready.go:104] pod "kube-proxy-kn6nb" is not "Ready", error: <nil>
W0916 23:58:32.012503 804231 pod_ready.go:104] pod "kube-proxy-kn6nb" is not "Ready", error: <nil>
W0916 23:58:34.013070 804231 pod_ready.go:104] pod "kube-proxy-kn6nb" is not "Ready", error: <nil>
W0916 23:58:36.512643 804231 pod_ready.go:104] pod "kube-proxy-kn6nb" is not "Ready", error: <nil>
I0916 23:58:37.014670 804231 pod_ready.go:94] pod "kube-proxy-kn6nb" is "Ready"
I0916 23:58:37.014697 804231 pod_ready.go:86] duration metric: took 38.007753603s for pod "kube-proxy-kn6nb" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:58:37.017732 804231 pod_ready.go:83] waiting for pod "kube-scheduler-ha-472903" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:58:37.022228 804231 pod_ready.go:94] pod "kube-scheduler-ha-472903" is "Ready"
I0916 23:58:37.022246 804231 pod_ready.go:86] duration metric: took 4.488553ms for pod "kube-scheduler-ha-472903" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:58:37.022253 804231 pod_ready.go:83] waiting for pod "kube-scheduler-ha-472903-m02" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:58:37.026173 804231 pod_ready.go:94] pod "kube-scheduler-ha-472903-m02" is "Ready"
I0916 23:58:37.026191 804231 pod_ready.go:86] duration metric: took 3.932068ms for pod "kube-scheduler-ha-472903-m02" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:58:37.026198 804231 pod_ready.go:83] waiting for pod "kube-scheduler-ha-472903-m03" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:58:37.030029 804231 pod_ready.go:94] pod "kube-scheduler-ha-472903-m03" is "Ready"
I0916 23:58:37.030046 804231 pod_ready.go:86] duration metric: took 3.843487ms for pod "kube-scheduler-ha-472903-m03" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:58:37.030054 804231 pod_ready.go:40] duration metric: took 42.627839542s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
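The per-pod waits above, including the 38s spent on kube-proxy-kn6nb, boil down to listing kube-system pods by label and re-checking their PodReady condition until every pod reports Ready. A self-contained client-go sketch of that loop (selector and namespace from the log; the 2s polling interval is an arbitrary choice, not minikube's):

// Sketch: poll kube-system pods matching a label until all are Ready.
package main

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func podReady(p *corev1.Pod) bool {
	for _, c := range p.Status.Conditions {
		if c.Type == corev1.PodReady {
			return c.Status == corev1.ConditionTrue
		}
	}
	return false
}

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/var/lib/minikube/kubeconfig")
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)
	for {
		pods, err := client.CoreV1().Pods("kube-system").List(context.Background(),
			metav1.ListOptions{LabelSelector: "k8s-app=kube-proxy"})
		if err != nil {
			panic(err)
		}
		allReady := len(pods.Items) > 0
		for i := range pods.Items {
			allReady = allReady && podReady(&pods.Items[i])
		}
		if allReady {
			fmt.Println("all kube-proxy pods Ready")
			return
		}
		time.Sleep(2 * time.Second)
	}
}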
I0916 23:58:37.073472 804231 start.go:617] kubectl: 1.34.1, cluster: 1.34.0 (minor skew: 0)
I0916 23:58:37.074923 804231 out.go:179] * Done! kubectl is now configured to use "ha-472903" cluster and "default" namespace by default
==> container status <==
CONTAINER      IMAGE          CREATED         STATE    NAME                     ATTEMPT  POD ID         POD
0a41d8b587e02  8c811b4aec35f  12 minutes ago  Running  busybox                  0        a2422ee3e6e6d  busybox-7b57f96db7-6hrm6
f33de265effb1  6e38f40d628db  13 minutes ago  Running  storage-provisioner      1        1c0713f862ea0  storage-provisioner
9f103b05d2d6f  52546a367cc9e  13 minutes ago  Running  coredns                  0        9579263342827  coredns-66bc5c9577-c94hz
3b457407f10e3  52546a367cc9e  13 minutes ago  Running  coredns                  0        290cfb537788e  coredns-66bc5c9577-qn8m7
cc69d2451cb65  409467f978b4a  13 minutes ago  Running  kindnet-cni              0        3e17d6ae9b2a6  kindnet-lh7dv
f4767b6363ce9  6e38f40d628db  13 minutes ago  Exited   storage-provisioner      0        1c0713f862ea0  storage-provisioner
92dd4d116eb03  df0860106674d  13 minutes ago  Running  kube-proxy               0        8c0ecd5301326  kube-proxy-d4m8f
3cb75495f7a54  765655ea60781  13 minutes ago  Running  kube-vip                 0        4c425da29992d  kube-vip-ha-472903
bba28cace6502  46169d968e920  13 minutes ago  Running  kube-scheduler           0        f18dd7697c60f  kube-scheduler-ha-472903
087290a41f59c  a0af72f2ec6d6  13 minutes ago  Running  kube-controller-manager  0        0760ebe1d2a56  kube-controller-manager-ha-472903
0aba62132d764  90550c43ad2bc  13 minutes ago  Running  kube-apiserver           0        8ad1fa8bc0267  kube-apiserver-ha-472903
23c0af0bdbe95  5f1f5298c888d  13 minutes ago  Running  etcd                     0        b01a62742caec  etcd-ha-472903
==> containerd <==
Sep 16 23:57:20 ha-472903 containerd[765]: time="2025-09-16T23:57:20.857383931Z" level=info msg="StartContainer for \"9f103b05d2d6fd9df1ffca0135173363251e58587aa3f9093200d96a7302d315\""
Sep 16 23:57:20 ha-472903 containerd[765]: time="2025-09-16T23:57:20.915209442Z" level=info msg="StartContainer for \"9f103b05d2d6fd9df1ffca0135173363251e58587aa3f9093200d96a7302d315\" returns successfully"
Sep 16 23:57:26 ha-472903 containerd[765]: time="2025-09-16T23:57:26.847849669Z" level=info msg="received exit event container_id:\"f4767b6363ce9c18f8b183cfefd42f69a8b6845fea9e30eec23d90668bc0a3f8\" id:\"f4767b6363ce9c18f8b183cfefd42f69a8b6845fea9e30eec23d90668bc0a3f8\" pid:2188 exit_status:1 exited_at:{seconds:1758067046 nanos:847300745}"
Sep 16 23:57:29 ha-472903 containerd[765]: time="2025-09-16T23:57:29.084468964Z" level=info msg="shim disconnected" id=f4767b6363ce9c18f8b183cfefd42f69a8b6845fea9e30eec23d90668bc0a3f8 namespace=k8s.io
Sep 16 23:57:29 ha-472903 containerd[765]: time="2025-09-16T23:57:29.084514637Z" level=warning msg="cleaning up after shim disconnected" id=f4767b6363ce9c18f8b183cfefd42f69a8b6845fea9e30eec23d90668bc0a3f8 namespace=k8s.io
Sep 16 23:57:29 ha-472903 containerd[765]: time="2025-09-16T23:57:29.084528446Z" level=info msg="cleaning up dead shim" namespace=k8s.io
Sep 16 23:57:29 ha-472903 containerd[765]: time="2025-09-16T23:57:29.861023305Z" level=info msg="CreateContainer within sandbox \"1c0713f862ea047ef39e7ae39aea7b7769255565bbf61da2859ac341b5b32bca\" for container &ContainerMetadata{Name:storage-provisioner,Attempt:1,}"
Sep 16 23:57:29 ha-472903 containerd[765]: time="2025-09-16T23:57:29.875038922Z" level=info msg="CreateContainer within sandbox \"1c0713f862ea047ef39e7ae39aea7b7769255565bbf61da2859ac341b5b32bca\" for &ContainerMetadata{Name:storage-provisioner,Attempt:1,} returns container id \"f33de265effb1050318db82caef7df35706c6a78a2f601466a28e71f4048fedc\""
Sep 16 23:57:29 ha-472903 containerd[765]: time="2025-09-16T23:57:29.875884762Z" level=info msg="StartContainer for \"f33de265effb1050318db82caef7df35706c6a78a2f601466a28e71f4048fedc\""
Sep 16 23:57:29 ha-472903 containerd[765]: time="2025-09-16T23:57:29.929708067Z" level=info msg="StartContainer for \"f33de265effb1050318db82caef7df35706c6a78a2f601466a28e71f4048fedc\" returns successfully"
Sep 16 23:58:40 ha-472903 containerd[765]: time="2025-09-16T23:58:40.362974621Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox-7b57f96db7-6hrm6,Uid:bd03bad4-af1e-42d0-81fb-6fcaeaa8775e,Namespace:default,Attempt:0,}"
Sep 16 23:58:40 ha-472903 containerd[765]: time="2025-09-16T23:58:40.455106923Z" level=warning msg="error from *cgroupsv2.Manager.EventChan" error="failed to create inotify fd"
Sep 16 23:58:40 ha-472903 containerd[765]: time="2025-09-16T23:58:40.455480779Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox-7b57f96db7-6hrm6,Uid:bd03bad4-af1e-42d0-81fb-6fcaeaa8775e,Namespace:default,Attempt:0,} returns sandbox id \"a2422ee3e6e6de2b23238fbdd05d962f2c25009569227e21869b285e5353e70a\""
Sep 16 23:58:40 ha-472903 containerd[765]: time="2025-09-16T23:58:40.457290181Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28\""
Sep 16 23:58:42 ha-472903 containerd[765]: time="2025-09-16T23:58:42.440332779Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox:1.28\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Sep 16 23:58:42 ha-472903 containerd[765]: time="2025-09-16T23:58:42.440968214Z" level=info msg="stop pulling image gcr.io/k8s-minikube/busybox:1.28: active requests=0, bytes read=727667"
Sep 16 23:58:42 ha-472903 containerd[765]: time="2025-09-16T23:58:42.442025332Z" level=info msg="ImageCreate event name:\"sha256:8c811b4aec35f259572d0f79207bc0678df4c736eeec50bc9fec37ed936a472a\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Sep 16 23:58:42 ha-472903 containerd[765]: time="2025-09-16T23:58:42.443719507Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox@sha256:9afb80db71730dbb303fe00765cbf34bddbdc6b66e49897fc2e1861967584b12\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Sep 16 23:58:42 ha-472903 containerd[765]: time="2025-09-16T23:58:42.444221405Z" level=info msg="Pulled image \"gcr.io/k8s-minikube/busybox:1.28\" with image id \"sha256:8c811b4aec35f259572d0f79207bc0678df4c736eeec50bc9fec37ed936a472a\", repo tag \"gcr.io/k8s-minikube/busybox:1.28\", repo digest \"gcr.io/k8s-minikube/busybox@sha256:9afb80db71730dbb303fe00765cbf34bddbdc6b66e49897fc2e1861967584b12\", size \"725911\" in 1.986887608s"
Sep 16 23:58:42 ha-472903 containerd[765]: time="2025-09-16T23:58:42.444254598Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28\" returns image reference \"sha256:8c811b4aec35f259572d0f79207bc0678df4c736eeec50bc9fec37ed936a472a\""
Sep 16 23:58:42 ha-472903 containerd[765]: time="2025-09-16T23:58:42.447875079Z" level=info msg="CreateContainer within sandbox \"a2422ee3e6e6de2b23238fbdd05d962f2c25009569227e21869b285e5353e70a\" for container &ContainerMetadata{Name:busybox,Attempt:0,}"
Sep 16 23:58:42 ha-472903 containerd[765]: time="2025-09-16T23:58:42.457018566Z" level=info msg="CreateContainer within sandbox \"a2422ee3e6e6de2b23238fbdd05d962f2c25009569227e21869b285e5353e70a\" for &ContainerMetadata{Name:busybox,Attempt:0,} returns container id \"0a41d8b587e021b881cec481dd5e13b98d695cdba1671a8eb501413b121637d8\""
Sep 16 23:58:42 ha-472903 containerd[765]: time="2025-09-16T23:58:42.457508138Z" level=info msg="StartContainer for \"0a41d8b587e021b881cec481dd5e13b98d695cdba1671a8eb501413b121637d8\""
Sep 16 23:58:42 ha-472903 containerd[765]: time="2025-09-16T23:58:42.510633374Z" level=warning msg="error from *cgroupsv2.Manager.EventChan" error="failed to create inotify fd"
Sep 16 23:58:42 ha-472903 containerd[765]: time="2025-09-16T23:58:42.512731136Z" level=info msg="StartContainer for \"0a41d8b587e021b881cec481dd5e13b98d695cdba1671a8eb501413b121637d8\" returns successfully"
==> coredns [3b457407f10e357ce33da7fa3fb4333f8312f0d3e3570cf8528cdcac8f5a1d0f] <==
[INFO] plugin/reload: Running configuration SHA512 = 9e2996f8cb67ac53e0259ab1f8d615d07d1beb0bd07e6a1e39769c3bf486a905bb991cc47f8d2f14d0d3a90a87dfc625a0b4c524fed169d8158c40657c0694b1
CoreDNS-1.12.1
linux/amd64, go1.24.1, 707c7c1
[INFO] 127.0.0.1:47028 - 14410 "HINFO IN 8622750158419892651.814616782938826920. udp 56 false 512" NXDOMAIN qr,rd,ra 131 0.021349234s
[INFO] 10.244.1.2:52581 - 2 "PTR IN 10.0.96.10.in-addr.arpa. udp 41 false 512" NOERROR qr,aa,rd 116 0.000299614s
[INFO] 10.244.1.2:57899 - 3 "AAAA IN kubernetes.io. udp 31 false 512" NOERROR qr,rd,ra 140 0.012540337s
[INFO] 10.244.1.2:54323 - 5 "PTR IN 135.186.33.3.in-addr.arpa. udp 43 false 512" NOERROR qr,rd,ra 124 0.008980197s
[INFO] 10.244.1.2:53799 - 6 "PTR IN 90.167.197.15.in-addr.arpa. udp 44 false 512" NOERROR qr,rd,ra 126 0.009949044s
[INFO] 10.244.0.4:39485 - 2 "PTR IN 10.0.96.10.in-addr.arpa. udp 41 false 512" NOERROR qr,aa,rd 116 0.000157098s
[INFO] 10.244.0.4:57871 - 4 "A IN kubernetes.io. udp 31 false 512" NOERROR qr,rd,ra 89 0.000750185s
[INFO] 10.244.0.4:53410 - 5 "PTR IN 90.167.197.15.in-addr.arpa. udp 44 false 512" NOERROR qr,aa,rd,ra 126 0.000089028s
[INFO] 10.244.1.2:37733 - 2 "PTR IN 10.0.96.10.in-addr.arpa. udp 41 false 512" NOERROR qr,aa,rd 116 0.000150317s
[INFO] 10.244.1.2:59346 - 3 "AAAA IN kubernetes.default. udp 36 false 512" NXDOMAIN qr,rd,ra 111 0.028128363s
[INFO] 10.244.1.2:43091 - 6 "A IN kubernetes.default. udp 36 false 512" NXDOMAIN qr,rd,ra 111 0.01004668s
[INFO] 10.244.1.2:37227 - 7 "A IN kubernetes.default.default.svc.cluster.local. udp 62 false 512" NXDOMAIN qr,aa,rd 155 0.000191819s
[INFO] 10.244.1.2:40079 - 9 "PTR IN 1.0.96.10.in-addr.arpa. udp 40 false 512" NOERROR qr,aa,rd 112 0.000125376s
[INFO] 10.244.0.4:38168 - 5 "AAAA IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 147 0.000181114s
[INFO] 10.244.0.4:60067 - 6 "A IN kubernetes.default. udp 36 false 512" NXDOMAIN qr,aa,rd,ra 111 0.000087147s
[INFO] 10.244.0.4:47611 - 7 "A IN kubernetes.default.default.svc.cluster.local. udp 62 false 512" NXDOMAIN qr,aa,rd 155 0.000122939s
[INFO] 10.244.0.4:37626 - 8 "A IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 106 0.000121195s
[INFO] 10.244.1.2:42817 - 2 "PTR IN 10.0.96.10.in-addr.arpa. udp 41 false 512" NOERROR qr,aa,rd 116 0.000159509s
[INFO] 10.244.1.2:33910 - 3 "AAAA IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 147 0.000186538s
[INFO] 10.244.1.2:37929 - 5 "PTR IN 1.0.96.10.in-addr.arpa. udp 40 false 512" NOERROR qr,aa,rd 112 0.000109836s
[INFO] 10.244.0.4:50698 - 3 "AAAA IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 147 0.000212263s
[INFO] 10.244.0.4:33166 - 5 "PTR IN 1.0.96.10.in-addr.arpa. udp 40 false 512" NOERROR qr,aa,rd 112 0.000100167s
==> coredns [9f103b05d2d6fd9df1ffca0135173363251e58587aa3f9093200d96a7302d315] <==
maxprocs: Leaving GOMAXPROCS=8: CPU quota undefined
.:53
[INFO] plugin/reload: Running configuration SHA512 = 9e2996f8cb67ac53e0259ab1f8d615d07d1beb0bd07e6a1e39769c3bf486a905bb991cc47f8d2f14d0d3a90a87dfc625a0b4c524fed169d8158c40657c0694b1
CoreDNS-1.12.1
linux/amd64, go1.24.1, 707c7c1
[INFO] 127.0.0.1:45239 - 14115 "HINFO IN 5883645869461503498.3950535614037284853. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.058516241s
[INFO] 10.244.1.2:55352 - 4 "A IN kubernetes.io. udp 31 false 512" NOERROR qr,rd,ra 89 0.003252862s
[INFO] 10.244.0.4:33650 - 3 "AAAA IN kubernetes.io. udp 31 false 512" NOERROR qr,rd,ra 140 0.001640931s
[INFO] 10.244.0.4:50077 - 6 "PTR IN 135.186.33.3.in-addr.arpa. udp 43 false 512" NOERROR qr,rd,ra 124 0.000621363s
[INFO] 10.244.1.2:48439 - 4 "AAAA IN kubernetes.default.default.svc.cluster.local. udp 62 false 512" NXDOMAIN qr,aa,rd 155 0.000189187s
[INFO] 10.244.1.2:39582 - 5 "AAAA IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 147 0.000151327s
[INFO] 10.244.1.2:59539 - 8 "A IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 106 0.000140715s
[INFO] 10.244.0.4:42999 - 2 "PTR IN 10.0.96.10.in-addr.arpa. udp 41 false 512" NOERROR qr,aa,rd 116 0.000177514s
[INFO] 10.244.0.4:36769 - 3 "AAAA IN kubernetes.default. udp 36 false 512" NXDOMAIN qr,rd,ra 111 0.010694753s
[INFO] 10.244.0.4:53074 - 4 "AAAA IN kubernetes.default.default.svc.cluster.local. udp 62 false 512" NXDOMAIN qr,aa,rd 155 0.000158932s
[INFO] 10.244.0.4:57223 - 9 "PTR IN 1.0.96.10.in-addr.arpa. udp 40 false 512" NOERROR qr,aa,rd 112 0.00012213s
[INFO] 10.244.1.2:50810 - 4 "A IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 106 0.000176678s
[INFO] 10.244.0.4:58045 - 2 "PTR IN 10.0.96.10.in-addr.arpa. udp 41 false 512" NOERROR qr,aa,rd 116 0.000142445s
[INFO] 10.244.0.4:39777 - 4 "A IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 106 0.000123555s
==> describe nodes <==
Name: ha-472903
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=ha-472903
kubernetes.io/os=linux
minikube.k8s.io/commit=9829f0bc17c523e4378d28e0c25741106f24f00a
minikube.k8s.io/name=ha-472903
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_09_16T23_56_50_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Tue, 16 Sep 2025 23:56:47 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: ha-472903
AcquireTime: <unset>
RenewTime: Wed, 17 Sep 2025 00:10:35 +0000
Conditions:
Type            Status  LastHeartbeatTime                LastTransitionTime               Reason                      Message
----            ------  -----------------                ------------------               ------                      -------
MemoryPressure  False   Wed, 17 Sep 2025 00:08:42 +0000  Tue, 16 Sep 2025 23:56:46 +0000  KubeletHasSufficientMemory  kubelet has sufficient memory available
DiskPressure    False   Wed, 17 Sep 2025 00:08:42 +0000  Tue, 16 Sep 2025 23:56:46 +0000  KubeletHasNoDiskPressure    kubelet has no disk pressure
PIDPressure     False   Wed, 17 Sep 2025 00:08:42 +0000  Tue, 16 Sep 2025 23:56:46 +0000  KubeletHasSufficientPID     kubelet has sufficient PID available
Ready           True    Wed, 17 Sep 2025 00:08:42 +0000  Tue, 16 Sep 2025 23:56:47 +0000  KubeletReady                kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.2
Hostname: ha-472903
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863456Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863456Ki
pods: 110
System Info:
Machine ID: ac22e2ab5b0349cdb9474983aa23278e
System UUID: 695af4c7-28fb-4299-9454-75db3262ca2c
Boot ID: 4acfd7d3-9698-436f-b4ae-efdf6bd483d5
Kernel Version: 6.8.0-1037-gcp
OS Image: Ubuntu 22.04.5 LTS
Operating System: linux
Architecture: amd64
Container Runtime Version: containerd://1.7.27
Kubelet Version: v1.34.0
Kube-Proxy Version:
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (11 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
default busybox-7b57f96db7-6hrm6 0 (0%) 0 (0%) 0 (0%) 0 (0%) 12m
kube-system coredns-66bc5c9577-c94hz 100m (1%) 0 (0%) 70Mi (0%) 170Mi (0%) 13m
kube-system coredns-66bc5c9577-qn8m7 100m (1%) 0 (0%) 70Mi (0%) 170Mi (0%) 13m
kube-system etcd-ha-472903 100m (1%) 0 (0%) 100Mi (0%) 0 (0%) 13m
kube-system kindnet-lh7dv 100m (1%) 100m (1%) 50Mi (0%) 50Mi (0%) 13m
kube-system kube-apiserver-ha-472903 250m (3%) 0 (0%) 0 (0%) 0 (0%) 13m
kube-system kube-controller-manager-ha-472903 200m (2%) 0 (0%) 0 (0%) 0 (0%) 13m
kube-system kube-proxy-d4m8f 0 (0%) 0 (0%) 0 (0%) 0 (0%) 13m
kube-system kube-scheduler-ha-472903 100m (1%) 0 (0%) 0 (0%) 0 (0%) 13m
kube-system kube-vip-ha-472903 0 (0%) 0 (0%) 0 (0%) 0 (0%) 13m
kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 13m
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 950m (11%) 100m (1%)
memory 290Mi (0%) 390Mi (1%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 13m kube-proxy
Normal NodeHasSufficientPID 13m (x7 over 13m) kubelet Node ha-472903 status is now: NodeHasSufficientPID
Normal Starting 13m kubelet Starting kubelet.
Normal NodeHasSufficientMemory 13m (x8 over 13m) kubelet Node ha-472903 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 13m (x8 over 13m) kubelet Node ha-472903 status is now: NodeHasNoDiskPressure
Normal NodeAllocatableEnforced 13m kubelet Updated Node Allocatable limit across pods
Normal Starting 13m kubelet Starting kubelet.
Normal NodeAllocatableEnforced 13m kubelet Updated Node Allocatable limit across pods
Normal NodeHasSufficientMemory 13m kubelet Node ha-472903 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 13m kubelet Node ha-472903 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 13m kubelet Node ha-472903 status is now: NodeHasSufficientPID
Normal RegisteredNode 13m node-controller Node ha-472903 event: Registered Node ha-472903 in Controller
Normal RegisteredNode 13m node-controller Node ha-472903 event: Registered Node ha-472903 in Controller
Normal RegisteredNode 12m node-controller Node ha-472903 event: Registered Node ha-472903 in Controller
Name: ha-472903-m02
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=ha-472903-m02
kubernetes.io/os=linux
minikube.k8s.io/commit=9829f0bc17c523e4378d28e0c25741106f24f00a
minikube.k8s.io/name=ha-472903
minikube.k8s.io/primary=false
minikube.k8s.io/updated_at=2025_09_16T23_57_23_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Tue, 16 Sep 2025 23:57:22 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: ha-472903-m02
AcquireTime: <unset>
RenewTime: Wed, 17 Sep 2025 00:10:38 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Wed, 17 Sep 2025 00:07:45 +0000 Tue, 16 Sep 2025 23:57:22 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Wed, 17 Sep 2025 00:07:45 +0000 Tue, 16 Sep 2025 23:57:22 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Wed, 17 Sep 2025 00:07:45 +0000 Tue, 16 Sep 2025 23:57:22 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Wed, 17 Sep 2025 00:07:45 +0000 Tue, 16 Sep 2025 23:57:24 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.3
Hostname: ha-472903-m02
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863456Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863456Ki
pods: 110
System Info:
Machine ID: 4094672df3d84509ae4c88c54f7f5e93
System UUID: 85df9db8-f21a-4038-9f8c-4cc1d81dc0d5
Boot ID: 4acfd7d3-9698-436f-b4ae-efdf6bd483d5
Kernel Version: 6.8.0-1037-gcp
OS Image: Ubuntu 22.04.5 LTS
Operating System: linux
Architecture: amd64
Container Runtime Version: containerd://1.7.27
Kubelet Version: v1.34.0
Kube-Proxy Version:
PodCIDR: 10.244.1.0/24
PodCIDRs: 10.244.1.0/24
Non-terminated Pods: (8 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
default busybox-7b57f96db7-4jfjt 0 (0%) 0 (0%) 0 (0%) 0 (0%) 12m
kube-system etcd-ha-472903-m02 100m (1%) 0 (0%) 100Mi (0%) 0 (0%) 13m
kube-system kindnet-q7c7s 100m (1%) 100m (1%) 50Mi (0%) 50Mi (0%) 13m
kube-system kube-apiserver-ha-472903-m02 250m (3%) 0 (0%) 0 (0%) 0 (0%) 13m
kube-system kube-controller-manager-ha-472903-m02 200m (2%) 0 (0%) 0 (0%) 0 (0%) 13m
kube-system kube-proxy-58lkb 0 (0%) 0 (0%) 0 (0%) 0 (0%) 13m
kube-system kube-scheduler-ha-472903-m02 100m (1%) 0 (0%) 0 (0%) 0 (0%) 13m
kube-system kube-vip-ha-472903-m02 0 (0%) 0 (0%) 0 (0%) 0 (0%) 13m
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 750m (9%) 100m (1%)
memory 150Mi (0%) 50Mi (0%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 13m kube-proxy
Normal RegisteredNode 13m node-controller Node ha-472903-m02 event: Registered Node ha-472903-m02 in Controller
Normal RegisteredNode 13m node-controller Node ha-472903-m02 event: Registered Node ha-472903-m02 in Controller
Normal RegisteredNode 12m node-controller Node ha-472903-m02 event: Registered Node ha-472903-m02 in Controller
Name: ha-472903-m03
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=ha-472903-m03
kubernetes.io/os=linux
minikube.k8s.io/commit=9829f0bc17c523e4378d28e0c25741106f24f00a
minikube.k8s.io/name=ha-472903
minikube.k8s.io/primary=false
minikube.k8s.io/updated_at=2025_09_16T23_57_52_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Tue, 16 Sep 2025 23:57:51 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: ha-472903-m03
AcquireTime: <unset>
RenewTime: Wed, 17 Sep 2025 00:10:36 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Wed, 17 Sep 2025 00:09:45 +0000 Tue, 16 Sep 2025 23:57:51 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Wed, 17 Sep 2025 00:09:45 +0000 Tue, 16 Sep 2025 23:57:51 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Wed, 17 Sep 2025 00:09:45 +0000 Tue, 16 Sep 2025 23:57:51 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Wed, 17 Sep 2025 00:09:45 +0000 Tue, 16 Sep 2025 23:57:54 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.4
Hostname: ha-472903-m03
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863456Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863456Ki
pods: 110
System Info:
Machine ID: 9964c713c65f4333be8a877aab744040
System UUID: 7eb7f2ee-a32d-4876-a4ad-58f745b9c377
Boot ID: 4acfd7d3-9698-436f-b4ae-efdf6bd483d5
Kernel Version: 6.8.0-1037-gcp
OS Image: Ubuntu 22.04.5 LTS
Operating System: linux
Architecture: amd64
Container Runtime Version: containerd://1.7.27
Kubelet Version: v1.34.0
Kube-Proxy Version:
PodCIDR: 10.244.2.0/24
PodCIDRs: 10.244.2.0/24
Non-terminated Pods: (8 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
default busybox-7b57f96db7-mknzs 0 (0%) 0 (0%) 0 (0%) 0 (0%) 12m
kube-system etcd-ha-472903-m03 100m (1%) 0 (0%) 100Mi (0%) 0 (0%) 12m
kube-system kindnet-x6twd 100m (1%) 100m (1%) 50Mi (0%) 50Mi (0%) 12m
kube-system kube-apiserver-ha-472903-m03 250m (3%) 0 (0%) 0 (0%) 0 (0%) 12m
kube-system kube-controller-manager-ha-472903-m03 200m (2%) 0 (0%) 0 (0%) 0 (0%) 12m
kube-system kube-proxy-kn6nb 0 (0%) 0 (0%) 0 (0%) 0 (0%) 12m
kube-system kube-scheduler-ha-472903-m03 100m (1%) 0 (0%) 0 (0%) 0 (0%) 12m
kube-system kube-vip-ha-472903-m03 0 (0%) 0 (0%) 0 (0%) 0 (0%) 12m
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 750m (9%) 100m (1%)
memory 150Mi (0%) 50Mi (0%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal RegisteredNode 12m node-controller Node ha-472903-m03 event: Registered Node ha-472903-m03 in Controller
Normal RegisteredNode 12m node-controller Node ha-472903-m03 event: Registered Node ha-472903-m03 in Controller
Normal RegisteredNode 12m node-controller Node ha-472903-m03 event: Registered Node ha-472903-m03 in Controller
==> dmesg <==
[ +0.000009] ll header: 00000000: ff ff ff ff ff ff 86 e8 75 4b 01 57 08 06
[ +0.025562] IPv4: martian source 10.244.0.1 from 10.244.0.3, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff be 3d a9 85 b1 bd 08 06
[ +13.150028] IPv4: martian source 10.244.0.1 from 10.244.0.3, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff d6 5c f0 26 cd ba 08 06
[ +0.000341] IPv4: martian source 10.244.0.3 from 10.244.0.2, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff 3a 20 90 fb f5 d8 08 06
[ +28.639349] IPv4: martian source 10.244.0.1 from 10.244.0.4, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff 06 26 63 8d db 90 08 06
[ +0.000417] IPv4: martian source 10.244.0.4 from 10.244.0.3, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff be 3d a9 85 b1 bd 08 06
[ +0.836892] IPv4: martian source 10.244.0.1 from 10.244.0.2, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff 56 cc 9b 52 38 94 08 06
[ +0.080327] IPv4: martian source 10.244.0.1 from 10.244.0.3, on dev eth0
[ +0.000009] ll header: 00000000: ff ff ff ff ff ff 3e 79 8e c8 7e 37 08 06
[Sep16 23:40] IPv4: martian source 10.244.0.1 from 10.244.0.2, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff 66 3b 76 df aa 6a 08 06
[ +20.325550] IPv4: martian source 10.244.0.1 from 10.244.0.3, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff de 39 4b 41 df 63 08 06
[ +0.000318] IPv4: martian source 10.244.0.3 from 10.244.0.2, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff 66 3b 76 df aa 6a 08 06
[ +8.925776] IPv4: martian source 10.244.0.1 from 10.244.0.4, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff 3e cd c1 f7 dc c8 08 06
[ +0.000373] IPv4: martian source 10.244.0.4 from 10.244.0.3, on dev eth0
[ +0.000004] ll header: 00000000: ff ff ff ff ff ff 3e 79 8e c8 7e 37 08 06
==> etcd [23c0af0bdbe9526d53769461ed9f80d8c743b02e625b65cce39c888f5e7d4b4e] <==
{"level":"info","ts":"2025-09-16T23:57:38.284368Z","caller":"rafthttp/stream.go:273","msg":"established TCP streaming connection with remote peer","stream-writer-type":"stream Message","local-member-id":"aec36adc501070cc","remote-peer-id":"ab9d0391dce79465"}
{"level":"info","ts":"2025-09-16T23:57:38.288146Z","caller":"etcdserver/server.go:1838","msg":"sending merged snapshot","from":"aec36adc501070cc","to":"ab9d0391dce79465","bytes":1356737,"size":"1.4 MB"}
{"level":"info","ts":"2025-09-16T23:57:38.288252Z","caller":"rafthttp/snapshot_sender.go:82","msg":"sending database snapshot","snapshot-index":679,"remote-peer-id":"ab9d0391dce79465","bytes":1356737,"size":"1.4 MB"}
{"level":"info","ts":"2025-09-16T23:57:38.293060Z","caller":"etcdserver/snapshot_merge.go:64","msg":"sent database snapshot to writer","bytes":1347584,"size":"1.3 MB"}
{"level":"info","ts":"2025-09-16T23:57:38.299128Z","caller":"rafthttp/snapshot_sender.go:131","msg":"sent database snapshot","snapshot-index":679,"remote-peer-id":"ab9d0391dce79465","bytes":1356737,"size":"1.4 MB"}
{"level":"info","ts":"2025-09-16T23:57:38.314973Z","caller":"rafthttp/stream.go:411","msg":"established TCP streaming connection with remote peer","stream-reader-type":"stream MsgApp v2","local-member-id":"aec36adc501070cc","remote-peer-id":"ab9d0391dce79465"}
{"level":"info","ts":"2025-09-16T23:57:38.321619Z","caller":"rafthttp/stream.go:248","msg":"set message encoder","from":"aec36adc501070cc","to":"ab9d0391dce79465","stream-type":"stream Message"}
{"level":"warn","ts":"2025-09-16T23:57:38.321647Z","caller":"rafthttp/stream.go:264","msg":"closed TCP streaming connection with remote peer","stream-writer-type":"stream Message","local-member-id":"aec36adc501070cc","remote-peer-id":"ab9d0391dce79465"}
{"level":"info","ts":"2025-09-16T23:57:38.321659Z","caller":"rafthttp/stream.go:273","msg":"established TCP streaming connection with remote peer","stream-writer-type":"stream Message","local-member-id":"aec36adc501070cc","remote-peer-id":"ab9d0391dce79465"}
{"level":"info","ts":"2025-09-16T23:57:38.321995Z","caller":"rafthttp/stream.go:411","msg":"established TCP streaming connection with remote peer","stream-reader-type":"stream Message","local-member-id":"aec36adc501070cc","remote-peer-id":"ab9d0391dce79465"}
{"level":"info","ts":"2025-09-16T23:57:38.324746Z","caller":"rafthttp/stream.go:248","msg":"set message encoder","from":"aec36adc501070cc","to":"ab9d0391dce79465","stream-type":"stream MsgApp v2"}
{"level":"warn","ts":"2025-09-16T23:57:38.324782Z","caller":"rafthttp/stream.go:264","msg":"closed TCP streaming connection with remote peer","stream-writer-type":"stream MsgApp v2","local-member-id":"aec36adc501070cc","remote-peer-id":"ab9d0391dce79465"}
{"level":"info","ts":"2025-09-16T23:57:38.324796Z","caller":"rafthttp/stream.go:273","msg":"established TCP streaming connection with remote peer","stream-writer-type":"stream MsgApp v2","local-member-id":"aec36adc501070cc","remote-peer-id":"ab9d0391dce79465"}
{"level":"warn","ts":"2025-09-16T23:57:38.539376Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"192.168.49.4:45372","server-name":"","error":"EOF"}
{"level":"info","ts":"2025-09-16T23:57:38.542781Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1981","msg":"aec36adc501070cc switched to configuration voters=(4226730353838347643 12366044076840555621 12593026477526642892)"}
{"level":"info","ts":"2025-09-16T23:57:38.542928Z","caller":"membership/cluster.go:550","msg":"promote member","cluster-id":"fa54960ea34d58be","local-member-id":"aec36adc501070cc","promoted-member-id":"ab9d0391dce79465"}
{"level":"info","ts":"2025-09-16T23:57:38.542988Z","caller":"etcdserver/server.go:1752","msg":"applied a configuration change through raft","local-member-id":"aec36adc501070cc","raft-conf-change":"ConfChangeAddNode","raft-conf-change-node-id":"ab9d0391dce79465"}
{"level":"info","ts":"2025-09-16T23:57:40.311787Z","caller":"etcdserver/server.go:1856","msg":"sent merged snapshot","from":"aec36adc501070cc","to":"3aa85cdcd5e5557b","bytes":876533,"size":"876 kB","took":"30.009467109s"}
{"level":"info","ts":"2025-09-16T23:57:47.400606Z","caller":"etcdserver/server.go:2246","msg":"skip compaction since there is an inflight snapshot"}
{"level":"info","ts":"2025-09-16T23:57:51.874557Z","caller":"etcdserver/server.go:2246","msg":"skip compaction since there is an inflight snapshot"}
{"level":"info","ts":"2025-09-16T23:58:06.103123Z","caller":"etcdserver/server.go:2246","msg":"skip compaction since there is an inflight snapshot"}
{"level":"info","ts":"2025-09-16T23:58:08.299219Z","caller":"etcdserver/server.go:1856","msg":"sent merged snapshot","from":"aec36adc501070cc","to":"ab9d0391dce79465","bytes":1356737,"size":"1.4 MB","took":"30.011071692s"}
{"level":"info","ts":"2025-09-17T00:06:46.502551Z","caller":"mvcc/index.go:194","msg":"compact tree index","revision":1554}
{"level":"info","ts":"2025-09-17T00:06:46.523688Z","caller":"mvcc/kvstore_compaction.go:70","msg":"finished scheduled compaction","compact-revision":1554,"took":"20.616779ms","hash":4277915431,"current-db-size-bytes":3936256,"current-db-size":"3.9 MB","current-db-size-in-use-bytes":2105344,"current-db-size-in-use":"2.1 MB"}
{"level":"info","ts":"2025-09-17T00:06:46.523839Z","caller":"mvcc/hash.go:157","msg":"storing new hash","hash":4277915431,"revision":1554,"compact-revision":-1}
==> kernel <==
00:10:44 up 2:53, 0 users, load average: 0.15, 0.34, 0.80
Linux ha-472903 6.8.0-1037-gcp #39~22.04.1-Ubuntu SMP Thu Aug 21 17:29:24 UTC 2025 x86_64 x86_64 x86_64 GNU/Linux
PRETTY_NAME="Ubuntu 22.04.5 LTS"
==> kindnet [cc69d2451cb65860b5bc78e027be2fc1cb0f9fa6542b4abe3bc1ff1c90a8fe60] <==
I0917 00:09:57.503751 1 main.go:324] Node ha-472903-m03 has CIDR [10.244.2.0/24]
I0917 00:10:07.510274 1 main.go:297] Handling node with IPs: map[192.168.49.2:{}]
I0917 00:10:07.510320 1 main.go:301] handling current node
I0917 00:10:07.510336 1 main.go:297] Handling node with IPs: map[192.168.49.3:{}]
I0917 00:10:07.510341 1 main.go:324] Node ha-472903-m02 has CIDR [10.244.1.0/24]
I0917 00:10:07.510554 1 main.go:297] Handling node with IPs: map[192.168.49.4:{}]
I0917 00:10:07.510567 1 main.go:324] Node ha-472903-m03 has CIDR [10.244.2.0/24]
I0917 00:10:17.512521 1 main.go:297] Handling node with IPs: map[192.168.49.2:{}]
I0917 00:10:17.512563 1 main.go:301] handling current node
I0917 00:10:17.512582 1 main.go:297] Handling node with IPs: map[192.168.49.3:{}]
I0917 00:10:17.512589 1 main.go:324] Node ha-472903-m02 has CIDR [10.244.1.0/24]
I0917 00:10:17.512785 1 main.go:297] Handling node with IPs: map[192.168.49.4:{}]
I0917 00:10:17.512800 1 main.go:324] Node ha-472903-m03 has CIDR [10.244.2.0/24]
I0917 00:10:27.511383 1 main.go:297] Handling node with IPs: map[192.168.49.2:{}]
I0917 00:10:27.511448 1 main.go:301] handling current node
I0917 00:10:27.511469 1 main.go:297] Handling node with IPs: map[192.168.49.3:{}]
I0917 00:10:27.511476 1 main.go:324] Node ha-472903-m02 has CIDR [10.244.1.0/24]
I0917 00:10:27.511660 1 main.go:297] Handling node with IPs: map[192.168.49.4:{}]
I0917 00:10:27.511671 1 main.go:324] Node ha-472903-m03 has CIDR [10.244.2.0/24]
I0917 00:10:37.506147 1 main.go:297] Handling node with IPs: map[192.168.49.2:{}]
I0917 00:10:37.506186 1 main.go:301] handling current node
I0917 00:10:37.506204 1 main.go:297] Handling node with IPs: map[192.168.49.3:{}]
I0917 00:10:37.506209 1 main.go:324] Node ha-472903-m02 has CIDR [10.244.1.0/24]
I0917 00:10:37.506448 1 main.go:297] Handling node with IPs: map[192.168.49.4:{}]
I0917 00:10:37.506459 1 main.go:324] Node ha-472903-m03 has CIDR [10.244.2.0/24]
==> kube-apiserver [0aba62132d764965d8e1a80a4a6345bb7e34892b23143da4a7af3450cd465d6c] <==
I0917 00:02:08.464547 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0917 00:02:14.110452 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0917 00:03:20.793210 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0917 00:03:22.342952 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0917 00:04:24.690127 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0917 00:04:42.485311 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0917 00:05:30.551003 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0917 00:06:06.800617 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0917 00:06:32.710262 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0917 00:06:47.441344 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12
I0917 00:07:34.732036 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0917 00:07:42.022448 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0917 00:08:46.236959 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0917 00:08:51.159386 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0917 00:09:52.603432 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0917 00:09:53.014406 1 stats.go:136] "Error getting keys" err="empty key: \"\""
E0917 00:10:41.954540 1 conn.go:339] Error on socket receive: read tcp 192.168.49.254:8443->192.168.49.1:37534: use of closed network connection
E0917 00:10:42.122977 1 conn.go:339] Error on socket receive: read tcp 192.168.49.254:8443->192.168.49.1:37556: use of closed network connection
E0917 00:10:42.250606 1 conn.go:339] Error on socket receive: read tcp 192.168.49.254:8443->192.168.49.1:37572: use of closed network connection
E0917 00:10:42.442469 1 conn.go:339] Error on socket receive: read tcp 192.168.49.254:8443->192.168.49.1:37584: use of closed network connection
E0917 00:10:42.605380 1 conn.go:339] Error on socket receive: read tcp 192.168.49.254:8443->192.168.49.1:37602: use of closed network connection
E0917 00:10:42.730284 1 conn.go:339] Error on socket receive: read tcp 192.168.49.254:8443->192.168.49.1:37612: use of closed network connection
E0917 00:10:42.884291 1 conn.go:339] Error on socket receive: read tcp 192.168.49.254:8443->192.168.49.1:37626: use of closed network connection
E0917 00:10:43.036952 1 conn.go:339] Error on socket receive: read tcp 192.168.49.254:8443->192.168.49.1:37644: use of closed network connection
E0917 00:10:43.161098 1 conn.go:339] Error on socket receive: read tcp 192.168.49.254:8443->192.168.49.1:37658: use of closed network connection
==> kube-controller-manager [087290a41f59caa4f9bc89759bcec6cf90f47c8a2ab83b7c671a8fff35641df9] <==
I0916 23:56:54.728442 1 shared_informer.go:356] "Caches are synced" controller="service-cidr-controller"
I0916 23:56:54.728466 1 shared_informer.go:356] "Caches are synced" controller="ClusterRoleAggregator"
I0916 23:56:54.728485 1 shared_informer.go:356] "Caches are synced" controller="ReplicaSet"
I0916 23:56:54.728644 1 shared_informer.go:356] "Caches are synced" controller="resource_claim"
I0916 23:56:54.728665 1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-legacy-unknown"
I0916 23:56:54.728648 1 shared_informer.go:356] "Caches are synced" controller="ephemeral"
I0916 23:56:54.728914 1 shared_informer.go:356] "Caches are synced" controller="attach detach"
I0916 23:56:54.730175 1 shared_informer.go:356] "Caches are synced" controller="taint-eviction-controller"
I0916 23:56:54.730201 1 shared_informer.go:356] "Caches are synced" controller="TTL after finished"
I0916 23:56:54.732432 1 shared_informer.go:356] "Caches are synced" controller="resource quota"
I0916 23:56:54.733452 1 shared_informer.go:356] "Caches are synced" controller="namespace"
I0916 23:56:54.735655 1 shared_informer.go:356] "Caches are synced" controller="node"
I0916 23:56:54.735714 1 range_allocator.go:177] "Sending events to api server" logger="node-ipam-controller"
I0916 23:56:54.735760 1 range_allocator.go:183] "Starting range CIDR allocator" logger="node-ipam-controller"
I0916 23:56:54.735767 1 shared_informer.go:349] "Waiting for caches to sync" controller="cidrallocator"
I0916 23:56:54.735772 1 shared_informer.go:356] "Caches are synced" controller="cidrallocator"
I0916 23:56:54.740680 1 range_allocator.go:428] "Set node PodCIDR" logger="node-ipam-controller" node="ha-472903" podCIDRs=["10.244.0.0/24"]
I0916 23:56:54.749950 1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
I0916 23:57:22.933124 1 actual_state_of_world.go:541] "Failed to update statusUpdateNeeded field in actual state of world" logger="persistentvolume-attach-detach-controller" err="Failed to set statusUpdateNeeded to needed true, because nodeName=\"ha-472903-m02\" does not exist"
I0916 23:57:22.943785 1 range_allocator.go:428] "Set node PodCIDR" logger="node-ipam-controller" node="ha-472903-m02" podCIDRs=["10.244.1.0/24"]
I0916 23:57:24.681339 1 node_lifecycle_controller.go:873] "Missing timestamp for Node. Assuming now as a timestamp" logger="node-lifecycle-controller" node="ha-472903-m02"
I0916 23:57:51.749676 1 actual_state_of_world.go:541] "Failed to update statusUpdateNeeded field in actual state of world" logger="persistentvolume-attach-detach-controller" err="Failed to set statusUpdateNeeded to needed true, because nodeName=\"ha-472903-m03\" does not exist"
I0916 23:57:51.772476 1 range_allocator.go:428] "Set node PodCIDR" logger="node-ipam-controller" node="ha-472903-m03" podCIDRs=["10.244.2.0/24"]
E0916 23:57:51.829801 1 daemon_controller.go:346] "Unhandled Error" err="kube-system/kube-proxy failed with : error storing status for daemon set &v1.DaemonSet{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"kube-proxy\", GenerateName:\"\", Namespace:\"kube-system\", SelfLink:\"\", UID:\"3f5da9fc-6769-4ca8-a715-edeace44c646\", ResourceVersion:\"594\", Generation:1, CreationTimestamp:time.Date(2025, time.September, 16, 23, 56, 49, 0, time.Local), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"k8s-app\":\"kube-proxy\"}, Annotations:map[string]string{\"deprecated.daemonset.template.generation\":\"1\"}, OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry(nil)}, Spec:v1.DaemonSetSpec{Selector:(*v1.LabelSelector)(0xc00222d0e0), Template:v1.PodTemplateSpec{ObjectMeta:v1.ObjectMeta{Name:\"\", GenerateName:\"\", Namespace:\"\", SelfLink:\"\", UID:\"\", ResourceVersion:\"
\", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:<nil>, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{\"k8s-app\":\"kube-proxy\"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry(nil)}, Spec:v1.PodSpec{Volumes:[]v1.Volume{v1.Volume{Name:\"kube-proxy\", VolumeSource:v1.VolumeSource{HostPath:(*v1.HostPathVolumeSource)(nil), EmptyDir:(*v1.EmptyDirVolumeSource)(nil), GCEPersistentDisk:(*v1.GCEPersistentDiskVolumeSource)(nil), AWSElasticBlockStore:(*v1.AWSElasticBlockStoreVolumeSource)(nil), GitRepo:(*v1.GitRepoVolumeSource)(nil), Secret:(*v1.SecretVolumeSource)(nil), NFS:(*v1.NFSVolumeSource)(nil), ISCSI:(*v1.ISCSIVolumeSource)(nil), Glusterfs:(*v1.GlusterfsVolumeSource)(nil), PersistentVolumeClaim:(*v1.PersistentVolumeClaimVolumeSource)(nil), RBD:(*v1.RBDVolumeSource)(nil), FlexVolume:(*v1.FlexVolumeSource)(nil), Cinder:(*v1.CinderVolumeSourc
e)(nil), CephFS:(*v1.CephFSVolumeSource)(nil), Flocker:(*v1.FlockerVolumeSource)(nil), DownwardAPI:(*v1.DownwardAPIVolumeSource)(nil), FC:(*v1.FCVolumeSource)(nil), AzureFile:(*v1.AzureFileVolumeSource)(nil), ConfigMap:(*v1.ConfigMapVolumeSource)(0xc0021ed7c0), VsphereVolume:(*v1.VsphereVirtualDiskVolumeSource)(nil), Quobyte:(*v1.QuobyteVolumeSource)(nil), AzureDisk:(*v1.AzureDiskVolumeSource)(nil), PhotonPersistentDisk:(*v1.PhotonPersistentDiskVolumeSource)(nil), Projected:(*v1.ProjectedVolumeSource)(nil), PortworxVolume:(*v1.PortworxVolumeSource)(nil), ScaleIO:(*v1.ScaleIOVolumeSource)(nil), StorageOS:(*v1.StorageOSVolumeSource)(nil), CSI:(*v1.CSIVolumeSource)(nil), Ephemeral:(*v1.EphemeralVolumeSource)(nil), Image:(*v1.ImageVolumeSource)(nil)}}, v1.Volume{Name:\"xtables-lock\", VolumeSource:v1.VolumeSource{HostPath:(*v1.HostPathVolumeSource)(0xc002fcdce0), EmptyDir:(*v1.EmptyDirVolumeSource)(nil), GCEPersistentDisk:(*v1.GCEPersistentDiskVolumeSource)(nil), AWSElasticBlockStore:(*v1.AWSElasticBlockStoreVolu
meSource)(nil), GitRepo:(*v1.GitRepoVolumeSource)(nil), Secret:(*v1.SecretVolumeSource)(nil), NFS:(*v1.NFSVolumeSource)(nil), ISCSI:(*v1.ISCSIVolumeSource)(nil), Glusterfs:(*v1.GlusterfsVolumeSource)(nil), PersistentVolumeClaim:(*v1.PersistentVolumeClaimVolumeSource)(nil), RBD:(*v1.RBDVolumeSource)(nil), FlexVolume:(*v1.FlexVolumeSource)(nil), Cinder:(*v1.CinderVolumeSource)(nil), CephFS:(*v1.CephFSVolumeSource)(nil), Flocker:(*v1.FlockerVolumeSource)(nil), DownwardAPI:(*v1.DownwardAPIVolumeSource)(nil), FC:(*v1.FCVolumeSource)(nil), AzureFile:(*v1.AzureFileVolumeSource)(nil), ConfigMap:(*v1.ConfigMapVolumeSource)(nil), VsphereVolume:(*v1.VsphereVirtualDiskVolumeSource)(nil), Quobyte:(*v1.QuobyteVolumeSource)(nil), AzureDisk:(*v1.AzureDiskVolumeSource)(nil), PhotonPersistentDisk:(*v1.PhotonPersistentDiskVolumeSource)(nil), Projected:(*v1.ProjectedVolumeSource)(nil), PortworxVolume:(*v1.PortworxVolumeSource)(nil), ScaleIO:(*v1.ScaleIOVolumeSource)(nil), StorageOS:(*v1.StorageOSVolumeSource)(nil), CSI:(*v1.CSIV
olumeSource)(nil), Ephemeral:(*v1.EphemeralVolumeSource)(nil), Image:(*v1.ImageVolumeSource)(nil)}}, v1.Volume{Name:\"lib-modules\", VolumeSource:v1.VolumeSource{HostPath:(*v1.HostPathVolumeSource)(0xc002fcdcf8), EmptyDir:(*v1.EmptyDirVolumeSource)(nil), GCEPersistentDisk:(*v1.GCEPersistentDiskVolumeSource)(nil), AWSElasticBlockStore:(*v1.AWSElasticBlockStoreVolumeSource)(nil), GitRepo:(*v1.GitRepoVolumeSource)(nil), Secret:(*v1.SecretVolumeSource)(nil), NFS:(*v1.NFSVolumeSource)(nil), ISCSI:(*v1.ISCSIVolumeSource)(nil), Glusterfs:(*v1.GlusterfsVolumeSource)(nil), PersistentVolumeClaim:(*v1.PersistentVolumeClaimVolumeSource)(nil), RBD:(*v1.RBDVolumeSource)(nil), FlexVolume:(*v1.FlexVolumeSource)(nil), Cinder:(*v1.CinderVolumeSource)(nil), CephFS:(*v1.CephFSVolumeSource)(nil), Flocker:(*v1.FlockerVolumeSource)(nil), DownwardAPI:(*v1.DownwardAPIVolumeSource)(nil), FC:(*v1.FCVolumeSource)(nil), AzureFile:(*v1.AzureFileVolumeSource)(nil), ConfigMap:(*v1.ConfigMapVolumeSource)(nil), VsphereVolume:(*v1.VsphereVirtu
alDiskVolumeSource)(nil), Quobyte:(*v1.QuobyteVolumeSource)(nil), AzureDisk:(*v1.AzureDiskVolumeSource)(nil), PhotonPersistentDisk:(*v1.PhotonPersistentDiskVolumeSource)(nil), Projected:(*v1.ProjectedVolumeSource)(nil), PortworxVolume:(*v1.PortworxVolumeSource)(nil), ScaleIO:(*v1.ScaleIOVolumeSource)(nil), StorageOS:(*v1.StorageOSVolumeSource)(nil), CSI:(*v1.CSIVolumeSource)(nil), Ephemeral:(*v1.EphemeralVolumeSource)(nil), Image:(*v1.ImageVolumeSource)(nil)}}}, InitContainers:[]v1.Container(nil), Containers:[]v1.Container{v1.Container{Name:\"kube-proxy\", Image:\"registry.k8s.io/kube-proxy:v1.34.0\", Command:[]string{\"/usr/local/bin/kube-proxy\", \"--config=/var/lib/kube-proxy/config.conf\", \"--hostname-override=$(NODE_NAME)\"}, Args:[]string(nil), WorkingDir:\"\", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar{v1.EnvVar{Name:\"NODE_NAME\", Value:\"\", ValueFrom:(*v1.EnvVarSource)(0xc00144a7e0)}}, Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.Re
sourceList(nil), Claims:[]v1.ResourceClaim(nil)}, ResizePolicy:[]v1.ContainerResizePolicy(nil), RestartPolicy:(*v1.ContainerRestartPolicy)(nil), RestartPolicyRules:[]v1.ContainerRestartRule(nil), VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:\"kube-proxy\", ReadOnly:false, RecursiveReadOnly:(*v1.RecursiveReadOnlyMode)(nil), MountPath:\"/var/lib/kube-proxy\", SubPath:\"\", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:\"\"}, v1.VolumeMount{Name:\"xtables-lock\", ReadOnly:false, RecursiveReadOnly:(*v1.RecursiveReadOnlyMode)(nil), MountPath:\"/run/xtables.lock\", SubPath:\"\", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:\"\"}, v1.VolumeMount{Name:\"lib-modules\", ReadOnly:true, RecursiveReadOnly:(*v1.RecursiveReadOnlyMode)(nil), MountPath:\"/lib/modules\", SubPath:\"\", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:\"\"}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), StartupProbe:(*v1.Probe)(nil), Life
cycle:(*v1.Lifecycle)(nil), TerminationMessagePath:\"/dev/termination-log\", TerminationMessagePolicy:\"File\", ImagePullPolicy:\"IfNotPresent\", SecurityContext:(*v1.SecurityContext)(0xc0019549c0), Stdin:false, StdinOnce:false, TTY:false}}, EphemeralContainers:[]v1.EphemeralContainer(nil), RestartPolicy:\"Always\", TerminationGracePeriodSeconds:(*int64)(0xc001900b18), ActiveDeadlineSeconds:(*int64)(nil), DNSPolicy:\"ClusterFirst\", NodeSelector:map[string]string{\"kubernetes.io/os\":\"linux\"}, ServiceAccountName:\"kube-proxy\", DeprecatedServiceAccount:\"kube-proxy\", AutomountServiceAccountToken:(*bool)(nil), NodeName:\"\", HostNetwork:true, HostPID:false, HostIPC:false, ShareProcessNamespace:(*bool)(nil), SecurityContext:(*v1.PodSecurityContext)(0xc001ba1200), ImagePullSecrets:[]v1.LocalObjectReference(nil), Hostname:\"\", Subdomain:\"\", Affinity:(*v1.Affinity)(nil), SchedulerName:\"default-scheduler\", Tolerations:[]v1.Toleration{v1.Toleration{Key:\"\", Operator:\"Exists\", Value:\"\", Effect:\"\", Tole
rationSeconds:(*int64)(nil)}}, HostAliases:[]v1.HostAlias(nil), PriorityClassName:\"system-node-critical\", Priority:(*int32)(nil), DNSConfig:(*v1.PodDNSConfig)(nil), ReadinessGates:[]v1.PodReadinessGate(nil), RuntimeClassName:(*string)(nil), EnableServiceLinks:(*bool)(nil), PreemptionPolicy:(*v1.PreemptionPolicy)(nil), Overhead:v1.ResourceList(nil), TopologySpreadConstraints:[]v1.TopologySpreadConstraint(nil), SetHostnameAsFQDN:(*bool)(nil), OS:(*v1.PodOS)(nil), HostUsers:(*bool)(nil), SchedulingGates:[]v1.PodSchedulingGate(nil), ResourceClaims:[]v1.PodResourceClaim(nil), Resources:(*v1.ResourceRequirements)(nil), HostnameOverride:(*string)(nil)}}, UpdateStrategy:v1.DaemonSetUpdateStrategy{Type:\"RollingUpdate\", RollingUpdate:(*v1.RollingUpdateDaemonSet)(0xc001e14570)}, MinReadySeconds:0, RevisionHistoryLimit:(*int32)(0xc001900b70)}, Status:v1.DaemonSetStatus{CurrentNumberScheduled:2, NumberMisscheduled:0, DesiredNumberScheduled:2, NumberReady:2, ObservedGeneration:1, UpdatedNumberScheduled:2, NumberAvailab
le:2, NumberUnavailable:0, CollisionCount:(*int32)(nil), Conditions:[]v1.DaemonSetCondition(nil)}}: Operation cannot be fulfilled on daemonsets.apps \"kube-proxy\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError"
I0916 23:57:54.685322 1 node_lifecycle_controller.go:873] "Missing timestamp for Node. Assuming now as a timestamp" logger="node-lifecycle-controller" node="ha-472903-m03"
==> kube-proxy [92dd4d116eb0387dded82fb32d35690ec2d00e3f5e7ac81bf7aea0c6814edd5e] <==
I0916 23:56:56.831012 1 server_linux.go:53] "Using iptables proxy"
I0916 23:56:56.891635 1 shared_informer.go:349] "Waiting for caches to sync" controller="node informer cache"
I0916 23:56:56.991820 1 shared_informer.go:356] "Caches are synced" controller="node informer cache"
I0916 23:56:56.991862 1 server.go:219] "Successfully retrieved NodeIPs" NodeIPs=["192.168.49.2"]
E0916 23:56:56.991952 1 server.go:256] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I0916 23:56:57.015955 1 server.go:265] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I0916 23:56:57.016001 1 server_linux.go:132] "Using iptables Proxier"
I0916 23:56:57.021120 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I0916 23:56:57.021457 1 server.go:527] "Version info" version="v1.34.0"
I0916 23:56:57.021499 1 server.go:529] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0916 23:56:57.024872 1 config.go:200] "Starting service config controller"
I0916 23:56:57.024892 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
I0916 23:56:57.024900 1 config.go:106] "Starting endpoint slice config controller"
I0916 23:56:57.024909 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
I0916 23:56:57.024890 1 config.go:403] "Starting serviceCIDR config controller"
I0916 23:56:57.024917 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
I0916 23:56:57.024937 1 config.go:309] "Starting node config controller"
I0916 23:56:57.024942 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
I0916 23:56:57.125608 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
I0916 23:56:57.125691 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
I0916 23:56:57.125856 1 shared_informer.go:356] "Caches are synced" controller="service config"
I0916 23:56:57.125902 1 shared_informer.go:356] "Caches are synced" controller="node config"
==> kube-scheduler [bba28cace6502de93aa43db4fb51671581c5074990dea721d98d36d839734a67] <==
E0916 23:56:48.619869 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PodDisruptionBudget"
E0916 23:56:48.649766 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"storageclasses\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StorageClass"
E0916 23:56:48.673092 1 reflector.go:205] "Failed to watch" err="failed to list *v1.VolumeAttachment: volumeattachments.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"volumeattachments\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.VolumeAttachment"
I0916 23:56:49.170967 1 shared_informer.go:356] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
E0916 23:57:51.780040 1 framework.go:1400] "Plugin Failed" err="Operation cannot be fulfilled on pods/binding \"kindnet-x6twd\": pod kindnet-x6twd is already assigned to node \"ha-472903-m03\"" plugin="DefaultBinder" pod="kube-system/kindnet-x6twd" node="ha-472903-m03"
E0916 23:57:51.780142 1 schedule_one.go:379] "scheduler cache ForgetPod failed" err="pod f2346479-1adb-4bc7-af07-971525be2b05(kube-system/kindnet-x6twd) wasn't assumed so cannot be forgotten" logger="UnhandledError" pod="kube-system/kindnet-x6twd"
E0916 23:57:51.780183 1 schedule_one.go:1079] "Error scheduling pod; retrying" err="running Bind plugin \"DefaultBinder\": Operation cannot be fulfilled on pods/binding \"kindnet-x6twd\": pod kindnet-x6twd is already assigned to node \"ha-472903-m03\"" logger="UnhandledError" pod="kube-system/kindnet-x6twd"
I0916 23:57:51.782132 1 schedule_one.go:1092] "Pod has been assigned to node. Abort adding it back to queue." pod="kube-system/kindnet-x6twd" node="ha-472903-m03"
E0916 23:58:37.948695 1 framework.go:1400] "Plugin Failed" err="Operation cannot be fulfilled on pods/binding \"busybox-7b57f96db7-x6xc9\": pod busybox-7b57f96db7-x6xc9 is already assigned to node \"ha-472903-m02\"" plugin="DefaultBinder" pod="default/busybox-7b57f96db7-x6xc9" node="ha-472903-m02"
E0916 23:58:37.948846 1 schedule_one.go:379] "scheduler cache ForgetPod failed" err="pod 565a634f-ab41-4776-ba5d-63a601bfec48(default/busybox-7b57f96db7-x6xc9) wasn't assumed so cannot be forgotten" logger="UnhandledError" pod="default/busybox-7b57f96db7-x6xc9"
E0916 23:58:37.948875 1 schedule_one.go:1079] "Error scheduling pod; retrying" err="running Bind plugin \"DefaultBinder\": Operation cannot be fulfilled on pods/binding \"busybox-7b57f96db7-x6xc9\": pod busybox-7b57f96db7-x6xc9 is already assigned to node \"ha-472903-m02\"" logger="UnhandledError" pod="default/busybox-7b57f96db7-x6xc9"
I0916 23:58:37.950251 1 schedule_one.go:1092] "Pod has been assigned to node. Abort adding it back to queue." pod="default/busybox-7b57f96db7-x6xc9" node="ha-472903-m02"
I0916 23:58:37.966099 1 cache.go:512] "Pod was added to a different node than it was assumed" podKey="47b06c15-c007-4c50-a248-5411a0f4b6a7" pod="default/busybox-7b57f96db7-4jfjt" assumedNode="ha-472903-m02" currentNode="ha-472903"
E0916 23:58:37.968241 1 framework.go:1400] "Plugin Failed" err="Operation cannot be fulfilled on pods/binding \"busybox-7b57f96db7-4jfjt\": pod busybox-7b57f96db7-4jfjt is already assigned to node \"ha-472903-m02\"" plugin="DefaultBinder" pod="default/busybox-7b57f96db7-4jfjt" node="ha-472903"
E0916 23:58:37.968351 1 schedule_one.go:379] "scheduler cache ForgetPod failed" err="pod 47b06c15-c007-4c50-a248-5411a0f4b6a7(default/busybox-7b57f96db7-4jfjt) was assumed on ha-472903 but assigned to ha-472903-m02" logger="UnhandledError" pod="default/busybox-7b57f96db7-4jfjt"
E0916 23:58:37.968376 1 schedule_one.go:1079] "Error scheduling pod; retrying" err="running Bind plugin \"DefaultBinder\": Operation cannot be fulfilled on pods/binding \"busybox-7b57f96db7-4jfjt\": pod busybox-7b57f96db7-4jfjt is already assigned to node \"ha-472903-m02\"" logger="UnhandledError" pod="default/busybox-7b57f96db7-4jfjt"
I0916 23:58:37.969472 1 schedule_one.go:1092] "Pod has been assigned to node. Abort adding it back to queue." pod="default/busybox-7b57f96db7-4jfjt" node="ha-472903-m02"
E0916 23:58:38.002469 1 framework.go:1400] "Plugin Failed" err="Operation cannot be fulfilled on pods/binding \"busybox-7b57f96db7-wp95z\": pod busybox-7b57f96db7-wp95z is being deleted, cannot be assigned to a host" plugin="DefaultBinder" pod="default/busybox-7b57f96db7-wp95z" node="ha-472903"
E0916 23:58:38.002779 1 schedule_one.go:1079] "Error scheduling pod; retrying" err="running Bind plugin \"DefaultBinder\": Operation cannot be fulfilled on pods/binding \"busybox-7b57f96db7-wp95z\": pod busybox-7b57f96db7-wp95z is being deleted, cannot be assigned to a host" logger="UnhandledError" pod="default/busybox-7b57f96db7-wp95z"
E0916 23:58:38.046394 1 pod_status_patch.go:111] "Failed to patch pod status" err="pods \"busybox-7b57f96db7-xnrsc\" not found" pod="default/busybox-7b57f96db7-xnrsc"
E0916 23:58:38.046880 1 pod_status_patch.go:111] "Failed to patch pod status" err="pods \"busybox-7b57f96db7-wp95z\" not found" pod="default/busybox-7b57f96db7-wp95z"
E0916 23:58:40.050124 1 framework.go:1400] "Plugin Failed" err="Operation cannot be fulfilled on pods/binding \"busybox-7b57f96db7-6hrm6\": pod busybox-7b57f96db7-6hrm6 is already assigned to node \"ha-472903\"" plugin="DefaultBinder" pod="default/busybox-7b57f96db7-6hrm6" node="ha-472903"
E0916 23:58:40.050213 1 schedule_one.go:379] "scheduler cache ForgetPod failed" err="pod bd03bad4-af1e-42d0-81fb-6fcaeaa8775e(default/busybox-7b57f96db7-6hrm6) wasn't assumed so cannot be forgotten" logger="UnhandledError" pod="default/busybox-7b57f96db7-6hrm6"
E0916 23:58:40.050248 1 schedule_one.go:1079] "Error scheduling pod; retrying" err="running Bind plugin \"DefaultBinder\": Operation cannot be fulfilled on pods/binding \"busybox-7b57f96db7-6hrm6\": pod busybox-7b57f96db7-6hrm6 is already assigned to node \"ha-472903\"" logger="UnhandledError" pod="default/busybox-7b57f96db7-6hrm6"
I0916 23:58:40.051853 1 schedule_one.go:1092] "Pod has been assigned to node. Abort adding it back to queue." pod="default/busybox-7b57f96db7-6hrm6" node="ha-472903"
==> kubelet <==
Sep 16 23:58:38 ha-472903 kubelet[1676]: E0916 23:58:38.235025 1676 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/cc7a8d10-408f-4655-ac70-54b4af22d9eb-kube-api-access-hrb62 podName:cc7a8d10-408f-4655-ac70-54b4af22d9eb nodeName:}" failed. No retries permitted until 2025-09-16 23:58:38.735007966 +0000 UTC m=+109.066439678 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-hrb62" (UniqueName: "kubernetes.io/projected/cc7a8d10-408f-4655-ac70-54b4af22d9eb-kube-api-access-hrb62") pod "busybox-7b57f96db7-5pwbb" (UID: "cc7a8d10-408f-4655-ac70-54b4af22d9eb") : failed to fetch token: pod "busybox-7b57f96db7-5pwbb" not found
Sep 16 23:58:38 ha-472903 kubelet[1676]: E0916 23:58:38.737179 1676 projected.go:196] Error preparing data for projected volume kube-api-access-xrpwc for pod default/busybox-7b57f96db7-xj7ks: failed to fetch token: pod "busybox-7b57f96db7-xj7ks" not found
Sep 16 23:58:38 ha-472903 kubelet[1676]: E0916 23:58:38.737266 1676 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/cac915f6-7630-4320-b6d2-fd18f3c19a17-kube-api-access-xrpwc podName:cac915f6-7630-4320-b6d2-fd18f3c19a17 nodeName:}" failed. No retries permitted until 2025-09-16 23:58:39.737245356 +0000 UTC m=+110.068677057 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-xrpwc" (UniqueName: "kubernetes.io/projected/cac915f6-7630-4320-b6d2-fd18f3c19a17-kube-api-access-xrpwc") pod "busybox-7b57f96db7-xj7ks" (UID: "cac915f6-7630-4320-b6d2-fd18f3c19a17") : failed to fetch token: pod "busybox-7b57f96db7-xj7ks" not found
Sep 16 23:58:38 ha-472903 kubelet[1676]: E0916 23:58:38.737179 1676 projected.go:196] Error preparing data for projected volume kube-api-access-hrb62 for pod default/busybox-7b57f96db7-5pwbb: failed to fetch token: pod "busybox-7b57f96db7-5pwbb" not found
Sep 16 23:58:38 ha-472903 kubelet[1676]: E0916 23:58:38.737371 1676 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/cc7a8d10-408f-4655-ac70-54b4af22d9eb-kube-api-access-hrb62 podName:cc7a8d10-408f-4655-ac70-54b4af22d9eb nodeName:}" failed. No retries permitted until 2025-09-16 23:58:39.737351933 +0000 UTC m=+110.068783647 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-hrb62" (UniqueName: "kubernetes.io/projected/cc7a8d10-408f-4655-ac70-54b4af22d9eb-kube-api-access-hrb62") pod "busybox-7b57f96db7-5pwbb" (UID: "cc7a8d10-408f-4655-ac70-54b4af22d9eb") : failed to fetch token: pod "busybox-7b57f96db7-5pwbb" not found
Sep 16 23:58:39 ha-472903 kubelet[1676]: E0916 23:58:39.027158 1676 status_manager.go:1018] "Failed to get status for pod" err="pods \"busybox-7b57f96db7-5pwbb\" is forbidden: User \"system:node:ha-472903\" cannot get resource \"pods\" in API group \"\" in the namespace \"default\": no relationship found between node 'ha-472903' and this object" podUID="cc7a8d10-408f-4655-ac70-54b4af22d9eb" pod="default/busybox-7b57f96db7-5pwbb"
Sep 16 23:58:39 ha-472903 kubelet[1676]: E0916 23:58:39.028111 1676 status_manager.go:1018] "Failed to get status for pod" err="pods \"busybox-7b57f96db7-xj7ks\" is forbidden: User \"system:node:ha-472903\" cannot get resource \"pods\" in API group \"\" in the namespace \"default\": no relationship found between node 'ha-472903' and this object" podUID="cac915f6-7630-4320-b6d2-fd18f3c19a17" pod="default/busybox-7b57f96db7-xj7ks"
Sep 16 23:58:39 ha-472903 kubelet[1676]: E0916 23:58:39.039445 1676 status_manager.go:1018] "Failed to get status for pod" err="pods \"busybox-7b57f96db7-xj7ks\" is forbidden: User \"system:node:ha-472903\" cannot get resource \"pods\" in API group \"\" in the namespace \"default\": no relationship found between node 'ha-472903' and this object" podUID="cac915f6-7630-4320-b6d2-fd18f3c19a17" pod="default/busybox-7b57f96db7-xj7ks"
Sep 16 23:58:39 ha-472903 kubelet[1676]: E0916 23:58:39.042381 1676 status_manager.go:1018] "Failed to get status for pod" err="pods \"busybox-7b57f96db7-5pwbb\" is forbidden: User \"system:node:ha-472903\" cannot get resource \"pods\" in API group \"\" in the namespace \"default\": no relationship found between node 'ha-472903' and this object" podUID="cc7a8d10-408f-4655-ac70-54b4af22d9eb" pod="default/busybox-7b57f96db7-5pwbb"
Sep 16 23:58:39 ha-472903 kubelet[1676]: I0916 23:58:39.138755 1676 reconciler_common.go:163] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9njqf\" (UniqueName: \"kubernetes.io/projected/59b9a23c-498d-4802-9790-70931c4a2c06-kube-api-access-9njqf\") pod \"59b9a23c-498d-4802-9790-70931c4a2c06\" (UID: \"59b9a23c-498d-4802-9790-70931c4a2c06\") "
Sep 16 23:58:39 ha-472903 kubelet[1676]: I0916 23:58:39.138821 1676 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-hrb62\" (UniqueName: \"kubernetes.io/projected/cc7a8d10-408f-4655-ac70-54b4af22d9eb-kube-api-access-hrb62\") on node \"ha-472903\" DevicePath \"\""
Sep 16 23:58:39 ha-472903 kubelet[1676]: I0916 23:58:39.138836 1676 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-xrpwc\" (UniqueName: \"kubernetes.io/projected/cac915f6-7630-4320-b6d2-fd18f3c19a17-kube-api-access-xrpwc\") on node \"ha-472903\" DevicePath \"\""
Sep 16 23:58:39 ha-472903 kubelet[1676]: I0916 23:58:39.140952 1676 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59b9a23c-498d-4802-9790-70931c4a2c06-kube-api-access-9njqf" (OuterVolumeSpecName: "kube-api-access-9njqf") pod "59b9a23c-498d-4802-9790-70931c4a2c06" (UID: "59b9a23c-498d-4802-9790-70931c4a2c06"). InnerVolumeSpecName "kube-api-access-9njqf". PluginName "kubernetes.io/projected", VolumeGIDValue ""
Sep 16 23:58:39 ha-472903 kubelet[1676]: I0916 23:58:39.239025 1676 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-9njqf\" (UniqueName: \"kubernetes.io/projected/59b9a23c-498d-4802-9790-70931c4a2c06-kube-api-access-9njqf\") on node \"ha-472903\" DevicePath \"\""
Sep 16 23:58:39 ha-472903 kubelet[1676]: E0916 23:58:39.752137 1676 status_manager.go:1018] "Failed to get status for pod" err="pods \"busybox-7b57f96db7-5pwbb\" is forbidden: User \"system:node:ha-472903\" cannot get resource \"pods\" in API group \"\" in the namespace \"default\": no relationship found between node 'ha-472903' and this object" podUID="cc7a8d10-408f-4655-ac70-54b4af22d9eb" pod="default/busybox-7b57f96db7-5pwbb"
Sep 16 23:58:39 ha-472903 kubelet[1676]: E0916 23:58:39.753199 1676 status_manager.go:1018] "Failed to get status for pod" err="pods \"busybox-7b57f96db7-xj7ks\" is forbidden: User \"system:node:ha-472903\" cannot get resource \"pods\" in API group \"\" in the namespace \"default\": no relationship found between node 'ha-472903' and this object" podUID="cac915f6-7630-4320-b6d2-fd18f3c19a17" pod="default/busybox-7b57f96db7-xj7ks"
Sep 16 23:58:39 ha-472903 kubelet[1676]: I0916 23:58:39.754268 1676 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cac915f6-7630-4320-b6d2-fd18f3c19a17" path="/var/lib/kubelet/pods/cac915f6-7630-4320-b6d2-fd18f3c19a17/volumes"
Sep 16 23:58:39 ha-472903 kubelet[1676]: I0916 23:58:39.754475 1676 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc7a8d10-408f-4655-ac70-54b4af22d9eb" path="/var/lib/kubelet/pods/cc7a8d10-408f-4655-ac70-54b4af22d9eb/volumes"
Sep 16 23:58:40 ha-472903 kubelet[1676]: E0916 23:58:40.056772 1676 status_manager.go:1018] "Failed to get status for pod" err="pods \"busybox-7b57f96db7-5pwbb\" is forbidden: User \"system:node:ha-472903\" cannot get resource \"pods\" in API group \"\" in the namespace \"default\": no relationship found between node 'ha-472903' and this object" podUID="cc7a8d10-408f-4655-ac70-54b4af22d9eb" pod="default/busybox-7b57f96db7-5pwbb"
Sep 16 23:58:40 ha-472903 kubelet[1676]: E0916 23:58:40.057611 1676 status_manager.go:1018] "Failed to get status for pod" err="pods \"busybox-7b57f96db7-xj7ks\" is forbidden: User \"system:node:ha-472903\" cannot get resource \"pods\" in API group \"\" in the namespace \"default\": no relationship found between node 'ha-472903' and this object" podUID="cac915f6-7630-4320-b6d2-fd18f3c19a17" pod="default/busybox-7b57f96db7-xj7ks"
Sep 16 23:58:40 ha-472903 kubelet[1676]: E0916 23:58:40.059208 1676 status_manager.go:1018] "Failed to get status for pod" err="pods \"busybox-7b57f96db7-5pwbb\" is forbidden: User \"system:node:ha-472903\" cannot get resource \"pods\" in API group \"\" in the namespace \"default\": no relationship found between node 'ha-472903' and this object" podUID="cc7a8d10-408f-4655-ac70-54b4af22d9eb" pod="default/busybox-7b57f96db7-5pwbb"
Sep 16 23:58:40 ha-472903 kubelet[1676]: E0916 23:58:40.060512 1676 status_manager.go:1018] "Failed to get status for pod" err="pods \"busybox-7b57f96db7-xj7ks\" is forbidden: User \"system:node:ha-472903\" cannot get resource \"pods\" in API group \"\" in the namespace \"default\": no relationship found between node 'ha-472903' and this object" podUID="cac915f6-7630-4320-b6d2-fd18f3c19a17" pod="default/busybox-7b57f96db7-xj7ks"
Sep 16 23:58:40 ha-472903 kubelet[1676]: I0916 23:58:40.145054 1676 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pjkrp\" (UniqueName: \"kubernetes.io/projected/bd03bad4-af1e-42d0-81fb-6fcaeaa8775e-kube-api-access-pjkrp\") pod \"busybox-7b57f96db7-6hrm6\" (UID: \"bd03bad4-af1e-42d0-81fb-6fcaeaa8775e\") " pod="default/busybox-7b57f96db7-6hrm6"
Sep 16 23:58:41 ha-472903 kubelet[1676]: I0916 23:58:41.754549 1676 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="59b9a23c-498d-4802-9790-70931c4a2c06" path="/var/lib/kubelet/pods/59b9a23c-498d-4802-9790-70931c4a2c06/volumes"
Sep 16 23:58:43 ha-472903 kubelet[1676]: I0916 23:58:43.049200 1676 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="default/busybox-7b57f96db7-6hrm6" podStartSLOduration=3.061025393 podStartE2EDuration="5.049179166s" podCreationTimestamp="2025-09-16 23:58:38 +0000 UTC" firstStartedPulling="2025-09-16 23:58:40.45690156 +0000 UTC m=+110.788333264" lastFinishedPulling="2025-09-16 23:58:42.445055322 +0000 UTC m=+112.776487037" observedRunningTime="2025-09-16 23:58:43.049092106 +0000 UTC m=+113.380523828" watchObservedRunningTime="2025-09-16 23:58:43.049179166 +0000 UTC m=+113.380610888"
-- /stdout --
helpers_test.go:262: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p ha-472903 -n ha-472903
helpers_test.go:269: (dbg) Run: kubectl --context ha-472903 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:280: non-running pods: busybox-7b57f96db7-mknzs
helpers_test.go:282: ======> post-mortem[TestMultiControlPlane/serial/DeployApp]: describe non-running pods <======
helpers_test.go:285: (dbg) Run: kubectl --context ha-472903 describe pod busybox-7b57f96db7-mknzs
helpers_test.go:290: (dbg) kubectl --context ha-472903 describe pod busybox-7b57f96db7-mknzs:
-- stdout --
Name:             busybox-7b57f96db7-mknzs
Namespace:        default
Priority:         0
Service Account:  default
Node:             ha-472903-m03/192.168.49.4
Start Time:       Tue, 16 Sep 2025 23:58:37 +0000
Labels:           app=busybox
                  pod-template-hash=7b57f96db7
Annotations:      <none>
Status:           Pending
IP:
IPs:              <none>
Controlled By:    ReplicaSet/busybox-7b57f96db7
Containers:
  busybox:
    Container ID:
    Image:          gcr.io/k8s-minikube/busybox:1.28
    Image ID:
    Port:           <none>
    Host Port:      <none>
    Command:
      sleep
      3600
    State:          Waiting
      Reason:       ContainerCreating
    Ready:          False
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-gmz92 (ro)
Conditions:
  Type                        Status
  PodReadyToStartContainers   False
  Initialized                 True
  Ready                       False
  ContainersReady             False
  PodScheduled                True
Volumes:
  kube-api-access-gmz92:
    Type:                     Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:   3607
    ConfigMapName:            kube-root-ca.crt
    Optional:                 false
    DownwardAPI:              true
QoS Class:                    BestEffort
Node-Selectors:               <none>
Tolerations:                  node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                              node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type     Reason                   Age                  From               Message
  ----     ------                   ----                 ----               -------
Warning FailedScheduling 12m default-scheduler running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "busybox-7b57f96db7-mknzs": pod busybox-7b57f96db7-mknzs is already assigned to node "ha-472903-m03"
Warning FailedScheduling 12m default-scheduler running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "busybox-7b57f96db7-mknzs": pod busybox-7b57f96db7-mknzs is already assigned to node "ha-472903-m03"
Normal Scheduled 12m default-scheduler Successfully assigned default/busybox-7b57f96db7-mknzs to ha-472903-m03
Warning FailedCreatePodSandBox 12m kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "72439adc47052c2da00cee62587d780275cf6c2423dee9831567464d4725ee9d": failed to find network info for sandbox "72439adc47052c2da00cee62587d780275cf6c2423dee9831567464d4725ee9d"
Warning FailedCreatePodSandBox 11m kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "24ab8b6bd2f38653d2326c375fc81ebf17317e36885547c7b42c011bb95889ed": failed to find network info for sandbox "24ab8b6bd2f38653d2326c375fc81ebf17317e36885547c7b42c011bb95889ed"
Warning FailedCreatePodSandBox 11m kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "300fece4c100bc3e68a19e1fa6f46c8a378753727caaaeb1533dab71f234be58": failed to find network info for sandbox "300fece4c100bc3e68a19e1fa6f46c8a378753727caaaeb1533dab71f234be58"
Warning FailedCreatePodSandBox 11m kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "e49a14b4de5e24fa450a43c124b2916ad7028d35cbc3b0f74595e68ee161d1d0": failed to find network info for sandbox "e49a14b4de5e24fa450a43c124b2916ad7028d35cbc3b0f74595e68ee161d1d0"
Warning FailedCreatePodSandBox 11m kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "efa290ca498f7c70ae29d8d97709edda97bc6b062aac05a3ef6d6a83fbd42797": failed to find network info for sandbox "efa290ca498f7c70ae29d8d97709edda97bc6b062aac05a3ef6d6a83fbd42797"
Warning FailedCreatePodSandBox 11m kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "d5851ce1270b1c8994400ecd7bdabadaf895488957ffb5173dcd7e289db1de6c": failed to find network info for sandbox "d5851ce1270b1c8994400ecd7bdabadaf895488957ffb5173dcd7e289db1de6c"
Warning FailedCreatePodSandBox 10m kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "11aaa894ae434b08da8122c8f3445d03b4c1e54dfb071596f63a0e4654f49f10": failed to find network info for sandbox "11aaa894ae434b08da8122c8f3445d03b4c1e54dfb071596f63a0e4654f49f10"
Warning FailedCreatePodSandBox 10m kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "c8126e80126ff891a4935c60cfec55753f6bb51d789c0eb46098b72267c7d53c": failed to find network info for sandbox "c8126e80126ff891a4935c60cfec55753f6bb51d789c0eb46098b72267c7d53c"
Warning FailedCreatePodSandBox 10m kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "1389a2f92f350a6f495c76f80031300b6442a6a0cc67abd4b045ff9150b3fc3a": failed to find network info for sandbox "1389a2f92f350a6f495c76f80031300b6442a6a0cc67abd4b045ff9150b3fc3a"
Warning FailedCreatePodSandBox 2m3s (x38 over 10m) kubelet (combined from similar events): Failed to create pod sandbox: rpc error: code = Unknown desc = failed to setup network for sandbox "c3a9afe91461f3ea405980387ac5fab85785c7cf3f180d2b0f894e1df94ca62d": failed to find network info for sandbox "c3a9afe91461f3ea405980387ac5fab85785c7cf3f180d2b0f894e1df94ca62d"
-- /stdout --
helpers_test.go:293: <<< TestMultiControlPlane/serial/DeployApp FAILED: end of post-mortem logs <<<
helpers_test.go:294: ---------------------/post-mortem---------------------------------
--- FAIL: TestMultiControlPlane/serial/DeployApp (727.37s)