=== RUN TestMultiControlPlane/serial/DeployApp
ha_test.go:128: (dbg) Run: out/minikube-linux-amd64 -p ha-198834 kubectl -- apply -f ./testdata/ha/ha-pod-dns-test.yaml
ha_test.go:133: (dbg) Run: out/minikube-linux-amd64 -p ha-198834 kubectl -- rollout status deployment/busybox
ha_test.go:133: (dbg) Done: out/minikube-linux-amd64 -p ha-198834 kubectl -- rollout status deployment/busybox: (3.904621326s)
ha_test.go:140: (dbg) Run: out/minikube-linux-amd64 -p ha-198834 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
I0917 00:01:39.560963 665399 retry.go:31] will retry after 1.383823161s: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
ha_test.go:140: (dbg) Run: out/minikube-linux-amd64 -p ha-198834 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
I0917 00:01:41.063454 665399 retry.go:31] will retry after 842.947957ms: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
ha_test.go:140: (dbg) Run: out/minikube-linux-amd64 -p ha-198834 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
I0917 00:01:42.026518 665399 retry.go:31] will retry after 2.86026853s: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
ha_test.go:140: (dbg) Run: out/minikube-linux-amd64 -p ha-198834 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
I0917 00:01:45.009554 665399 retry.go:31] will retry after 3.226009259s: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
ha_test.go:140: (dbg) Run: out/minikube-linux-amd64 -p ha-198834 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
I0917 00:01:48.353043 665399 retry.go:31] will retry after 4.239694799s: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
ha_test.go:140: (dbg) Run: out/minikube-linux-amd64 -p ha-198834 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
I0917 00:01:52.715823 665399 retry.go:31] will retry after 4.042048106s: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
E0917 00:01:54.644082 665399 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/functional-650494/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
ha_test.go:140: (dbg) Run: out/minikube-linux-amd64 -p ha-198834 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
I0917 00:01:56.879154 665399 retry.go:31] will retry after 13.503157036s: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
ha_test.go:140: (dbg) Run: out/minikube-linux-amd64 -p ha-198834 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
I0917 00:02:10.513619 665399 retry.go:31] will retry after 15.518470945s: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
ha_test.go:140: (dbg) Run: out/minikube-linux-amd64 -p ha-198834 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
I0917 00:02:26.156426 665399 retry.go:31] will retry after 21.168449959s: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
E0917 00:02:35.606769 665399 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/functional-650494/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
ha_test.go:140: (dbg) Run: out/minikube-linux-amd64 -p ha-198834 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
I0917 00:02:47.444067 665399 retry.go:31] will retry after 30.971578059s: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
ha_test.go:140: (dbg) Run: out/minikube-linux-amd64 -p ha-198834 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:149: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
ha_test.go:159: failed to resolve pod IPs: expected 3 Pod IPs but got 2 (may be temporary), output: "\n-- stdout --\n\t'10.244.1.2 10.244.0.4'\n\n-- /stdout --"
ha_test.go:163: (dbg) Run: out/minikube-linux-amd64 -p ha-198834 kubectl -- get pods -o jsonpath='{.items[*].metadata.name}'
ha_test.go:171: (dbg) Run: out/minikube-linux-amd64 -p ha-198834 kubectl -- exec busybox-7b57f96db7-kg4q6 -- nslookup kubernetes.io
ha_test.go:171: (dbg) Run: out/minikube-linux-amd64 -p ha-198834 kubectl -- exec busybox-7b57f96db7-l2jn5 -- nslookup kubernetes.io
ha_test.go:171: (dbg) Non-zero exit: out/minikube-linux-amd64 -p ha-198834 kubectl -- exec busybox-7b57f96db7-l2jn5 -- nslookup kubernetes.io: exit status 1 (167.233028ms)
-- stdout --
Server: 10.96.0.10
Address 1: 10.96.0.10
-- /stdout --
** stderr **
nslookup: can't resolve 'kubernetes.io'
command terminated with exit code 1
** /stderr **
ha_test.go:173: Pod busybox-7b57f96db7-l2jn5 could not resolve 'kubernetes.io': exit status 1
ha_test.go:171: (dbg) Run: out/minikube-linux-amd64 -p ha-198834 kubectl -- exec busybox-7b57f96db7-pstjp -- nslookup kubernetes.io
ha_test.go:181: (dbg) Run: out/minikube-linux-amd64 -p ha-198834 kubectl -- exec busybox-7b57f96db7-kg4q6 -- nslookup kubernetes.default
ha_test.go:181: (dbg) Run: out/minikube-linux-amd64 -p ha-198834 kubectl -- exec busybox-7b57f96db7-l2jn5 -- nslookup kubernetes.default
ha_test.go:181: (dbg) Non-zero exit: out/minikube-linux-amd64 -p ha-198834 kubectl -- exec busybox-7b57f96db7-l2jn5 -- nslookup kubernetes.default: exit status 1 (163.292311ms)
-- stdout --
Server: 10.96.0.10
Address 1: 10.96.0.10
-- /stdout --
** stderr **
nslookup: can't resolve 'kubernetes.default'
command terminated with exit code 1
** /stderr **
ha_test.go:183: Pod busybox-7b57f96db7-l2jn5 could not resolve 'kubernetes.default': exit status 1
ha_test.go:181: (dbg) Run: out/minikube-linux-amd64 -p ha-198834 kubectl -- exec busybox-7b57f96db7-pstjp -- nslookup kubernetes.default
ha_test.go:189: (dbg) Run: out/minikube-linux-amd64 -p ha-198834 kubectl -- exec busybox-7b57f96db7-kg4q6 -- nslookup kubernetes.default.svc.cluster.local
ha_test.go:189: (dbg) Run: out/minikube-linux-amd64 -p ha-198834 kubectl -- exec busybox-7b57f96db7-l2jn5 -- nslookup kubernetes.default.svc.cluster.local
ha_test.go:189: (dbg) Non-zero exit: out/minikube-linux-amd64 -p ha-198834 kubectl -- exec busybox-7b57f96db7-l2jn5 -- nslookup kubernetes.default.svc.cluster.local: exit status 1 (162.762104ms)
-- stdout --
Server: 10.96.0.10
Address 1: 10.96.0.10
-- /stdout --
** stderr **
nslookup: can't resolve 'kubernetes.default.svc.cluster.local'
command terminated with exit code 1
** /stderr **
ha_test.go:191: Pod busybox-7b57f96db7-l2jn5 could not resolve local service (kubernetes.default.svc.cluster.local): exit status 1
ha_test.go:189: (dbg) Run: out/minikube-linux-amd64 -p ha-198834 kubectl -- exec busybox-7b57f96db7-pstjp -- nslookup kubernetes.default.svc.cluster.local
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestMultiControlPlane/serial/DeployApp]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======> post-mortem[TestMultiControlPlane/serial/DeployApp]: docker inspect <======
helpers_test.go:239: (dbg) Run: docker inspect ha-198834
helpers_test.go:243: (dbg) docker inspect ha-198834:
-- stdout --
[
{
"Id": "47e5b1e4a4a54393d95d2fc54ba8e6df0394126726cb08c4999522c520900c51",
"Created": "2025-09-16T23:57:02.499662369Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 722917,
"ExitCode": 0,
"Error": "",
"StartedAt": "2025-09-16T23:57:02.530585618Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:c6b5532e987b5b4f5fc9cb0336e378ed49c0542bad8cbfc564b71e977a6269de",
"ResolvConfPath": "/var/lib/docker/containers/47e5b1e4a4a54393d95d2fc54ba8e6df0394126726cb08c4999522c520900c51/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/47e5b1e4a4a54393d95d2fc54ba8e6df0394126726cb08c4999522c520900c51/hostname",
"HostsPath": "/var/lib/docker/containers/47e5b1e4a4a54393d95d2fc54ba8e6df0394126726cb08c4999522c520900c51/hosts",
"LogPath": "/var/lib/docker/containers/47e5b1e4a4a54393d95d2fc54ba8e6df0394126726cb08c4999522c520900c51/47e5b1e4a4a54393d95d2fc54ba8e6df0394126726cb08c4999522c520900c51-json.log",
"Name": "/ha-198834",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"ha-198834:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {
"max-size": "100m"
}
},
"NetworkMode": "ha-198834",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "private",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 3221225472,
"NanoCpus": 0,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 6442450944,
"MemorySwappiness": null,
"OomKillDisable": null,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"ID": "47e5b1e4a4a54393d95d2fc54ba8e6df0394126726cb08c4999522c520900c51",
"LowerDir": "/var/lib/docker/overlay2/6e23b044058f2d5382195c39b01075877743d56cb3b0f346df896a9277153245-init/diff:/var/lib/docker/overlay2/c570dacd810ac8c787e753d7a3ab5a399cb123b70a29f21b9da6ee575027d4fd/diff",
"MergedDir": "/var/lib/docker/overlay2/6e23b044058f2d5382195c39b01075877743d56cb3b0f346df896a9277153245/merged",
"UpperDir": "/var/lib/docker/overlay2/6e23b044058f2d5382195c39b01075877743d56cb3b0f346df896a9277153245/diff",
"WorkDir": "/var/lib/docker/overlay2/6e23b044058f2d5382195c39b01075877743d56cb3b0f346df896a9277153245/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "volume",
"Name": "ha-198834",
"Source": "/var/lib/docker/volumes/ha-198834/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
},
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
}
],
"Config": {
"Hostname": "ha-198834",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "ha-198834",
"name.minikube.sigs.k8s.io": "ha-198834",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "6698b0ad85a9078b37114c4e66646c6dc7a67a706d28557d80b29fea1d15d512",
"SandboxKey": "/var/run/docker/netns/6698b0ad85a9",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32783"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32784"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32787"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32785"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32786"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"ha-198834": {
"IPAMConfig": {
"IPv4Address": "192.168.49.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "62:eb:f5:3a:ee:ff",
"DriverOpts": null,
"GwPriority": 0,
"NetworkID": "ab651df73000b515d018703342371ce7de7a02a0092c0b9b72849c77d387bab3",
"EndpointID": "669cb4f772890bad35a4ad4cdb1934f42912d7e03fc353fd08c3e3a046cfba54",
"Gateway": "192.168.49.1",
"IPAddress": "192.168.49.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"ha-198834",
"47e5b1e4a4a5"
]
}
}
}
}
]
-- /stdout --
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p ha-198834 -n ha-198834
helpers_test.go:252: <<< TestMultiControlPlane/serial/DeployApp FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestMultiControlPlane/serial/DeployApp]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-amd64 -p ha-198834 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-amd64 -p ha-198834 logs -n 25: (1.026758806s)
helpers_test.go:260: TestMultiControlPlane/serial/DeployApp logs:
-- stdout --
==> Audit <==
┌─────────┬───────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬───────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼───────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼───────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ delete │ -p functional-650494 │ functional-650494 │ jenkins │ v1.37.0 │ 16 Sep 25 23:56 UTC │ 16 Sep 25 23:56 UTC │
│ start │ ha-198834 start --ha --memory 3072 --wait true --alsologtostderr -v 5 --driver=docker --container-runtime=docker │ ha-198834 │ jenkins │ v1.37.0 │ 16 Sep 25 23:56 UTC │ 17 Sep 25 00:01 UTC │
│ kubectl │ ha-198834 kubectl -- apply -f ./testdata/ha/ha-pod-dns-test.yaml │ ha-198834 │ jenkins │ v1.37.0 │ 17 Sep 25 00:01 UTC │ 17 Sep 25 00:01 UTC │
│ kubectl │ ha-198834 kubectl -- rollout status deployment/busybox │ ha-198834 │ jenkins │ v1.37.0 │ 17 Sep 25 00:01 UTC │ 17 Sep 25 00:01 UTC │
│ kubectl │ ha-198834 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}' │ ha-198834 │ jenkins │ v1.37.0 │ 17 Sep 25 00:01 UTC │ 17 Sep 25 00:01 UTC │
│ kubectl │ ha-198834 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}' │ ha-198834 │ jenkins │ v1.37.0 │ 17 Sep 25 00:01 UTC │ 17 Sep 25 00:01 UTC │
│ kubectl │ ha-198834 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}' │ ha-198834 │ jenkins │ v1.37.0 │ 17 Sep 25 00:01 UTC │ 17 Sep 25 00:01 UTC │
│ kubectl │ ha-198834 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}' │ ha-198834 │ jenkins │ v1.37.0 │ 17 Sep 25 00:01 UTC │ 17 Sep 25 00:01 UTC │
│ kubectl │ ha-198834 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}' │ ha-198834 │ jenkins │ v1.37.0 │ 17 Sep 25 00:01 UTC │ 17 Sep 25 00:01 UTC │
│ kubectl │ ha-198834 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}' │ ha-198834 │ jenkins │ v1.37.0 │ 17 Sep 25 00:01 UTC │ 17 Sep 25 00:01 UTC │
│ kubectl │ ha-198834 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}' │ ha-198834 │ jenkins │ v1.37.0 │ 17 Sep 25 00:01 UTC │ 17 Sep 25 00:01 UTC │
│ kubectl │ ha-198834 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}' │ ha-198834 │ jenkins │ v1.37.0 │ 17 Sep 25 00:02 UTC │ 17 Sep 25 00:02 UTC │
│ kubectl │ ha-198834 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}' │ ha-198834 │ jenkins │ v1.37.0 │ 17 Sep 25 00:02 UTC │ 17 Sep 25 00:02 UTC │
│ kubectl │ ha-198834 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}' │ ha-198834 │ jenkins │ v1.37.0 │ 17 Sep 25 00:02 UTC │ 17 Sep 25 00:02 UTC │
│ kubectl │ ha-198834 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}' │ ha-198834 │ jenkins │ v1.37.0 │ 17 Sep 25 00:03 UTC │ 17 Sep 25 00:03 UTC │
│ kubectl │ ha-198834 kubectl -- get pods -o jsonpath='{.items[*].metadata.name}' │ ha-198834 │ jenkins │ v1.37.0 │ 17 Sep 25 00:03 UTC │ 17 Sep 25 00:03 UTC │
│ kubectl │ ha-198834 kubectl -- exec busybox-7b57f96db7-kg4q6 -- nslookup kubernetes.io │ ha-198834 │ jenkins │ v1.37.0 │ 17 Sep 25 00:03 UTC │ 17 Sep 25 00:03 UTC │
│ kubectl │ ha-198834 kubectl -- exec busybox-7b57f96db7-l2jn5 -- nslookup kubernetes.io │ ha-198834 │ jenkins │ v1.37.0 │ 17 Sep 25 00:03 UTC │ │
│ kubectl │ ha-198834 kubectl -- exec busybox-7b57f96db7-pstjp -- nslookup kubernetes.io │ ha-198834 │ jenkins │ v1.37.0 │ 17 Sep 25 00:03 UTC │ 17 Sep 25 00:03 UTC │
│ kubectl │ ha-198834 kubectl -- exec busybox-7b57f96db7-kg4q6 -- nslookup kubernetes.default │ ha-198834 │ jenkins │ v1.37.0 │ 17 Sep 25 00:03 UTC │ 17 Sep 25 00:03 UTC │
│ kubectl │ ha-198834 kubectl -- exec busybox-7b57f96db7-l2jn5 -- nslookup kubernetes.default │ ha-198834 │ jenkins │ v1.37.0 │ 17 Sep 25 00:03 UTC │ │
│ kubectl │ ha-198834 kubectl -- exec busybox-7b57f96db7-pstjp -- nslookup kubernetes.default │ ha-198834 │ jenkins │ v1.37.0 │ 17 Sep 25 00:03 UTC │ 17 Sep 25 00:03 UTC │
│ kubectl │ ha-198834 kubectl -- exec busybox-7b57f96db7-kg4q6 -- nslookup kubernetes.default.svc.cluster.local │ ha-198834 │ jenkins │ v1.37.0 │ 17 Sep 25 00:03 UTC │ 17 Sep 25 00:03 UTC │
│ kubectl │ ha-198834 kubectl -- exec busybox-7b57f96db7-l2jn5 -- nslookup kubernetes.default.svc.cluster.local │ ha-198834 │ jenkins │ v1.37.0 │ 17 Sep 25 00:03 UTC │ │
│ kubectl │ ha-198834 kubectl -- exec busybox-7b57f96db7-pstjp -- nslookup kubernetes.default.svc.cluster.local │ ha-198834 │ jenkins │ v1.37.0 │ 17 Sep 25 00:03 UTC │ 17 Sep 25 00:03 UTC │
└─────────┴───────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴───────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/09/16 23:56:58
Running on machine: ubuntu-20-agent
Binary: Built with gc go1.24.6 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0916 23:56:58.042095 722351 out.go:360] Setting OutFile to fd 1 ...
I0916 23:56:58.042245 722351 out.go:408] TERM=,COLORTERM=, which probably does not support color
I0916 23:56:58.042257 722351 out.go:374] Setting ErrFile to fd 2...
I0916 23:56:58.042263 722351 out.go:408] TERM=,COLORTERM=, which probably does not support color
I0916 23:56:58.042455 722351 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21550-661878/.minikube/bin
I0916 23:56:58.043028 722351 out.go:368] Setting JSON to false
I0916 23:56:58.043951 722351 start.go:130] hostinfo: {"hostname":"ubuntu-20-agent","uptime":9550,"bootTime":1758057468,"procs":194,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1037-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I0916 23:56:58.044043 722351 start.go:140] virtualization: kvm guest
I0916 23:56:58.045935 722351 out.go:179] * [ha-198834] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
I0916 23:56:58.047229 722351 notify.go:220] Checking for updates...
I0916 23:56:58.047241 722351 out.go:179] - MINIKUBE_LOCATION=21550
I0916 23:56:58.048693 722351 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0916 23:56:58.049858 722351 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21550-661878/kubeconfig
I0916 23:56:58.051172 722351 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21550-661878/.minikube
I0916 23:56:58.052335 722351 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64
I0916 23:56:58.053390 722351 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I0916 23:56:58.054603 722351 driver.go:421] Setting default libvirt URI to qemu:///system
I0916 23:56:58.077260 722351 docker.go:123] docker version: linux-28.4.0:Docker Engine - Community
I0916 23:56:58.077444 722351 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0916 23:56:58.132853 722351 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:28 OomKillDisable:false NGoroutines:47 SystemTime:2025-09-16 23:56:58.122248025 +0000 UTC LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-1037-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652170752 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent Labels:[] ExperimentalBuild:false ServerVersion:28.4.0 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.27.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.39.2] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner (EXPERIMENTAL) Vendor:Docker Inc. Version:v0.1.39] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0916 23:56:58.132998 722351 docker.go:318] overlay module found
I0916 23:56:58.135611 722351 out.go:179] * Using the docker driver based on user configuration
I0916 23:56:58.136750 722351 start.go:304] selected driver: docker
I0916 23:56:58.136770 722351 start.go:918] validating driver "docker" against <nil>
I0916 23:56:58.136782 722351 start.go:929] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0916 23:56:58.137364 722351 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0916 23:56:58.190249 722351 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:28 OomKillDisable:false NGoroutines:47 SystemTime:2025-09-16 23:56:58.179811473 +0000 UTC LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-1037-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652170752 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent Labels:[] ExperimentalBuild:false ServerVersion:28.4.0 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.27.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.39.2] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner (EXPERIMENTAL) Vendor:Docker Inc. Version:v0.1.39] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0916 23:56:58.190455 722351 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I0916 23:56:58.190736 722351 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0916 23:56:58.192641 722351 out.go:179] * Using Docker driver with root privileges
I0916 23:56:58.193978 722351 cni.go:84] Creating CNI manager for ""
I0916 23:56:58.194069 722351 cni.go:136] multinode detected (0 nodes found), recommending kindnet
I0916 23:56:58.194094 722351 start_flags.go:336] Found "CNI" CNI - setting NetworkPlugin=cni
I0916 23:56:58.194188 722351 start.go:348] cluster config:
{Name:ha-198834 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:ha-198834 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0916 23:56:58.195605 722351 out.go:179] * Starting "ha-198834" primary control-plane node in "ha-198834" cluster
I0916 23:56:58.196688 722351 cache.go:123] Beginning downloading kic base image for docker with docker
I0916 23:56:58.197669 722351 out.go:179] * Pulling base image v0.0.48 ...
I0916 23:56:58.198952 722351 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime docker
I0916 23:56:58.199018 722351 preload.go:146] Found local preload: /home/jenkins/minikube-integration/21550-661878/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-amd64.tar.lz4
I0916 23:56:58.199034 722351 cache.go:58] Caching tarball of preloaded images
I0916 23:56:58.199064 722351 image.go:81] Checking for gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon
I0916 23:56:58.199149 722351 preload.go:172] Found /home/jenkins/minikube-integration/21550-661878/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-amd64.tar.lz4 in cache, skipping download
I0916 23:56:58.199167 722351 cache.go:61] Finished verifying existence of preloaded tar for v1.34.0 on docker
I0916 23:56:58.199618 722351 profile.go:143] Saving config to /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/config.json ...
I0916 23:56:58.199650 722351 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/config.json: {Name:mkfd30616e0167206552e80675557cfcc4fee172 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 23:56:58.218451 722351 image.go:100] Found gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon, skipping pull
I0916 23:56:58.218470 722351 cache.go:147] gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 exists in daemon, skipping load
I0916 23:56:58.218487 722351 cache.go:232] Successfully downloaded all kic artifacts
I0916 23:56:58.218525 722351 start.go:360] acquireMachinesLock for ha-198834: {Name:mk72787ec2f43d39f6405224749d27e293a28eb6 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0916 23:56:58.218643 722351 start.go:364] duration metric: took 94.227µs to acquireMachinesLock for "ha-198834"
I0916 23:56:58.218683 722351 start.go:93] Provisioning new machine with config: &{Name:ha-198834 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:ha-198834 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}
I0916 23:56:58.218779 722351 start.go:125] createHost starting for "" (driver="docker")
I0916 23:56:58.220943 722351 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I0916 23:56:58.221292 722351 start.go:159] libmachine.API.Create for "ha-198834" (driver="docker")
I0916 23:56:58.221335 722351 client.go:168] LocalClient.Create starting
I0916 23:56:58.221405 722351 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21550-661878/.minikube/certs/ca.pem
I0916 23:56:58.221441 722351 main.go:141] libmachine: Decoding PEM data...
I0916 23:56:58.221461 722351 main.go:141] libmachine: Parsing certificate...
I0916 23:56:58.221543 722351 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21550-661878/.minikube/certs/cert.pem
I0916 23:56:58.221570 722351 main.go:141] libmachine: Decoding PEM data...
I0916 23:56:58.221588 722351 main.go:141] libmachine: Parsing certificate...
I0916 23:56:58.221956 722351 cli_runner.go:164] Run: docker network inspect ha-198834 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W0916 23:56:58.238665 722351 cli_runner.go:211] docker network inspect ha-198834 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I0916 23:56:58.238743 722351 network_create.go:284] running [docker network inspect ha-198834] to gather additional debugging logs...
I0916 23:56:58.238769 722351 cli_runner.go:164] Run: docker network inspect ha-198834
W0916 23:56:58.254999 722351 cli_runner.go:211] docker network inspect ha-198834 returned with exit code 1
I0916 23:56:58.255086 722351 network_create.go:287] error running [docker network inspect ha-198834]: docker network inspect ha-198834: exit status 1
stdout:
[]
stderr:
Error response from daemon: network ha-198834 not found
I0916 23:56:58.255122 722351 network_create.go:289] output of [docker network inspect ha-198834]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network ha-198834 not found
** /stderr **
I0916 23:56:58.255285 722351 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0916 23:56:58.272422 722351 network.go:206] using free private subnet 192.168.49.0/24: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001b56820}
I0916 23:56:58.272473 722351 network_create.go:124] attempt to create docker network ha-198834 192.168.49.0/24 with gateway 192.168.49.1 and MTU of 1500 ...
I0916 23:56:58.272524 722351 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.49.0/24 --gateway=192.168.49.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=ha-198834 ha-198834
I0916 23:56:58.332062 722351 network_create.go:108] docker network ha-198834 192.168.49.0/24 created
I0916 23:56:58.332109 722351 kic.go:121] calculated static IP "192.168.49.2" for the "ha-198834" container
I0916 23:56:58.332180 722351 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0916 23:56:58.347722 722351 cli_runner.go:164] Run: docker volume create ha-198834 --label name.minikube.sigs.k8s.io=ha-198834 --label created_by.minikube.sigs.k8s.io=true
I0916 23:56:58.365722 722351 oci.go:103] Successfully created a docker volume ha-198834
I0916 23:56:58.365811 722351 cli_runner.go:164] Run: docker run --rm --name ha-198834-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=ha-198834 --entrypoint /usr/bin/test -v ha-198834:/var gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -d /var/lib
I0916 23:56:58.752716 722351 oci.go:107] Successfully prepared a docker volume ha-198834
I0916 23:56:58.752766 722351 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime docker
I0916 23:56:58.752791 722351 kic.go:194] Starting extracting preloaded images to volume ...
I0916 23:56:58.752860 722351 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21550-661878/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v ha-198834:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -I lz4 -xf /preloaded.tar -C /extractDir
I0916 23:57:02.431811 722351 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21550-661878/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v ha-198834:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -I lz4 -xf /preloaded.tar -C /extractDir: (3.678879308s)
I0916 23:57:02.431852 722351 kic.go:203] duration metric: took 3.679056906s to extract preloaded images to volume ...
W0916 23:57:02.431981 722351 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
W0916 23:57:02.432030 722351 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.
I0916 23:57:02.432094 722351 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0916 23:57:02.483868 722351 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname ha-198834 --name ha-198834 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=ha-198834 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=ha-198834 --network ha-198834 --ip 192.168.49.2 --volume ha-198834:/var --security-opt apparmor=unconfined --memory=3072mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1
I0916 23:57:02.749244 722351 cli_runner.go:164] Run: docker container inspect ha-198834 --format={{.State.Running}}
I0916 23:57:02.769059 722351 cli_runner.go:164] Run: docker container inspect ha-198834 --format={{.State.Status}}
I0916 23:57:02.787342 722351 cli_runner.go:164] Run: docker exec ha-198834 stat /var/lib/dpkg/alternatives/iptables
I0916 23:57:02.836161 722351 oci.go:144] the created container "ha-198834" has a running status.
I0916 23:57:02.836195 722351 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21550-661878/.minikube/machines/ha-198834/id_rsa...
I0916 23:57:03.023198 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/machines/ha-198834/id_rsa.pub -> /home/docker/.ssh/authorized_keys
I0916 23:57:03.023332 722351 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21550-661878/.minikube/machines/ha-198834/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0916 23:57:03.051071 722351 cli_runner.go:164] Run: docker container inspect ha-198834 --format={{.State.Status}}
I0916 23:57:03.071057 722351 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0916 23:57:03.071081 722351 kic_runner.go:114] Args: [docker exec --privileged ha-198834 chown docker:docker /home/docker/.ssh/authorized_keys]
I0916 23:57:03.121506 722351 cli_runner.go:164] Run: docker container inspect ha-198834 --format={{.State.Status}}
I0916 23:57:03.138447 722351 machine.go:93] provisionDockerMachine start ...
I0916 23:57:03.138553 722351 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-198834
I0916 23:57:03.156407 722351 main.go:141] libmachine: Using SSH client type: native
I0916 23:57:03.156657 722351 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32783 <nil> <nil>}
I0916 23:57:03.156674 722351 main.go:141] libmachine: About to run SSH command:
hostname
I0916 23:57:03.295893 722351 main.go:141] libmachine: SSH cmd err, output: <nil>: ha-198834
I0916 23:57:03.295938 722351 ubuntu.go:182] provisioning hostname "ha-198834"
I0916 23:57:03.296023 722351 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-198834
I0916 23:57:03.314748 722351 main.go:141] libmachine: Using SSH client type: native
I0916 23:57:03.314993 722351 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32783 <nil> <nil>}
I0916 23:57:03.315008 722351 main.go:141] libmachine: About to run SSH command:
sudo hostname ha-198834 && echo "ha-198834" | sudo tee /etc/hostname
I0916 23:57:03.463642 722351 main.go:141] libmachine: SSH cmd err, output: <nil>: ha-198834
I0916 23:57:03.463716 722351 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-198834
I0916 23:57:03.480946 722351 main.go:141] libmachine: Using SSH client type: native
I0916 23:57:03.481224 722351 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32783 <nil> <nil>}
I0916 23:57:03.481264 722351 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\sha-198834' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 ha-198834/g' /etc/hosts;
else
echo '127.0.1.1 ha-198834' | sudo tee -a /etc/hosts;
fi
fi
I0916 23:57:03.616528 722351 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0916 23:57:03.616561 722351 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21550-661878/.minikube CaCertPath:/home/jenkins/minikube-integration/21550-661878/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21550-661878/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21550-661878/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21550-661878/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21550-661878/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21550-661878/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21550-661878/.minikube}
I0916 23:57:03.616587 722351 ubuntu.go:190] setting up certificates
I0916 23:57:03.616603 722351 provision.go:84] configureAuth start
I0916 23:57:03.616666 722351 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-198834
I0916 23:57:03.633505 722351 provision.go:143] copyHostCerts
I0916 23:57:03.633553 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/certs/cert.pem -> /home/jenkins/minikube-integration/21550-661878/.minikube/cert.pem
I0916 23:57:03.633590 722351 exec_runner.go:144] found /home/jenkins/minikube-integration/21550-661878/.minikube/cert.pem, removing ...
I0916 23:57:03.633601 722351 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21550-661878/.minikube/cert.pem
I0916 23:57:03.633689 722351 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21550-661878/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21550-661878/.minikube/cert.pem (1123 bytes)
I0916 23:57:03.633796 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/certs/key.pem -> /home/jenkins/minikube-integration/21550-661878/.minikube/key.pem
I0916 23:57:03.633824 722351 exec_runner.go:144] found /home/jenkins/minikube-integration/21550-661878/.minikube/key.pem, removing ...
I0916 23:57:03.633834 722351 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21550-661878/.minikube/key.pem
I0916 23:57:03.633870 722351 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21550-661878/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21550-661878/.minikube/key.pem (1679 bytes)
I0916 23:57:03.633969 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/certs/ca.pem -> /home/jenkins/minikube-integration/21550-661878/.minikube/ca.pem
I0916 23:57:03.633996 722351 exec_runner.go:144] found /home/jenkins/minikube-integration/21550-661878/.minikube/ca.pem, removing ...
I0916 23:57:03.634007 722351 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21550-661878/.minikube/ca.pem
I0916 23:57:03.634050 722351 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21550-661878/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21550-661878/.minikube/ca.pem (1078 bytes)
I0916 23:57:03.634188 722351 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21550-661878/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21550-661878/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21550-661878/.minikube/certs/ca-key.pem org=jenkins.ha-198834 san=[127.0.0.1 192.168.49.2 ha-198834 localhost minikube]
I0916 23:57:03.786555 722351 provision.go:177] copyRemoteCerts
I0916 23:57:03.786617 722351 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0916 23:57:03.786691 722351 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-198834
I0916 23:57:03.804115 722351 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32783 SSHKeyPath:/home/jenkins/minikube-integration/21550-661878/.minikube/machines/ha-198834/id_rsa Username:docker}
I0916 23:57:03.900955 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/machines/server.pem -> /etc/docker/server.pem
I0916 23:57:03.901014 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/machines/server.pem --> /etc/docker/server.pem (1200 bytes)
I0916 23:57:03.928655 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/machines/server-key.pem -> /etc/docker/server-key.pem
I0916 23:57:03.928721 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I0916 23:57:03.953468 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/certs/ca.pem -> /etc/docker/ca.pem
I0916 23:57:03.953537 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I0916 23:57:03.978330 722351 provision.go:87] duration metric: took 361.708211ms to configureAuth
I0916 23:57:03.978356 722351 ubuntu.go:206] setting minikube options for container-runtime
I0916 23:57:03.978536 722351 config.go:182] Loaded profile config "ha-198834": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0916 23:57:03.978599 722351 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-198834
I0916 23:57:03.995700 722351 main.go:141] libmachine: Using SSH client type: native
I0916 23:57:03.995934 722351 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32783 <nil> <nil>}
I0916 23:57:03.995954 722351 main.go:141] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I0916 23:57:04.131514 722351 main.go:141] libmachine: SSH cmd err, output: <nil>: overlay
I0916 23:57:04.131541 722351 ubuntu.go:71] root file system type: overlay
I0916 23:57:04.131675 722351 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I0916 23:57:04.131752 722351 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-198834
I0916 23:57:04.148752 722351 main.go:141] libmachine: Using SSH client type: native
I0916 23:57:04.148996 722351 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32783 <nil> <nil>}
I0916 23:57:04.149060 722351 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
-H fd:// --containerd=/run/containerd/containerd.sock \
-H unix:///var/run/docker.sock \
--default-ulimit=nofile=1048576:1048576 \
--tlsverify \
--tlscacert /etc/docker/ca.pem \
--tlscert /etc/docker/server.pem \
--tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I0916 23:57:04.298185 722351 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
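The empty ExecStart= followed by the full ExecStart= in the unit above is the standard systemd idiom for replacing, rather than appending to, a command inherited from another unit definition, as the embedded comments explain. Once the new unit is installed, the effective command can be confirmed from inside the node; a minimal check (a sketch, not part of this run, although `systemctl cat docker.service` does appear later in the log):

systemctl cat docker.service
systemctl show docker.service -p ExecStart   # should report exactly one dockerd invocation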
I0916 23:57:04.298270 722351 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-198834
I0916 23:57:04.315091 722351 main.go:141] libmachine: Using SSH client type: native
I0916 23:57:04.315309 722351 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32783 <nil> <nil>}
I0916 23:57:04.315326 722351 main.go:141] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I0916 23:57:05.420254 722351 main.go:141] libmachine: SSH cmd err, output: <nil>: --- /lib/systemd/system/docker.service 2025-09-03 20:55:49.000000000 +0000
+++ /lib/systemd/system/docker.service.new 2025-09-16 23:57:04.295122578 +0000
@@ -9,23 +9,34 @@
[Service]
Type=notify
-# the default is not to use systemd for cgroups because the delegate issues still
-# exists and systemd currently does not support the cgroup feature set required
-# for containers run by docker
-ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
-ExecReload=/bin/kill -s HUP $MAINPID
-TimeoutStartSec=0
-RestartSec=2
Restart=always
+
+
+# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
+# The base configuration already specifies an 'ExecStart=...' command. The first directive
+# here is to clear out that command inherited from the base configuration. Without this,
+# the command from the base configuration and the command specified here are treated as
+# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
+# will catch this invalid input and refuse to start the service with an error like:
+# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
+
+# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
+# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
+ExecStart=
+ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
+ExecReload=/bin/kill -s HUP $MAINPID
+
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
-# Comment TasksMax if your systemd version does not support it.
-# Only systemd 226 and above support this option.
+# Uncomment TasksMax if your systemd version supports it.
+# Only systemd 226 and above support this version.
TasksMax=infinity
+TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable docker
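The SSH command above (`diff -u ... || { mv ...; daemon-reload; enable; restart; }`) installs the new unit only when it differs from what is already on disk, relying on diff's non-zero exit status to trigger the replacement. The same pattern written out as a sketch, with the paths from this log:

# Sketch of the install-only-if-changed pattern used above
NEW=/lib/systemd/system/docker.service.new
CUR=/lib/systemd/system/docker.service
if ! sudo diff -u "$CUR" "$NEW"; then
  sudo mv "$NEW" "$CUR"
  sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker
fi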
I0916 23:57:05.420296 722351 machine.go:96] duration metric: took 2.281822221s to provisionDockerMachine
I0916 23:57:05.420315 722351 client.go:171] duration metric: took 7.198967751s to LocalClient.Create
I0916 23:57:05.420340 722351 start.go:167] duration metric: took 7.199048943s to libmachine.API.Create "ha-198834"
I0916 23:57:05.420350 722351 start.go:293] postStartSetup for "ha-198834" (driver="docker")
I0916 23:57:05.420364 722351 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0916 23:57:05.420443 722351 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0916 23:57:05.420495 722351 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-198834
I0916 23:57:05.437726 722351 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32783 SSHKeyPath:/home/jenkins/minikube-integration/21550-661878/.minikube/machines/ha-198834/id_rsa Username:docker}
I0916 23:57:05.536164 722351 ssh_runner.go:195] Run: cat /etc/os-release
I0916 23:57:05.539580 722351 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0916 23:57:05.539616 722351 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0916 23:57:05.539633 722351 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0916 23:57:05.539639 722351 info.go:137] Remote host: Ubuntu 22.04.5 LTS
I0916 23:57:05.539653 722351 filesync.go:126] Scanning /home/jenkins/minikube-integration/21550-661878/.minikube/addons for local assets ...
I0916 23:57:05.539713 722351 filesync.go:126] Scanning /home/jenkins/minikube-integration/21550-661878/.minikube/files for local assets ...
I0916 23:57:05.539819 722351 filesync.go:149] local asset: /home/jenkins/minikube-integration/21550-661878/.minikube/files/etc/ssl/certs/6653992.pem -> 6653992.pem in /etc/ssl/certs
I0916 23:57:05.539836 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/files/etc/ssl/certs/6653992.pem -> /etc/ssl/certs/6653992.pem
I0916 23:57:05.540001 722351 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I0916 23:57:05.548691 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/files/etc/ssl/certs/6653992.pem --> /etc/ssl/certs/6653992.pem (1708 bytes)
I0916 23:57:05.575226 722351 start.go:296] duration metric: took 154.859714ms for postStartSetup
I0916 23:57:05.575586 722351 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-198834
I0916 23:57:05.591876 722351 profile.go:143] Saving config to /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/config.json ...
I0916 23:57:05.592351 722351 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0916 23:57:05.592412 722351 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-198834
I0916 23:57:05.609076 722351 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32783 SSHKeyPath:/home/jenkins/minikube-integration/21550-661878/.minikube/machines/ha-198834/id_rsa Username:docker}
I0916 23:57:05.701881 722351 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0916 23:57:05.706378 722351 start.go:128] duration metric: took 7.487581015s to createHost
I0916 23:57:05.706400 722351 start.go:83] releasing machines lock for "ha-198834", held for 7.487744986s
I0916 23:57:05.706457 722351 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-198834
I0916 23:57:05.723047 722351 ssh_runner.go:195] Run: cat /version.json
I0916 23:57:05.723106 722351 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-198834
I0916 23:57:05.723117 722351 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0916 23:57:05.723202 722351 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-198834
I0916 23:57:05.739830 722351 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32783 SSHKeyPath:/home/jenkins/minikube-integration/21550-661878/.minikube/machines/ha-198834/id_rsa Username:docker}
I0916 23:57:05.739978 722351 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32783 SSHKeyPath:/home/jenkins/minikube-integration/21550-661878/.minikube/machines/ha-198834/id_rsa Username:docker}
I0916 23:57:05.900291 722351 ssh_runner.go:195] Run: systemctl --version
I0916 23:57:05.905029 722351 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0916 23:57:05.909440 722351 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0916 23:57:05.939050 722351 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0916 23:57:05.939153 722351 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0916 23:57:05.968631 722351 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
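The find/sed pass at 23:57:05.909 gives the loopback CNI config an explicit "name" field and pins its cniVersion, while the next pass disables any bridge/podman configs by renaming them to *.mk_disabled. The patched loopback file is not printed here; a typical result would look like the following (contents assumed for illustration):

{
  "cniVersion": "1.0.0",
  "name": "loopback",
  "type": "loopback"
}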
I0916 23:57:05.968659 722351 start.go:495] detecting cgroup driver to use...
I0916 23:57:05.968693 722351 detect.go:190] detected "systemd" cgroup driver on host os
I0916 23:57:05.968830 722351 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0916 23:57:05.985490 722351 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I0916 23:57:05.997349 722351 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0916 23:57:06.007949 722351 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I0916 23:57:06.008036 722351 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I0916 23:57:06.018490 722351 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0916 23:57:06.028804 722351 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0916 23:57:06.039330 722351 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0916 23:57:06.049816 722351 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0916 23:57:06.059493 722351 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0916 23:57:06.069825 722351 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0916 23:57:06.080461 722351 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I0916 23:57:06.091039 722351 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0916 23:57:06.100019 722351 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0916 23:57:06.109126 722351 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0916 23:57:06.178675 722351 ssh_runner.go:195] Run: sudo systemctl restart containerd
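The sed edits between 23:57:05.985 and 23:57:06.080 rewrite /etc/containerd/config.toml in place: systemd cgroups, the pause:3.10.1 sandbox image, the runc v2 shim, the CNI conf dir, and unprivileged ports. The resulting file is not shown in the log; a condensed, illustrative fragment consistent with those edits would be:

[plugins."io.containerd.grpc.v1.cri"]
  sandbox_image = "registry.k8s.io/pause:3.10.1"
  restrict_oom_score_adj = false
  enable_unprivileged_ports = true
  [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
    runtime_type = "io.containerd.runc.v2"
    [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
      SystemdCgroup = true
  [plugins."io.containerd.grpc.v1.cri".cni]
    conf_dir = "/etc/cni/net.d"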
I0916 23:57:06.251706 722351 start.go:495] detecting cgroup driver to use...
I0916 23:57:06.251760 722351 detect.go:190] detected "systemd" cgroup driver on host os
I0916 23:57:06.251809 722351 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I0916 23:57:06.264383 722351 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0916 23:57:06.275792 722351 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I0916 23:57:06.294666 722351 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0916 23:57:06.306227 722351 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0916 23:57:06.317564 722351 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I0916 23:57:06.334759 722351 ssh_runner.go:195] Run: which cri-dockerd
I0916 23:57:06.338327 722351 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I0916 23:57:06.348543 722351 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
I0916 23:57:06.366680 722351 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I0916 23:57:06.432452 722351 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I0916 23:57:06.496386 722351 docker.go:575] configuring docker to use "systemd" as cgroup driver...
I0916 23:57:06.496496 722351 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (129 bytes)
I0916 23:57:06.515617 722351 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
I0916 23:57:06.527317 722351 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0916 23:57:06.590441 722351 ssh_runner.go:195] Run: sudo systemctl restart docker
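The 129-byte payload copied to /etc/docker/daemon.json at 23:57:06.496 is not printed in the log. A typical daemon.json that selects the systemd cgroup driver (contents assumed, not captured from this run) looks like:

{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": { "max-size": "100m" },
  "storage-driver": "overlay2"
}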
I0916 23:57:07.360810 722351 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I0916 23:57:07.372759 722351 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I0916 23:57:07.384493 722351 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0916 23:57:07.396808 722351 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I0916 23:57:07.466973 722351 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I0916 23:57:07.538629 722351 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0916 23:57:07.607976 722351 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I0916 23:57:07.630119 722351 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I0916 23:57:07.642121 722351 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0916 23:57:07.709050 722351 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I0916 23:57:07.784177 722351 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0916 23:57:07.797686 722351 start.go:542] Will wait 60s for socket path /var/run/cri-dockerd.sock
I0916 23:57:07.797763 722351 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I0916 23:57:07.801576 722351 start.go:563] Will wait 60s for crictl version
I0916 23:57:07.801630 722351 ssh_runner.go:195] Run: which crictl
I0916 23:57:07.804977 722351 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0916 23:57:07.837851 722351 start.go:579] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 28.4.0
RuntimeApiVersion: v1
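With /etc/crictl.yaml now pointing at cri-dockerd, the same runtime details can be queried by hand; a quick manual check, equivalent to the crictl version call above (sketch):

sudo crictl --runtime-endpoint unix:///var/run/cri-dockerd.sock version
sudo crictl --runtime-endpoint unix:///var/run/cri-dockerd.sock info   # dumps runtime status and config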
I0916 23:57:07.837957 722351 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0916 23:57:07.862098 722351 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0916 23:57:07.888678 722351 out.go:252] * Preparing Kubernetes v1.34.0 on Docker 28.4.0 ...
I0916 23:57:07.888755 722351 cli_runner.go:164] Run: docker network inspect ha-198834 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0916 23:57:07.905526 722351 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I0916 23:57:07.909605 722351 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
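The one-liner at 23:57:07.909 is an idempotent "replace or append" for the host.minikube.internal entry: it copies /etc/hosts minus any existing line ending in that hostname, appends the fresh mapping, and moves the result back. The same logic spread over lines for readability:

# Same pattern as the one-liner above
{ grep -v $'\thost.minikube.internal$' /etc/hosts
  echo "192.168.49.1 host.minikube.internal"
} > /tmp/h.$$
sudo cp /tmp/h.$$ /etc/hosts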
I0916 23:57:07.921677 722351 kubeadm.go:875] updating cluster {Name:ha-198834 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:ha-198834 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIP
s:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath:
SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I0916 23:57:07.921793 722351 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime docker
I0916 23:57:07.921842 722351 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0916 23:57:07.943020 722351 docker.go:691] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.34.0
registry.k8s.io/kube-controller-manager:v1.34.0
registry.k8s.io/kube-proxy:v1.34.0
registry.k8s.io/kube-scheduler:v1.34.0
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I0916 23:57:07.943041 722351 docker.go:621] Images already preloaded, skipping extraction
I0916 23:57:07.943097 722351 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0916 23:57:07.963583 722351 docker.go:691] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.34.0
registry.k8s.io/kube-controller-manager:v1.34.0
registry.k8s.io/kube-proxy:v1.34.0
registry.k8s.io/kube-scheduler:v1.34.0
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I0916 23:57:07.963609 722351 cache_images.go:85] Images are preloaded, skipping loading
I0916 23:57:07.963623 722351 kubeadm.go:926] updating node { 192.168.49.2 8443 v1.34.0 docker true true} ...
I0916 23:57:07.963750 722351 kubeadm.go:938] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=ha-198834 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
[Install]
config:
{KubernetesVersion:v1.34.0 ClusterName:ha-198834 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I0916 23:57:07.963822 722351 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I0916 23:57:08.012977 722351 cni.go:84] Creating CNI manager for ""
I0916 23:57:08.013007 722351 cni.go:136] multinode detected (1 nodes found), recommending kindnet
I0916 23:57:08.013021 722351 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I0916 23:57:08.013044 722351 kubeadm.go:189] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8443 KubernetesVersion:v1.34.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:ha-198834 NodeName:ha-198834 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/man
ifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I0916 23:57:08.013180 722351 kubeadm.go:195] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.49.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///var/run/cri-dockerd.sock
name: "ha-198834"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.49.2"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
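The generated file above stacks InitConfiguration, ClusterConfiguration, KubeletConfiguration and KubeProxyConfiguration into one kubeadm config. Before it is handed to kubeadm init (it is copied to /var/tmp/minikube/kubeadm.yaml below), it can be sanity-checked against kubeadm's own defaults; a sketch, assuming a recent kubeadm that ships the validate subcommand:

kubeadm config print init-defaults                               # upstream defaults for comparison
kubeadm config validate --config /var/tmp/minikube/kubeadm.yaml  # schema/version check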
I0916 23:57:08.013203 722351 kube-vip.go:115] generating kube-vip config ...
I0916 23:57:08.013244 722351 ssh_runner.go:195] Run: sudo sh -c "lsmod | grep ip_vs"
I0916 23:57:08.026529 722351 kube-vip.go:163] giving up enabling control-plane load-balancing as ipvs kernel modules appears not to be available: sudo sh -c "lsmod | grep ip_vs": Process exited with status 1
stdout:
stderr:
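kube-vip therefore falls back to ARP-based VIP handling because no IPVS modules are loaded. On hosts where control-plane load-balancing is wanted, the usual IPVS module set can be loaded beforehand (sketch; the module names are the conventional ones, not taken from this log, and with the docker driver they must come from the host kernel):

sudo modprobe -a ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh
lsmod | grep ip_vs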
I0916 23:57:08.026652 722351 kube-vip.go:137] kube-vip config:
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
name: kube-vip
namespace: kube-system
spec:
containers:
- args:
- manager
env:
- name: vip_arp
value: "true"
- name: port
value: "8443"
- name: vip_nodename
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: vip_interface
value: eth0
- name: vip_cidr
value: "32"
- name: dns_mode
value: first
- name: cp_enable
value: "true"
- name: cp_namespace
value: kube-system
- name: vip_leaderelection
value: "true"
- name: vip_leasename
value: plndr-cp-lock
- name: vip_leaseduration
value: "5"
- name: vip_renewdeadline
value: "3"
- name: vip_retryperiod
value: "1"
- name: address
value: 192.168.49.254
- name: prometheus_server
value: :2112
image: ghcr.io/kube-vip/kube-vip:v1.0.0
imagePullPolicy: IfNotPresent
name: kube-vip
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
- NET_RAW
volumeMounts:
- mountPath: /etc/kubernetes/admin.conf
name: kubeconfig
hostAliases:
- hostnames:
- kubernetes
ip: 127.0.0.1
hostNetwork: true
volumes:
- hostPath:
path: "/etc/kubernetes/super-admin.conf"
name: kubeconfig
status: {}
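The manifest above runs kube-vip as a static pod with leader election (lease plndr-cp-lock) advertising 192.168.49.254 on eth0. Once the control plane is up, the current VIP holder and the bound address can be checked; a sketch:

kubectl -n kube-system get lease plndr-cp-lock   # holder = node currently owning the VIP
ip addr show eth0 | grep 192.168.49.254          # run on the holder; the VIP should be bound here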
I0916 23:57:08.026716 722351 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.0
I0916 23:57:08.036301 722351 binaries.go:44] Found k8s binaries, skipping transfer
I0916 23:57:08.036379 722351 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube /etc/kubernetes/manifests
I0916 23:57:08.046128 722351 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (308 bytes)
I0916 23:57:08.064738 722351 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0916 23:57:08.083216 722351 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2209 bytes)
I0916 23:57:08.101114 722351 ssh_runner.go:362] scp memory --> /etc/kubernetes/manifests/kube-vip.yaml (1364 bytes)
I0916 23:57:08.121332 722351 ssh_runner.go:195] Run: grep 192.168.49.254 control-plane.minikube.internal$ /etc/hosts
I0916 23:57:08.125035 722351 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.254 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0916 23:57:08.136734 722351 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0916 23:57:08.207460 722351 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0916 23:57:08.231438 722351 certs.go:68] Setting up /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834 for IP: 192.168.49.2
I0916 23:57:08.231468 722351 certs.go:194] generating shared ca certs ...
I0916 23:57:08.231491 722351 certs.go:226] acquiring lock for ca certs: {Name:mk24ad2a96dc59b16a9413b27c57b0ccb7d8ca57 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 23:57:08.231634 722351 certs.go:235] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21550-661878/.minikube/ca.key
I0916 23:57:08.231682 722351 certs.go:235] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21550-661878/.minikube/proxy-client-ca.key
I0916 23:57:08.231692 722351 certs.go:256] generating profile certs ...
I0916 23:57:08.231748 722351 certs.go:363] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/client.key
I0916 23:57:08.231761 722351 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/client.crt with IP's: []
I0916 23:57:08.595971 722351 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/client.crt ...
I0916 23:57:08.596008 722351 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/client.crt: {Name:mk045c8005e18afdd173496398fb640e85421530 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 23:57:08.596237 722351 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/client.key ...
I0916 23:57:08.596255 722351 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/client.key: {Name:mkec7f349d5172bad8ab50dce27926cf4a2810b8 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 23:57:08.596372 722351 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.key.c9168e28
I0916 23:57:08.596390 722351 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.crt.c9168e28 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2 192.168.49.254]
I0916 23:57:08.930707 722351 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.crt.c9168e28 ...
I0916 23:57:08.930740 722351 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.crt.c9168e28: {Name:mke8743bf1c0faa0b20cb0336c0e1879fcb77e1e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 23:57:08.930956 722351 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.key.c9168e28 ...
I0916 23:57:08.930975 722351 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.key.c9168e28: {Name:mkd63d446f2fe51bc154cd1e5df7f39c484f911b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 23:57:08.931094 722351 certs.go:381] copying /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.crt.c9168e28 -> /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.crt
I0916 23:57:08.931221 722351 certs.go:385] copying /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.key.c9168e28 -> /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.key
I0916 23:57:08.931283 722351 certs.go:363] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/proxy-client.key
I0916 23:57:08.931298 722351 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/proxy-client.crt with IP's: []
I0916 23:57:09.286083 722351 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/proxy-client.crt ...
I0916 23:57:09.286118 722351 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/proxy-client.crt: {Name:mk7d8f9e6931aff0b35e5110e6bb582a3f00c824 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 23:57:09.286322 722351 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/proxy-client.key ...
I0916 23:57:09.286339 722351 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/proxy-client.key: {Name:mkaeef389ff7f9a0b6729cce56a45b0b3aa13296 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 23:57:09.286448 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/ca.crt -> /var/lib/minikube/certs/ca.crt
I0916 23:57:09.286467 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/ca.key -> /var/lib/minikube/certs/ca.key
I0916 23:57:09.286479 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/proxy-client-ca.crt -> /var/lib/minikube/certs/proxy-client-ca.crt
I0916 23:57:09.286489 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/proxy-client-ca.key -> /var/lib/minikube/certs/proxy-client-ca.key
I0916 23:57:09.286513 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.crt -> /var/lib/minikube/certs/apiserver.crt
I0916 23:57:09.286527 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.key -> /var/lib/minikube/certs/apiserver.key
I0916 23:57:09.286538 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/proxy-client.crt -> /var/lib/minikube/certs/proxy-client.crt
I0916 23:57:09.286550 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/proxy-client.key -> /var/lib/minikube/certs/proxy-client.key
I0916 23:57:09.286602 722351 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-661878/.minikube/certs/665399.pem (1338 bytes)
W0916 23:57:09.286641 722351 certs.go:480] ignoring /home/jenkins/minikube-integration/21550-661878/.minikube/certs/665399_empty.pem, impossibly tiny 0 bytes
I0916 23:57:09.286650 722351 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-661878/.minikube/certs/ca-key.pem (1675 bytes)
I0916 23:57:09.286674 722351 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-661878/.minikube/certs/ca.pem (1078 bytes)
I0916 23:57:09.286702 722351 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-661878/.minikube/certs/cert.pem (1123 bytes)
I0916 23:57:09.286730 722351 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-661878/.minikube/certs/key.pem (1679 bytes)
I0916 23:57:09.286767 722351 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-661878/.minikube/files/etc/ssl/certs/6653992.pem (1708 bytes)
I0916 23:57:09.286792 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/ca.crt -> /usr/share/ca-certificates/minikubeCA.pem
I0916 23:57:09.286805 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/certs/665399.pem -> /usr/share/ca-certificates/665399.pem
I0916 23:57:09.286817 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/files/etc/ssl/certs/6653992.pem -> /usr/share/ca-certificates/6653992.pem
I0916 23:57:09.287381 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0916 23:57:09.312982 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I0916 23:57:09.337940 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0916 23:57:09.362347 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I0916 23:57:09.386557 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
I0916 23:57:09.412140 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I0916 23:57:09.436893 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0916 23:57:09.461871 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I0916 23:57:09.487876 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0916 23:57:09.516060 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/certs/665399.pem --> /usr/share/ca-certificates/665399.pem (1338 bytes)
I0916 23:57:09.541440 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/files/etc/ssl/certs/6653992.pem --> /usr/share/ca-certificates/6653992.pem (1708 bytes)
I0916 23:57:09.567069 722351 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I0916 23:57:09.585649 722351 ssh_runner.go:195] Run: openssl version
I0916 23:57:09.591504 722351 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0916 23:57:09.602004 722351 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0916 23:57:09.605727 722351 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Sep 16 23:48 /usr/share/ca-certificates/minikubeCA.pem
I0916 23:57:09.605791 722351 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0916 23:57:09.612679 722351 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I0916 23:57:09.622556 722351 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/665399.pem && ln -fs /usr/share/ca-certificates/665399.pem /etc/ssl/certs/665399.pem"
I0916 23:57:09.632414 722351 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/665399.pem
I0916 23:57:09.636379 722351 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Sep 16 23:53 /usr/share/ca-certificates/665399.pem
I0916 23:57:09.636441 722351 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/665399.pem
I0916 23:57:09.643659 722351 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/665399.pem /etc/ssl/certs/51391683.0"
I0916 23:57:09.653893 722351 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/6653992.pem && ln -fs /usr/share/ca-certificates/6653992.pem /etc/ssl/certs/6653992.pem"
I0916 23:57:09.663837 722351 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/6653992.pem
I0916 23:57:09.667554 722351 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Sep 16 23:53 /usr/share/ca-certificates/6653992.pem
I0916 23:57:09.667899 722351 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/6653992.pem
I0916 23:57:09.675833 722351 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/6653992.pem /etc/ssl/certs/3ec20f2e.0"
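Each openssl x509 -hash call above prints the subject-name hash that OpenSSL uses to look up CAs in /etc/ssl/certs, and the matching ln -fs creates the <hash>.0 entry (b5213941.0, 51391683.0, 3ec20f2e.0). The same install step for a single PEM, written as a sketch:

CERT=/usr/share/ca-certificates/minikubeCA.pem
HASH=$(openssl x509 -hash -noout -in "$CERT")
sudo ln -fs "$CERT" "/etc/ssl/certs/${HASH}.0"   # e.g. /etc/ssl/certs/b5213941.0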
I0916 23:57:09.686032 722351 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0916 23:57:09.689851 722351 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I0916 23:57:09.689923 722351 kubeadm.go:392] StartCluster: {Name:ha-198834 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:ha-198834 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[
] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: So
cketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0916 23:57:09.690062 722351 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I0916 23:57:09.708774 722351 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0916 23:57:09.718368 722351 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0916 23:57:09.727825 722351 kubeadm.go:214] ignoring SystemVerification for kubeadm because of docker driver
I0916 23:57:09.727888 722351 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0916 23:57:09.738106 722351 kubeadm.go:155] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0916 23:57:09.738126 722351 kubeadm.go:157] found existing configuration files:
I0916 23:57:09.738165 722351 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I0916 23:57:09.747962 722351 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I0916 23:57:09.748017 722351 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I0916 23:57:09.757385 722351 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I0916 23:57:09.766772 722351 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I0916 23:57:09.766839 722351 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I0916 23:57:09.775735 722351 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I0916 23:57:09.784848 722351 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I0916 23:57:09.784955 722351 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I0916 23:57:09.793751 722351 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I0916 23:57:09.803170 722351 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I0916 23:57:09.803229 722351 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I0916 23:57:09.811944 722351 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I0916 23:57:09.867145 722351 kubeadm.go:310] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1037-gcp\n", err: exit status 1
I0916 23:57:09.919246 722351 kubeadm.go:310] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I0916 23:57:19.614241 722351 kubeadm.go:310] [init] Using Kubernetes version: v1.34.0
I0916 23:57:19.614308 722351 kubeadm.go:310] [preflight] Running pre-flight checks
I0916 23:57:19.614466 722351 kubeadm.go:310] [preflight] The system verification failed. Printing the output from the verification:
I0916 23:57:19.614561 722351 kubeadm.go:310] KERNEL_VERSION: 6.8.0-1037-gcp
I0916 23:57:19.614607 722351 kubeadm.go:310] OS: Linux
I0916 23:57:19.614692 722351 kubeadm.go:310] CGROUPS_CPU: enabled
I0916 23:57:19.614771 722351 kubeadm.go:310] CGROUPS_CPUSET: enabled
I0916 23:57:19.614837 722351 kubeadm.go:310] CGROUPS_DEVICES: enabled
I0916 23:57:19.614899 722351 kubeadm.go:310] CGROUPS_FREEZER: enabled
I0916 23:57:19.614977 722351 kubeadm.go:310] CGROUPS_MEMORY: enabled
I0916 23:57:19.615057 722351 kubeadm.go:310] CGROUPS_PIDS: enabled
I0916 23:57:19.615125 722351 kubeadm.go:310] CGROUPS_HUGETLB: enabled
I0916 23:57:19.615202 722351 kubeadm.go:310] CGROUPS_IO: enabled
I0916 23:57:19.615307 722351 kubeadm.go:310] [preflight] Pulling images required for setting up a Kubernetes cluster
I0916 23:57:19.615454 722351 kubeadm.go:310] [preflight] This might take a minute or two, depending on the speed of your internet connection
I0916 23:57:19.615594 722351 kubeadm.go:310] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I0916 23:57:19.615688 722351 kubeadm.go:310] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I0916 23:57:19.618162 722351 out.go:252] - Generating certificates and keys ...
I0916 23:57:19.618260 722351 kubeadm.go:310] [certs] Using existing ca certificate authority
I0916 23:57:19.618349 722351 kubeadm.go:310] [certs] Using existing apiserver certificate and key on disk
I0916 23:57:19.618445 722351 kubeadm.go:310] [certs] Generating "apiserver-kubelet-client" certificate and key
I0916 23:57:19.618533 722351 kubeadm.go:310] [certs] Generating "front-proxy-ca" certificate and key
I0916 23:57:19.618635 722351 kubeadm.go:310] [certs] Generating "front-proxy-client" certificate and key
I0916 23:57:19.618717 722351 kubeadm.go:310] [certs] Generating "etcd/ca" certificate and key
I0916 23:57:19.618792 722351 kubeadm.go:310] [certs] Generating "etcd/server" certificate and key
I0916 23:57:19.618993 722351 kubeadm.go:310] [certs] etcd/server serving cert is signed for DNS names [ha-198834 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0916 23:57:19.619071 722351 kubeadm.go:310] [certs] Generating "etcd/peer" certificate and key
I0916 23:57:19.619249 722351 kubeadm.go:310] [certs] etcd/peer serving cert is signed for DNS names [ha-198834 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0916 23:57:19.619335 722351 kubeadm.go:310] [certs] Generating "etcd/healthcheck-client" certificate and key
I0916 23:57:19.619434 722351 kubeadm.go:310] [certs] Generating "apiserver-etcd-client" certificate and key
I0916 23:57:19.619517 722351 kubeadm.go:310] [certs] Generating "sa" key and public key
I0916 23:57:19.619599 722351 kubeadm.go:310] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I0916 23:57:19.619679 722351 kubeadm.go:310] [kubeconfig] Writing "admin.conf" kubeconfig file
I0916 23:57:19.619763 722351 kubeadm.go:310] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I0916 23:57:19.619846 722351 kubeadm.go:310] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I0916 23:57:19.619990 722351 kubeadm.go:310] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I0916 23:57:19.620069 722351 kubeadm.go:310] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I0916 23:57:19.620183 722351 kubeadm.go:310] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I0916 23:57:19.620281 722351 kubeadm.go:310] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I0916 23:57:19.621487 722351 out.go:252] - Booting up control plane ...
I0916 23:57:19.621595 722351 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-apiserver"
I0916 23:57:19.621704 722351 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I0916 23:57:19.621799 722351 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-scheduler"
I0916 23:57:19.621956 722351 kubeadm.go:310] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I0916 23:57:19.622047 722351 kubeadm.go:310] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I0916 23:57:19.622137 722351 kubeadm.go:310] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I0916 23:57:19.622213 722351 kubeadm.go:310] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I0916 23:57:19.622246 722351 kubeadm.go:310] [kubelet-start] Starting the kubelet
I0916 23:57:19.622371 722351 kubeadm.go:310] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I0916 23:57:19.622503 722351 kubeadm.go:310] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I0916 23:57:19.622564 722351 kubeadm.go:310] [kubelet-check] The kubelet is healthy after 1.000941296s
I0916 23:57:19.622663 722351 kubeadm.go:310] [control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
I0916 23:57:19.622778 722351 kubeadm.go:310] [control-plane-check] Checking kube-apiserver at https://192.168.49.2:8443/livez
I0916 23:57:19.622893 722351 kubeadm.go:310] [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
I0916 23:57:19.623021 722351 kubeadm.go:310] [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
I0916 23:57:19.623126 722351 kubeadm.go:310] [control-plane-check] kube-controller-manager is healthy after 1.545161134s
I0916 23:57:19.623210 722351 kubeadm.go:310] [control-plane-check] kube-scheduler is healthy after 2.1638517s
I0916 23:57:19.623273 722351 kubeadm.go:310] [control-plane-check] kube-apiserver is healthy after 4.001738286s
I0916 23:57:19.623369 722351 kubeadm.go:310] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I0916 23:57:19.623478 722351 kubeadm.go:310] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I0916 23:57:19.623551 722351 kubeadm.go:310] [upload-certs] Skipping phase. Please see --upload-certs
I0916 23:57:19.623792 722351 kubeadm.go:310] [mark-control-plane] Marking the node ha-198834 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I0916 23:57:19.623845 722351 kubeadm.go:310] [bootstrap-token] Using token: wg2on6.splp3qzu9xv61vdp
I0916 23:57:19.625599 722351 out.go:252] - Configuring RBAC rules ...
I0916 23:57:19.625697 722351 kubeadm.go:310] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I0916 23:57:19.625769 722351 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I0916 23:57:19.625966 722351 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I0916 23:57:19.626123 722351 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I0916 23:57:19.626261 722351 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I0916 23:57:19.626367 722351 kubeadm.go:310] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I0916 23:57:19.626473 722351 kubeadm.go:310] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I0916 23:57:19.626522 722351 kubeadm.go:310] [addons] Applied essential addon: CoreDNS
I0916 23:57:19.626564 722351 kubeadm.go:310] [addons] Applied essential addon: kube-proxy
I0916 23:57:19.626570 722351 kubeadm.go:310]
I0916 23:57:19.626631 722351 kubeadm.go:310] Your Kubernetes control-plane has initialized successfully!
I0916 23:57:19.626643 722351 kubeadm.go:310]
I0916 23:57:19.626737 722351 kubeadm.go:310] To start using your cluster, you need to run the following as a regular user:
I0916 23:57:19.626747 722351 kubeadm.go:310]
I0916 23:57:19.626781 722351 kubeadm.go:310] mkdir -p $HOME/.kube
I0916 23:57:19.626863 722351 kubeadm.go:310] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I0916 23:57:19.626960 722351 kubeadm.go:310] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I0916 23:57:19.626973 722351 kubeadm.go:310]
I0916 23:57:19.627050 722351 kubeadm.go:310] Alternatively, if you are the root user, you can run:
I0916 23:57:19.627058 722351 kubeadm.go:310]
I0916 23:57:19.627113 722351 kubeadm.go:310] export KUBECONFIG=/etc/kubernetes/admin.conf
I0916 23:57:19.627119 722351 kubeadm.go:310]
I0916 23:57:19.627167 722351 kubeadm.go:310] You should now deploy a pod network to the cluster.
I0916 23:57:19.627238 722351 kubeadm.go:310] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I0916 23:57:19.627297 722351 kubeadm.go:310] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I0916 23:57:19.627302 722351 kubeadm.go:310]
I0916 23:57:19.627381 722351 kubeadm.go:310] You can now join any number of control-plane nodes by copying certificate authorities
I0916 23:57:19.627449 722351 kubeadm.go:310] and service account keys on each node and then running the following as root:
I0916 23:57:19.627454 722351 kubeadm.go:310]
I0916 23:57:19.627525 722351 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token wg2on6.splp3qzu9xv61vdp \
I0916 23:57:19.627618 722351 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:aa81889111a47026234b9abf30001d51a9462bec006420d404163720ad63709e \
I0916 23:57:19.627647 722351 kubeadm.go:310] --control-plane
I0916 23:57:19.627653 722351 kubeadm.go:310]
I0916 23:57:19.627725 722351 kubeadm.go:310] Then you can join any number of worker nodes by running the following on each as root:
I0916 23:57:19.627733 722351 kubeadm.go:310]
I0916 23:57:19.627801 722351 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token wg2on6.splp3qzu9xv61vdp \
I0916 23:57:19.627921 722351 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:aa81889111a47026234b9abf30001d51a9462bec006420d404163720ad63709e
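Note: the --discovery-token-ca-cert-hash printed above is not secret material; kubeadm derives it as the SHA-256 digest of the DER-encoded Subject Public Key Info of the cluster CA certificate. A minimal Go sketch of that derivation, assuming the default kubeadm CA path /etc/kubernetes/pki/ca.crt on the control-plane node (illustrative only, not part of this test suite):

// recompute kubeadm's discovery-token-ca-cert-hash from the cluster CA cert
package main

import (
	"crypto/sha256"
	"crypto/x509"
	"encoding/hex"
	"encoding/pem"
	"fmt"
	"log"
	"os"
)

func main() {
	pemBytes, err := os.ReadFile("/etc/kubernetes/pki/ca.crt") // assumed default kubeadm path
	if err != nil {
		log.Fatal(err)
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		log.Fatal("no PEM block found in ca.crt")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		log.Fatal(err)
	}
	// kubeadm hashes the certificate's DER-encoded Subject Public Key Info
	sum := sha256.Sum256(cert.RawSubjectPublicKeyInfo)
	fmt.Println("sha256:" + hex.EncodeToString(sum[:]))
}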
I0916 23:57:19.627933 722351 cni.go:84] Creating CNI manager for ""
I0916 23:57:19.627939 722351 cni.go:136] multinode detected (1 nodes found), recommending kindnet
I0916 23:57:19.630017 722351 out.go:179] * Configuring CNI (Container Networking Interface) ...
I0916 23:57:19.631017 722351 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I0916 23:57:19.635194 722351 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.34.0/kubectl ...
I0916 23:57:19.635211 722351 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I0916 23:57:19.655634 722351 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I0916 23:57:19.855102 722351 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I0916 23:57:19.855186 722351 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I0916 23:57:19.855265 722351 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes ha-198834 minikube.k8s.io/updated_at=2025_09_16T23_57_19_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=9829f0bc17c523e4378d28e0c25741106f24f00a minikube.k8s.io/name=ha-198834 minikube.k8s.io/primary=true
I0916 23:57:19.863538 722351 ops.go:34] apiserver oom_adj: -16
I0916 23:57:19.931275 722351 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0916 23:57:20.432025 722351 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0916 23:57:20.932100 722351 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0916 23:57:21.432105 722351 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0916 23:57:21.932376 722351 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0916 23:57:22.432213 722351 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0916 23:57:22.931583 722351 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0916 23:57:23.431392 722351 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0916 23:57:23.932193 722351 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0916 23:57:24.431927 722351 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0916 23:57:24.504799 722351 kubeadm.go:1105] duration metric: took 4.649687278s to wait for elevateKubeSystemPrivileges
I0916 23:57:24.504835 722351 kubeadm.go:394] duration metric: took 14.81493092s to StartCluster
I0916 23:57:24.504858 722351 settings.go:142] acquiring lock: {Name:mk17965980d5178c2751d83eb1933be3ac57e811 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 23:57:24.504967 722351 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21550-661878/kubeconfig
I0916 23:57:24.505808 722351 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-661878/kubeconfig: {Name:mk609009f6fceff95c9f72883135342a90d871f0 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 23:57:24.506080 722351 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I0916 23:57:24.506079 722351 start.go:233] HA (multi-control plane) cluster: will skip waiting for primary control-plane node &{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}
I0916 23:57:24.506102 722351 start.go:241] waiting for startup goroutines ...
I0916 23:57:24.506120 722351 addons.go:511] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I0916 23:57:24.506215 722351 addons.go:69] Setting storage-provisioner=true in profile "ha-198834"
I0916 23:57:24.506241 722351 addons.go:238] Setting addon storage-provisioner=true in "ha-198834"
I0916 23:57:24.506236 722351 addons.go:69] Setting default-storageclass=true in profile "ha-198834"
I0916 23:57:24.506263 722351 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "ha-198834"
I0916 23:57:24.506271 722351 host.go:66] Checking if "ha-198834" exists ...
I0916 23:57:24.506311 722351 config.go:182] Loaded profile config "ha-198834": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0916 23:57:24.506630 722351 cli_runner.go:164] Run: docker container inspect ha-198834 --format={{.State.Status}}
I0916 23:57:24.506797 722351 cli_runner.go:164] Run: docker container inspect ha-198834 --format={{.State.Status}}
I0916 23:57:24.527476 722351 kapi.go:59] client config for ha-198834: &rest.Config{Host:"https://192.168.49.254:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/client.crt", KeyFile:"/home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/client.key", CAFile:"/home/jenkins/minikube-integration/21550-661878/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(nil)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x27f4620), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), WarningHandlerWithContext:rest.WarningHandlerWithContext(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
I0916 23:57:24.528010 722351 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false
I0916 23:57:24.528028 722351 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false
I0916 23:57:24.528032 722351 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false
I0916 23:57:24.528036 722351 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true
I0916 23:57:24.528039 722351 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false
I0916 23:57:24.528105 722351 cert_rotation.go:141] "Starting client certificate rotation controller" logger="tls-transport-cache"
I0916 23:57:24.528384 722351 addons.go:238] Setting addon default-storageclass=true in "ha-198834"
I0916 23:57:24.528420 722351 host.go:66] Checking if "ha-198834" exists ...
I0916 23:57:24.528683 722351 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I0916 23:57:24.528891 722351 cli_runner.go:164] Run: docker container inspect ha-198834 --format={{.State.Status}}
I0916 23:57:24.530050 722351 addons.go:435] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0916 23:57:24.530067 722351 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I0916 23:57:24.530109 722351 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-198834
I0916 23:57:24.548463 722351 addons.go:435] installing /etc/kubernetes/addons/storageclass.yaml
I0916 23:57:24.548490 722351 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0916 23:57:24.548552 722351 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-198834
I0916 23:57:24.551711 722351 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32783 SSHKeyPath:/home/jenkins/minikube-integration/21550-661878/.minikube/machines/ha-198834/id_rsa Username:docker}
I0916 23:57:24.575963 722351 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32783 SSHKeyPath:/home/jenkins/minikube-integration/21550-661878/.minikube/machines/ha-198834/id_rsa Username:docker}
I0916 23:57:24.622716 722351 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I0916 23:57:24.680948 722351 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0916 23:57:24.725959 722351 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0916 23:57:24.815565 722351 start.go:976] {"host.minikube.internal": 192.168.49.1} host record injected into CoreDNS's ConfigMap
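Note: the replace command above rewrites the coredns ConfigMap so that pods can resolve host.minikube.internal to the host gateway. Unescaped, the stanza injected into the Corefile is roughly (a log directive is also inserted ahead of errors):

hosts {
   192.168.49.1 host.minikube.internal
   fallthrough
}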
I0916 23:57:25.027949 722351 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
I0916 23:57:25.029176 722351 addons.go:514] duration metric: took 523.059617ms for enable addons: enabled=[storage-provisioner default-storageclass]
I0916 23:57:25.029216 722351 start.go:246] waiting for cluster config update ...
I0916 23:57:25.029233 722351 start.go:255] writing updated cluster config ...
I0916 23:57:25.030834 722351 out.go:203]
I0916 23:57:25.032180 722351 config.go:182] Loaded profile config "ha-198834": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0916 23:57:25.032246 722351 profile.go:143] Saving config to /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/config.json ...
I0916 23:57:25.033846 722351 out.go:179] * Starting "ha-198834-m02" control-plane node in "ha-198834" cluster
I0916 23:57:25.035651 722351 cache.go:123] Beginning downloading kic base image for docker with docker
I0916 23:57:25.036699 722351 out.go:179] * Pulling base image v0.0.48 ...
I0916 23:57:25.038502 722351 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime docker
I0916 23:57:25.038524 722351 cache.go:58] Caching tarball of preloaded images
I0916 23:57:25.038599 722351 image.go:81] Checking for gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon
I0916 23:57:25.038624 722351 preload.go:172] Found /home/jenkins/minikube-integration/21550-661878/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-amd64.tar.lz4 in cache, skipping download
I0916 23:57:25.038635 722351 cache.go:61] Finished verifying existence of preloaded tar for v1.34.0 on docker
I0916 23:57:25.038696 722351 profile.go:143] Saving config to /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/config.json ...
I0916 23:57:25.064556 722351 image.go:100] Found gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon, skipping pull
I0916 23:57:25.064575 722351 cache.go:147] gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 exists in daemon, skipping load
I0916 23:57:25.064593 722351 cache.go:232] Successfully downloaded all kic artifacts
I0916 23:57:25.064625 722351 start.go:360] acquireMachinesLock for ha-198834-m02: {Name:mka26d69ac2a19118f71b5186fd38cc3e669de2c Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0916 23:57:25.064737 722351 start.go:364] duration metric: took 87.928µs to acquireMachinesLock for "ha-198834-m02"
I0916 23:57:25.064767 722351 start.go:93] Provisioning new machine with config: &{Name:ha-198834 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:ha-198834 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m02 IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name:m02 IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}
I0916 23:57:25.064852 722351 start.go:125] createHost starting for "m02" (driver="docker")
I0916 23:57:25.067030 722351 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I0916 23:57:25.067261 722351 start.go:159] libmachine.API.Create for "ha-198834" (driver="docker")
I0916 23:57:25.067302 722351 client.go:168] LocalClient.Create starting
I0916 23:57:25.067392 722351 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21550-661878/.minikube/certs/ca.pem
I0916 23:57:25.067435 722351 main.go:141] libmachine: Decoding PEM data...
I0916 23:57:25.067451 722351 main.go:141] libmachine: Parsing certificate...
I0916 23:57:25.067520 722351 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21550-661878/.minikube/certs/cert.pem
I0916 23:57:25.067544 722351 main.go:141] libmachine: Decoding PEM data...
I0916 23:57:25.067561 722351 main.go:141] libmachine: Parsing certificate...
I0916 23:57:25.067817 722351 cli_runner.go:164] Run: docker network inspect ha-198834 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0916 23:57:25.087287 722351 network_create.go:77] Found existing network {name:ha-198834 subnet:0xc0008ae780 gateway:[0 0 0 0 0 0 0 0 0 0 255 255 192 168 49 1] mtu:1500}
I0916 23:57:25.087329 722351 kic.go:121] calculated static IP "192.168.49.3" for the "ha-198834-m02" container
I0916 23:57:25.087390 722351 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0916 23:57:25.104356 722351 cli_runner.go:164] Run: docker volume create ha-198834-m02 --label name.minikube.sigs.k8s.io=ha-198834-m02 --label created_by.minikube.sigs.k8s.io=true
I0916 23:57:25.128318 722351 oci.go:103] Successfully created a docker volume ha-198834-m02
I0916 23:57:25.128423 722351 cli_runner.go:164] Run: docker run --rm --name ha-198834-m02-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=ha-198834-m02 --entrypoint /usr/bin/test -v ha-198834-m02:/var gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -d /var/lib
I0916 23:57:25.555443 722351 oci.go:107] Successfully prepared a docker volume ha-198834-m02
I0916 23:57:25.555486 722351 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime docker
I0916 23:57:25.555507 722351 kic.go:194] Starting extracting preloaded images to volume ...
I0916 23:57:25.555574 722351 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21550-661878/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v ha-198834-m02:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -I lz4 -xf /preloaded.tar -C /extractDir
I0916 23:57:29.769985 722351 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21550-661878/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v ha-198834-m02:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -I lz4 -xf /preloaded.tar -C /extractDir: (4.214340138s)
I0916 23:57:29.770025 722351 kic.go:203] duration metric: took 4.214511914s to extract preloaded images to volume ...
W0916 23:57:29.770138 722351 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
W0916 23:57:29.770180 722351 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.
I0916 23:57:29.770230 722351 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0916 23:57:29.831280 722351 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname ha-198834-m02 --name ha-198834-m02 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=ha-198834-m02 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=ha-198834-m02 --network ha-198834 --ip 192.168.49.3 --volume ha-198834-m02:/var --security-opt apparmor=unconfined --memory=3072mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1
I0916 23:57:30.118263 722351 cli_runner.go:164] Run: docker container inspect ha-198834-m02 --format={{.State.Running}}
I0916 23:57:30.140753 722351 cli_runner.go:164] Run: docker container inspect ha-198834-m02 --format={{.State.Status}}
I0916 23:57:30.161053 722351 cli_runner.go:164] Run: docker exec ha-198834-m02 stat /var/lib/dpkg/alternatives/iptables
I0916 23:57:30.204746 722351 oci.go:144] the created container "ha-198834-m02" has a running status.
I0916 23:57:30.204782 722351 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21550-661878/.minikube/machines/ha-198834-m02/id_rsa...
I0916 23:57:30.491277 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/machines/ha-198834-m02/id_rsa.pub -> /home/docker/.ssh/authorized_keys
I0916 23:57:30.491341 722351 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21550-661878/.minikube/machines/ha-198834-m02/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0916 23:57:30.523169 722351 cli_runner.go:164] Run: docker container inspect ha-198834-m02 --format={{.State.Status}}
I0916 23:57:30.546155 722351 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0916 23:57:30.546178 722351 kic_runner.go:114] Args: [docker exec --privileged ha-198834-m02 chown docker:docker /home/docker/.ssh/authorized_keys]
I0916 23:57:30.603616 722351 cli_runner.go:164] Run: docker container inspect ha-198834-m02 --format={{.State.Status}}
I0916 23:57:30.624695 722351 machine.go:93] provisionDockerMachine start ...
I0916 23:57:30.624784 722351 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-198834-m02
I0916 23:57:30.648569 722351 main.go:141] libmachine: Using SSH client type: native
I0916 23:57:30.648946 722351 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32788 <nil> <nil>}
I0916 23:57:30.648966 722351 main.go:141] libmachine: About to run SSH command:
hostname
I0916 23:57:30.800750 722351 main.go:141] libmachine: SSH cmd err, output: <nil>: ha-198834-m02
I0916 23:57:30.800784 722351 ubuntu.go:182] provisioning hostname "ha-198834-m02"
I0916 23:57:30.800873 722351 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-198834-m02
I0916 23:57:30.822237 722351 main.go:141] libmachine: Using SSH client type: native
I0916 23:57:30.822505 722351 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32788 <nil> <nil>}
I0916 23:57:30.822519 722351 main.go:141] libmachine: About to run SSH command:
sudo hostname ha-198834-m02 && echo "ha-198834-m02" | sudo tee /etc/hostname
I0916 23:57:30.984206 722351 main.go:141] libmachine: SSH cmd err, output: <nil>: ha-198834-m02
I0916 23:57:30.984307 722351 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-198834-m02
I0916 23:57:31.007082 722351 main.go:141] libmachine: Using SSH client type: native
I0916 23:57:31.007398 722351 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32788 <nil> <nil>}
I0916 23:57:31.007430 722351 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\sha-198834-m02' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 ha-198834-m02/g' /etc/hosts;
else
echo '127.0.1.1 ha-198834-m02' | sudo tee -a /etc/hosts;
fi
fi
I0916 23:57:31.152561 722351 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0916 23:57:31.152598 722351 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21550-661878/.minikube CaCertPath:/home/jenkins/minikube-integration/21550-661878/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21550-661878/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21550-661878/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21550-661878/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21550-661878/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21550-661878/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21550-661878/.minikube}
I0916 23:57:31.152624 722351 ubuntu.go:190] setting up certificates
I0916 23:57:31.152644 722351 provision.go:84] configureAuth start
I0916 23:57:31.152709 722351 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-198834-m02
I0916 23:57:31.171931 722351 provision.go:143] copyHostCerts
I0916 23:57:31.171978 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/certs/ca.pem -> /home/jenkins/minikube-integration/21550-661878/.minikube/ca.pem
I0916 23:57:31.172008 722351 exec_runner.go:144] found /home/jenkins/minikube-integration/21550-661878/.minikube/ca.pem, removing ...
I0916 23:57:31.172014 722351 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21550-661878/.minikube/ca.pem
I0916 23:57:31.172081 722351 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21550-661878/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21550-661878/.minikube/ca.pem (1078 bytes)
I0916 23:57:31.172159 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/certs/cert.pem -> /home/jenkins/minikube-integration/21550-661878/.minikube/cert.pem
I0916 23:57:31.172181 722351 exec_runner.go:144] found /home/jenkins/minikube-integration/21550-661878/.minikube/cert.pem, removing ...
I0916 23:57:31.172185 722351 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21550-661878/.minikube/cert.pem
I0916 23:57:31.172216 722351 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21550-661878/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21550-661878/.minikube/cert.pem (1123 bytes)
I0916 23:57:31.172262 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/certs/key.pem -> /home/jenkins/minikube-integration/21550-661878/.minikube/key.pem
I0916 23:57:31.172279 722351 exec_runner.go:144] found /home/jenkins/minikube-integration/21550-661878/.minikube/key.pem, removing ...
I0916 23:57:31.172287 722351 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21550-661878/.minikube/key.pem
I0916 23:57:31.172310 722351 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21550-661878/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21550-661878/.minikube/key.pem (1679 bytes)
I0916 23:57:31.172361 722351 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21550-661878/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21550-661878/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21550-661878/.minikube/certs/ca-key.pem org=jenkins.ha-198834-m02 san=[127.0.0.1 192.168.49.3 ha-198834-m02 localhost minikube]
I0916 23:57:31.314068 722351 provision.go:177] copyRemoteCerts
I0916 23:57:31.314146 722351 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0916 23:57:31.314208 722351 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-198834-m02
I0916 23:57:31.336792 722351 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32788 SSHKeyPath:/home/jenkins/minikube-integration/21550-661878/.minikube/machines/ha-198834-m02/id_rsa Username:docker}
I0916 23:57:31.442195 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/certs/ca.pem -> /etc/docker/ca.pem
I0916 23:57:31.442269 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I0916 23:57:31.472780 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/machines/server.pem -> /etc/docker/server.pem
I0916 23:57:31.472841 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/machines/server.pem --> /etc/docker/server.pem (1208 bytes)
I0916 23:57:31.499569 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/machines/server-key.pem -> /etc/docker/server-key.pem
I0916 23:57:31.499653 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I0916 23:57:31.530277 722351 provision.go:87] duration metric: took 377.61476ms to configureAuth
I0916 23:57:31.530311 722351 ubuntu.go:206] setting minikube options for container-runtime
I0916 23:57:31.530528 722351 config.go:182] Loaded profile config "ha-198834": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0916 23:57:31.530587 722351 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-198834-m02
I0916 23:57:31.548573 722351 main.go:141] libmachine: Using SSH client type: native
I0916 23:57:31.548821 722351 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32788 <nil> <nil>}
I0916 23:57:31.548841 722351 main.go:141] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I0916 23:57:31.695327 722351 main.go:141] libmachine: SSH cmd err, output: <nil>: overlay
I0916 23:57:31.695357 722351 ubuntu.go:71] root file system type: overlay
I0916 23:57:31.695559 722351 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I0916 23:57:31.695639 722351 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-198834-m02
I0916 23:57:31.715926 722351 main.go:141] libmachine: Using SSH client type: native
I0916 23:57:31.716269 722351 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32788 <nil> <nil>}
I0916 23:57:31.716384 722351 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
Environment="NO_PROXY=192.168.49.2"
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
-H fd:// --containerd=/run/containerd/containerd.sock \
-H unix:///var/run/docker.sock \
--default-ulimit=nofile=1048576:1048576 \
--tlsverify \
--tlscacert /etc/docker/ca.pem \
--tlscert /etc/docker/server.pem \
--tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I0916 23:57:31.879960 722351 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
Environment=NO_PROXY=192.168.49.2
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
I0916 23:57:31.880054 722351 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-198834-m02
I0916 23:57:31.901465 722351 main.go:141] libmachine: Using SSH client type: native
I0916 23:57:31.901783 722351 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32788 <nil> <nil>}
I0916 23:57:31.901817 722351 main.go:141] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I0916 23:57:33.107385 722351 main.go:141] libmachine: SSH cmd err, output: <nil>: --- /lib/systemd/system/docker.service 2025-09-03 20:55:49.000000000 +0000
+++ /lib/systemd/system/docker.service.new 2025-09-16 23:57:31.877658246 +0000
@@ -9,23 +9,35 @@
[Service]
Type=notify
-# the default is not to use systemd for cgroups because the delegate issues still
-# exists and systemd currently does not support the cgroup feature set required
-# for containers run by docker
-ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
-ExecReload=/bin/kill -s HUP $MAINPID
-TimeoutStartSec=0
-RestartSec=2
Restart=always
+Environment=NO_PROXY=192.168.49.2
+
+
+# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
+# The base configuration already specifies an 'ExecStart=...' command. The first directive
+# here is to clear out that command inherited from the base configuration. Without this,
+# the command from the base configuration and the command specified here are treated as
+# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
+# will catch this invalid input and refuse to start the service with an error like:
+# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
+
+# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
+# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
+ExecStart=
+ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
+ExecReload=/bin/kill -s HUP $MAINPID
+
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
-# Comment TasksMax if your systemd version does not support it.
-# Only systemd 226 and above support this option.
+# Uncomment TasksMax if your systemd version supports it.
+# Only systemd 226 and above support this version.
TasksMax=infinity
+TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable docker
I0916 23:57:33.107432 722351 machine.go:96] duration metric: took 2.482713737s to provisionDockerMachine
I0916 23:57:33.107448 722351 client.go:171] duration metric: took 8.040135103s to LocalClient.Create
I0916 23:57:33.107471 722351 start.go:167] duration metric: took 8.040214449s to libmachine.API.Create "ha-198834"
I0916 23:57:33.107480 722351 start.go:293] postStartSetup for "ha-198834-m02" (driver="docker")
I0916 23:57:33.107493 722351 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0916 23:57:33.107570 722351 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0916 23:57:33.107624 722351 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-198834-m02
I0916 23:57:33.129478 722351 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32788 SSHKeyPath:/home/jenkins/minikube-integration/21550-661878/.minikube/machines/ha-198834-m02/id_rsa Username:docker}
I0916 23:57:33.235200 722351 ssh_runner.go:195] Run: cat /etc/os-release
I0916 23:57:33.239799 722351 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0916 23:57:33.239842 722351 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0916 23:57:33.239854 722351 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0916 23:57:33.239862 722351 info.go:137] Remote host: Ubuntu 22.04.5 LTS
I0916 23:57:33.239881 722351 filesync.go:126] Scanning /home/jenkins/minikube-integration/21550-661878/.minikube/addons for local assets ...
I0916 23:57:33.239961 722351 filesync.go:126] Scanning /home/jenkins/minikube-integration/21550-661878/.minikube/files for local assets ...
I0916 23:57:33.240070 722351 filesync.go:149] local asset: /home/jenkins/minikube-integration/21550-661878/.minikube/files/etc/ssl/certs/6653992.pem -> 6653992.pem in /etc/ssl/certs
I0916 23:57:33.240085 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/files/etc/ssl/certs/6653992.pem -> /etc/ssl/certs/6653992.pem
I0916 23:57:33.240211 722351 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I0916 23:57:33.252619 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/files/etc/ssl/certs/6653992.pem --> /etc/ssl/certs/6653992.pem (1708 bytes)
I0916 23:57:33.291135 722351 start.go:296] duration metric: took 183.636707ms for postStartSetup
I0916 23:57:33.291600 722351 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-198834-m02
I0916 23:57:33.313645 722351 profile.go:143] Saving config to /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/config.json ...
I0916 23:57:33.314041 722351 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0916 23:57:33.314103 722351 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-198834-m02
I0916 23:57:33.337314 722351 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32788 SSHKeyPath:/home/jenkins/minikube-integration/21550-661878/.minikube/machines/ha-198834-m02/id_rsa Username:docker}
I0916 23:57:33.439716 722351 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0916 23:57:33.445408 722351 start.go:128] duration metric: took 8.380530846s to createHost
I0916 23:57:33.445437 722351 start.go:83] releasing machines lock for "ha-198834-m02", held for 8.380681461s
I0916 23:57:33.445500 722351 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-198834-m02
I0916 23:57:33.469661 722351 out.go:179] * Found network options:
I0916 23:57:33.471226 722351 out.go:179] - NO_PROXY=192.168.49.2
W0916 23:57:33.472373 722351 proxy.go:120] fail to check proxy env: Error ip not in block
W0916 23:57:33.472429 722351 proxy.go:120] fail to check proxy env: Error ip not in block
I0916 23:57:33.472520 722351 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0916 23:57:33.472550 722351 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0916 23:57:33.472570 722351 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-198834-m02
I0916 23:57:33.472621 722351 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-198834-m02
I0916 23:57:33.495822 722351 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32788 SSHKeyPath:/home/jenkins/minikube-integration/21550-661878/.minikube/machines/ha-198834-m02/id_rsa Username:docker}
I0916 23:57:33.496478 722351 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32788 SSHKeyPath:/home/jenkins/minikube-integration/21550-661878/.minikube/machines/ha-198834-m02/id_rsa Username:docker}
I0916 23:57:33.601441 722351 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0916 23:57:33.704002 722351 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0916 23:57:33.704085 722351 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0916 23:57:33.742848 722351 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
I0916 23:57:33.742881 722351 start.go:495] detecting cgroup driver to use...
I0916 23:57:33.742929 722351 detect.go:190] detected "systemd" cgroup driver on host os
I0916 23:57:33.743066 722351 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0916 23:57:33.765394 722351 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I0916 23:57:33.781702 722351 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0916 23:57:33.796106 722351 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I0916 23:57:33.796186 722351 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I0916 23:57:33.811490 722351 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0916 23:57:33.825594 722351 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0916 23:57:33.840006 722351 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0916 23:57:33.853819 722351 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0916 23:57:33.867424 722351 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0916 23:57:33.882022 722351 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0916 23:57:33.896562 722351 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I0916 23:57:33.910813 722351 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0916 23:57:33.923436 722351 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0916 23:57:33.936892 722351 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0916 23:57:34.033978 722351 ssh_runner.go:195] Run: sudo systemctl restart containerd
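Note: the sed edits above point containerd's CRI runc runtime at the systemd cgroup driver before this restart. On a stock config.toml the affected section ends up looking roughly like the following excerpt (illustrative, not captured from the node):

[plugins."io.containerd.grpc.v1.cri"]
  sandbox_image = "registry.k8s.io/pause:3.10.1"
  [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
    runtime_type = "io.containerd.runc.v2"
    [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
      SystemdCgroup = true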
I0916 23:57:34.137820 722351 start.go:495] detecting cgroup driver to use...
I0916 23:57:34.137955 722351 detect.go:190] detected "systemd" cgroup driver on host os
I0916 23:57:34.138026 722351 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I0916 23:57:34.154788 722351 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0916 23:57:34.170769 722351 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I0916 23:57:34.190397 722351 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0916 23:57:34.207526 722351 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0916 23:57:34.224333 722351 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I0916 23:57:34.249827 722351 ssh_runner.go:195] Run: which cri-dockerd
I0916 23:57:34.255532 722351 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I0916 23:57:34.270253 722351 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
I0916 23:57:34.296311 722351 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I0916 23:57:34.391517 722351 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I0916 23:57:34.486390 722351 docker.go:575] configuring docker to use "systemd" as cgroup driver...
I0916 23:57:34.486452 722351 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (129 bytes)
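Note: the 129-byte daemon.json written here carries the matching cgroup-driver setting for dockerd itself. The exact payload is not logged, but a minimal equivalent for setting Docker's cgroup driver is:

{
  "exec-opts": ["native.cgroupdriver=systemd"]
}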
I0916 23:57:34.512957 722351 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
I0916 23:57:34.529696 722351 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0916 23:57:34.623612 722351 ssh_runner.go:195] Run: sudo systemctl restart docker
I0916 23:57:35.389236 722351 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I0916 23:57:35.402665 722351 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I0916 23:57:35.418828 722351 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0916 23:57:35.433733 722351 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I0916 23:57:35.524509 722351 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I0916 23:57:35.615815 722351 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0916 23:57:35.688879 722351 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I0916 23:57:35.713552 722351 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I0916 23:57:35.729264 722351 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0916 23:57:35.818355 722351 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I0916 23:57:35.908063 722351 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0916 23:57:35.921416 722351 start.go:542] Will wait 60s for socket path /var/run/cri-dockerd.sock
I0916 23:57:35.921483 722351 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I0916 23:57:35.925600 722351 start.go:563] Will wait 60s for crictl version
I0916 23:57:35.925666 722351 ssh_runner.go:195] Run: which crictl
I0916 23:57:35.929510 722351 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0916 23:57:35.970926 722351 start.go:579] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 28.4.0
RuntimeApiVersion: v1
I0916 23:57:35.971002 722351 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0916 23:57:36.001052 722351 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0916 23:57:36.032731 722351 out.go:252] * Preparing Kubernetes v1.34.0 on Docker 28.4.0 ...
I0916 23:57:36.033881 722351 out.go:179] - env NO_PROXY=192.168.49.2
I0916 23:57:36.035387 722351 cli_runner.go:164] Run: docker network inspect ha-198834 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0916 23:57:36.055948 722351 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I0916 23:57:36.061767 722351 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0916 23:57:36.076229 722351 mustload.go:65] Loading cluster: ha-198834
I0916 23:57:36.076482 722351 config.go:182] Loaded profile config "ha-198834": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0916 23:57:36.076794 722351 cli_runner.go:164] Run: docker container inspect ha-198834 --format={{.State.Status}}
I0916 23:57:36.099199 722351 host.go:66] Checking if "ha-198834" exists ...
I0916 23:57:36.099483 722351 certs.go:68] Setting up /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834 for IP: 192.168.49.3
I0916 23:57:36.099498 722351 certs.go:194] generating shared ca certs ...
I0916 23:57:36.099514 722351 certs.go:226] acquiring lock for ca certs: {Name:mk24ad2a96dc59b16a9413b27c57b0ccb7d8ca57 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 23:57:36.099667 722351 certs.go:235] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21550-661878/.minikube/ca.key
I0916 23:57:36.099721 722351 certs.go:235] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21550-661878/.minikube/proxy-client-ca.key
I0916 23:57:36.099735 722351 certs.go:256] generating profile certs ...
I0916 23:57:36.099834 722351 certs.go:359] skipping valid signed profile cert regeneration for "minikube-user": /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/client.key
I0916 23:57:36.099867 722351 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.key.3ea539d4
I0916 23:57:36.099889 722351 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.crt.3ea539d4 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2 192.168.49.3 192.168.49.254]
I0916 23:57:36.171638 722351 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.crt.3ea539d4 ...
I0916 23:57:36.171669 722351 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.crt.3ea539d4: {Name:mk274e4893d598b40c8fed777bc1c7c2e951159a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 23:57:36.171866 722351 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.key.3ea539d4 ...
I0916 23:57:36.171885 722351 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.key.3ea539d4: {Name:mkf2a66869f0c345fb28cc9925dc0bb02623a928 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 23:57:36.172011 722351 certs.go:381] copying /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.crt.3ea539d4 -> /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.crt
I0916 23:57:36.172195 722351 certs.go:385] copying /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.key.3ea539d4 -> /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.key
I0916 23:57:36.172362 722351 certs.go:359] skipping valid signed profile cert regeneration for "aggregator": /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/proxy-client.key
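The apiserver profile certificate above is issued with IP SANs covering the in-cluster service IP (10.96.0.1), localhost, both control-plane node IPs and the kube-vip VIP (192.168.49.254), so clients can reach the API server through any of those addresses with a single certificate. The stand-alone Go sketch below is hypothetical (it is not minikube's crypto.go, and it self-signs a throwaway CA instead of reusing ca.key); it only illustrates producing a serving cert with the same SAN list.

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"math/big"
	"net"
	"os"
	"time"
)

func main() {
	// Throwaway CA purely for illustration; the real flow reuses .minikube/ca.crt and ca.key.
	caKey, _ := rsa.GenerateKey(rand.Reader, 2048)
	caTmpl := &x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{CommonName: "minikubeCA"},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().AddDate(10, 0, 0),
		IsCA:                  true,
		KeyUsage:              x509.KeyUsageCertSign,
		BasicConstraintsValid: true,
	}
	caDER, _ := x509.CreateCertificate(rand.Reader, caTmpl, caTmpl, &caKey.PublicKey, caKey)
	caCert, _ := x509.ParseCertificate(caDER)

	// Serving cert: SANs match the IP list logged above (service VIP, localhost, node IPs, HA VIP).
	key, _ := rsa.GenerateKey(rand.Reader, 2048)
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(2),
		Subject:      pkix.Name{CommonName: "minikube"},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().AddDate(3, 0, 0),
		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		IPAddresses: []net.IP{
			net.ParseIP("10.96.0.1"), net.ParseIP("127.0.0.1"), net.ParseIP("10.0.0.1"),
			net.ParseIP("192.168.49.2"), net.ParseIP("192.168.49.3"), net.ParseIP("192.168.49.254"),
		},
	}
	der, _ := x509.CreateCertificate(rand.Reader, tmpl, caCert, &key.PublicKey, caKey)
	pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: der})
}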
I0916 23:57:36.172381 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/ca.crt -> /var/lib/minikube/certs/ca.crt
I0916 23:57:36.172396 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/ca.key -> /var/lib/minikube/certs/ca.key
I0916 23:57:36.172415 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/proxy-client-ca.crt -> /var/lib/minikube/certs/proxy-client-ca.crt
I0916 23:57:36.172438 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/proxy-client-ca.key -> /var/lib/minikube/certs/proxy-client-ca.key
I0916 23:57:36.172457 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.crt -> /var/lib/minikube/certs/apiserver.crt
I0916 23:57:36.172474 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.key -> /var/lib/minikube/certs/apiserver.key
I0916 23:57:36.172493 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/proxy-client.crt -> /var/lib/minikube/certs/proxy-client.crt
I0916 23:57:36.172512 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/proxy-client.key -> /var/lib/minikube/certs/proxy-client.key
I0916 23:57:36.172589 722351 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-661878/.minikube/certs/665399.pem (1338 bytes)
W0916 23:57:36.172634 722351 certs.go:480] ignoring /home/jenkins/minikube-integration/21550-661878/.minikube/certs/665399_empty.pem, impossibly tiny 0 bytes
I0916 23:57:36.172648 722351 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-661878/.minikube/certs/ca-key.pem (1675 bytes)
I0916 23:57:36.172679 722351 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-661878/.minikube/certs/ca.pem (1078 bytes)
I0916 23:57:36.172703 722351 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-661878/.minikube/certs/cert.pem (1123 bytes)
I0916 23:57:36.172736 722351 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-661878/.minikube/certs/key.pem (1679 bytes)
I0916 23:57:36.172796 722351 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-661878/.minikube/files/etc/ssl/certs/6653992.pem (1708 bytes)
I0916 23:57:36.172840 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/files/etc/ssl/certs/6653992.pem -> /usr/share/ca-certificates/6653992.pem
I0916 23:57:36.172861 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/ca.crt -> /usr/share/ca-certificates/minikubeCA.pem
I0916 23:57:36.172878 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/certs/665399.pem -> /usr/share/ca-certificates/665399.pem
I0916 23:57:36.172963 722351 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-198834
I0916 23:57:36.194873 722351 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32783 SSHKeyPath:/home/jenkins/minikube-integration/21550-661878/.minikube/machines/ha-198834/id_rsa Username:docker}
I0916 23:57:36.286293 722351 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/sa.pub
I0916 23:57:36.291948 722351 ssh_runner.go:447] scp /var/lib/minikube/certs/sa.pub --> memory (451 bytes)
I0916 23:57:36.308150 722351 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/sa.key
I0916 23:57:36.312206 722351 ssh_runner.go:447] scp /var/lib/minikube/certs/sa.key --> memory (1675 bytes)
I0916 23:57:36.325598 722351 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/front-proxy-ca.crt
I0916 23:57:36.329618 722351 ssh_runner.go:447] scp /var/lib/minikube/certs/front-proxy-ca.crt --> memory (1123 bytes)
I0916 23:57:36.346110 722351 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/front-proxy-ca.key
I0916 23:57:36.350017 722351 ssh_runner.go:447] scp /var/lib/minikube/certs/front-proxy-ca.key --> memory (1675 bytes)
I0916 23:57:36.365628 722351 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/etcd/ca.crt
I0916 23:57:36.369445 722351 ssh_runner.go:447] scp /var/lib/minikube/certs/etcd/ca.crt --> memory (1094 bytes)
I0916 23:57:36.383675 722351 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/etcd/ca.key
I0916 23:57:36.387388 722351 ssh_runner.go:447] scp /var/lib/minikube/certs/etcd/ca.key --> memory (1675 bytes)
I0916 23:57:36.403394 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0916 23:57:36.432068 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I0916 23:57:36.461592 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0916 23:57:36.491261 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I0916 23:57:36.523895 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1436 bytes)
I0916 23:57:36.552719 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I0916 23:57:36.580284 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0916 23:57:36.608342 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I0916 23:57:36.639670 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/files/etc/ssl/certs/6653992.pem --> /usr/share/ca-certificates/6653992.pem (1708 bytes)
I0916 23:57:36.672003 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0916 23:57:36.703856 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/certs/665399.pem --> /usr/share/ca-certificates/665399.pem (1338 bytes)
I0916 23:57:36.734275 722351 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/sa.pub (451 bytes)
I0916 23:57:36.755638 722351 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/sa.key (1675 bytes)
I0916 23:57:36.777805 722351 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/front-proxy-ca.crt (1123 bytes)
I0916 23:57:36.799338 722351 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/front-proxy-ca.key (1675 bytes)
I0916 23:57:36.821463 722351 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/etcd/ca.crt (1094 bytes)
I0916 23:57:36.843600 722351 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/etcd/ca.key (1675 bytes)
I0916 23:57:36.867808 722351 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (744 bytes)
I0916 23:57:36.889233 722351 ssh_runner.go:195] Run: openssl version
I0916 23:57:36.896091 722351 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/665399.pem && ln -fs /usr/share/ca-certificates/665399.pem /etc/ssl/certs/665399.pem"
I0916 23:57:36.908363 722351 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/665399.pem
I0916 23:57:36.913145 722351 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Sep 16 23:53 /usr/share/ca-certificates/665399.pem
I0916 23:57:36.913212 722351 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/665399.pem
I0916 23:57:36.921857 722351 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/665399.pem /etc/ssl/certs/51391683.0"
I0916 23:57:36.934186 722351 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/6653992.pem && ln -fs /usr/share/ca-certificates/6653992.pem /etc/ssl/certs/6653992.pem"
I0916 23:57:36.945282 722351 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/6653992.pem
I0916 23:57:36.949180 722351 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Sep 16 23:53 /usr/share/ca-certificates/6653992.pem
I0916 23:57:36.949249 722351 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/6653992.pem
I0916 23:57:36.958068 722351 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/6653992.pem /etc/ssl/certs/3ec20f2e.0"
I0916 23:57:36.970160 722351 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0916 23:57:36.981053 722351 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0916 23:57:36.985350 722351 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Sep 16 23:48 /usr/share/ca-certificates/minikubeCA.pem
I0916 23:57:36.985410 722351 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0916 23:57:36.993828 722351 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
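The three passes above install each CA under /usr/share/ca-certificates and then create the /etc/ssl/certs/<subject-hash>.0 symlink that OpenSSL's certificate-directory lookup expects (51391683.0, 3ec20f2e.0, b5213941.0 in this run). A minimal Go sketch of the same idea, assuming openssl is on PATH; the paths are illustrative, not taken from minikube's certs.go.

package main

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
)

// linkBySubjectHash creates /etc/ssl/certs/<hash>.0 -> pemPath if it is not already there.
func linkBySubjectHash(pemPath string) error {
	out, err := exec.Command("openssl", "x509", "-hash", "-noout", "-in", pemPath).Output()
	if err != nil {
		return err
	}
	hash := strings.TrimSpace(string(out))
	link := filepath.Join("/etc/ssl/certs", hash+".0")
	// Idempotent: only create the link when it does not exist yet.
	if _, err := os.Lstat(link); err == nil {
		return nil
	}
	return os.Symlink(pemPath, link)
}

func main() {
	if err := linkBySubjectHash("/usr/share/ca-certificates/minikubeCA.pem"); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}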
I0916 23:57:37.004616 722351 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0916 23:57:37.008764 722351 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I0916 23:57:37.008830 722351 kubeadm.go:926] updating node {m02 192.168.49.3 8443 v1.34.0 docker true true} ...
I0916 23:57:37.008961 722351 kubeadm.go:938] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=ha-198834-m02 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.3
[Install]
config:
{KubernetesVersion:v1.34.0 ClusterName:ha-198834 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
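The kubelet drop-in above is rendered per node: only --hostname-override and --node-ip change between ha-198834 and ha-198834-m02. A minimal sketch of rendering that drop-in with text/template, assuming the flag set shown in the log (this is not minikube's kubeadm.go template).

package main

import (
	"os"
	"text/template"
)

const dropIn = `[Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/{{.KubernetesVersion}}/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override={{.NodeName}} --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip={{.NodeIP}}
[Install]
`

func main() {
	t := template.Must(template.New("kubelet").Parse(dropIn))
	// Values for the second control-plane node, as logged above.
	t.Execute(os.Stdout, map[string]string{
		"KubernetesVersion": "v1.34.0",
		"NodeName":          "ha-198834-m02",
		"NodeIP":            "192.168.49.3",
	})
}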
I0916 23:57:37.008998 722351 kube-vip.go:115] generating kube-vip config ...
I0916 23:57:37.009050 722351 ssh_runner.go:195] Run: sudo sh -c "lsmod | grep ip_vs"
I0916 23:57:37.026582 722351 kube-vip.go:163] giving up enabling control-plane load-balancing as ipvs kernel modules appear not to be available: sudo sh -c "lsmod | grep ip_vs": Process exited with status 1
stdout:
stderr:
I0916 23:57:37.026656 722351 kube-vip.go:137] kube-vip config:
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
name: kube-vip
namespace: kube-system
spec:
containers:
- args:
- manager
env:
- name: vip_arp
value: "true"
- name: port
value: "8443"
- name: vip_nodename
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: vip_interface
value: eth0
- name: vip_cidr
value: "32"
- name: dns_mode
value: first
- name: cp_enable
value: "true"
- name: cp_namespace
value: kube-system
- name: vip_leaderelection
value: "true"
- name: vip_leasename
value: plndr-cp-lock
- name: vip_leaseduration
value: "5"
- name: vip_renewdeadline
value: "3"
- name: vip_retryperiod
value: "1"
- name: address
value: 192.168.49.254
- name: prometheus_server
value: :2112
image: ghcr.io/kube-vip/kube-vip:v1.0.0
imagePullPolicy: IfNotPresent
name: kube-vip
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
- NET_RAW
volumeMounts:
- mountPath: /etc/kubernetes/admin.conf
name: kubeconfig
hostAliases:
- hostnames:
- kubernetes
ip: 127.0.0.1
hostNetwork: true
volumes:
- hostPath:
path: "/etc/kubernetes/admin.conf"
name: kubeconfig
status: {}
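Because the ip_vs probe above failed, the generated kube-vip manifest relies on ARP leader election (vip_arp, vip_leaderelection and the plndr-cp-lock lease) to float 192.168.49.254 between control planes, rather than IPVS-based load-balancing. A hypothetical helper sketching that probe decision; it is not minikube's kube-vip.go.

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// ipvsAvailable reports whether any ip_vs kernel module is currently loaded.
func ipvsAvailable() bool {
	out, err := exec.Command("sh", "-c", "lsmod").Output()
	if err != nil {
		return false
	}
	return strings.Contains(string(out), "ip_vs")
}

func main() {
	if ipvsAvailable() {
		fmt.Println("enable kube-vip control-plane load-balancing")
	} else {
		fmt.Println("ARP-only VIP failover: ip_vs kernel modules not loaded")
	}
}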
I0916 23:57:37.026738 722351 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.0
I0916 23:57:37.036867 722351 binaries.go:44] Found k8s binaries, skipping transfer
I0916 23:57:37.036974 722351 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /etc/kubernetes/manifests
I0916 23:57:37.046606 722351 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (312 bytes)
I0916 23:57:37.070259 722351 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0916 23:57:37.092325 722351 ssh_runner.go:362] scp memory --> /etc/kubernetes/manifests/kube-vip.yaml (1358 bytes)
I0916 23:57:37.116853 722351 ssh_runner.go:195] Run: grep 192.168.49.254 control-plane.minikube.internal$ /etc/hosts
I0916 23:57:37.120789 722351 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.254 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
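Both hosts-file edits in this section (host.minikube.internal earlier, control-plane.minikube.internal here) follow the same idempotent pattern: strip any stale line for the name, append a fresh "IP<tab>name" entry, and copy the result back into /etc/hosts. A pure-Go rendering of that one-liner, a sketch only, with the file path and root privileges left to the caller.

package main

import (
	"os"
	"strings"
)

// upsertHostsEntry removes any existing line ending in the given name and appends "ip\tname".
func upsertHostsEntry(path, ip, name string) error {
	data, err := os.ReadFile(path)
	if err != nil {
		return err
	}
	var kept []string
	for _, line := range strings.Split(string(data), "\n") {
		if strings.HasSuffix(line, "\t"+name) || strings.HasSuffix(line, " "+name) {
			continue // drop the stale entry for this name
		}
		if line != "" {
			kept = append(kept, line)
		}
	}
	kept = append(kept, ip+"\t"+name)
	return os.WriteFile(path, []byte(strings.Join(kept, "\n")+"\n"), 0644)
}

func main() {
	_ = upsertHostsEntry("/etc/hosts", "192.168.49.254", "control-plane.minikube.internal")
}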
I0916 23:57:37.137396 722351 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0916 23:57:37.223494 722351 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0916 23:57:37.256254 722351 host.go:66] Checking if "ha-198834" exists ...
I0916 23:57:37.256574 722351 start.go:317] joinCluster: &{Name:ha-198834 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:ha-198834 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[]
DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m02 IP:192.168.49.3 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0
MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0916 23:57:37.256705 722351 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm token create --print-join-command --ttl=0"
I0916 23:57:37.256762 722351 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-198834
I0916 23:57:37.278264 722351 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32783 SSHKeyPath:/home/jenkins/minikube-integration/21550-661878/.minikube/machines/ha-198834/id_rsa Username:docker}
I0916 23:57:37.435308 722351 start.go:343] trying to join control-plane node "m02" to cluster: &{Name:m02 IP:192.168.49.3 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}
I0916 23:57:37.435366 722351 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm join control-plane.minikube.internal:8443 --token rs0rx7.1v9nwhb46wdsoqvk --discovery-token-ca-cert-hash sha256:aa81889111a47026234b9abf30001d51a9462bec006420d404163720ad63709e --ignore-preflight-errors=all --cri-socket unix:///var/run/cri-dockerd.sock --node-name=ha-198834-m02 --control-plane --apiserver-advertise-address=192.168.49.3 --apiserver-bind-port=8443"
I0916 23:57:54.013635 722351 ssh_runner.go:235] Completed: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm join control-plane.minikube.internal:8443 --token rs0rx7.1v9nwhb46wdsoqvk --discovery-token-ca-cert-hash sha256:aa81889111a47026234b9abf30001d51a9462bec006420d404163720ad63709e --ignore-preflight-errors=all --cri-socket unix:///var/run/cri-dockerd.sock --node-name=ha-198834-m02 --control-plane --apiserver-advertise-address=192.168.49.3 --apiserver-bind-port=8443": (16.578241326s)
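The join above is assembled in two steps: the primary control plane prints a base join command (kubeadm token create --print-join-command --ttl=0), and the control-plane-specific flags for m02 are appended before the command is run over SSH on the new node. An illustrative sketch of that assembly, not minikube's start.go.

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	// On the primary: emit "kubeadm join <endpoint> --token ... --discovery-token-ca-cert-hash ...".
	out, err := exec.Command("kubeadm", "token", "create", "--print-join-command", "--ttl=0").Output()
	if err != nil {
		fmt.Println("token create failed:", err)
		return
	}
	// Append the flags that make this a control-plane join for ha-198834-m02 (values from the log).
	join := strings.TrimSpace(string(out)) +
		" --control-plane" +
		" --apiserver-advertise-address=192.168.49.3" +
		" --apiserver-bind-port=8443" +
		" --node-name=ha-198834-m02" +
		" --cri-socket unix:///var/run/cri-dockerd.sock" +
		" --ignore-preflight-errors=all"
	fmt.Println(join) // would then be executed over SSH on the joining node
}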
I0916 23:57:54.013701 722351 ssh_runner.go:195] Run: /bin/bash -c "sudo systemctl daemon-reload && sudo systemctl enable kubelet && sudo systemctl start kubelet"
I0916 23:57:54.233708 722351 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes ha-198834-m02 minikube.k8s.io/updated_at=2025_09_16T23_57_54_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=9829f0bc17c523e4378d28e0c25741106f24f00a minikube.k8s.io/name=ha-198834 minikube.k8s.io/primary=false
I0916 23:57:54.308006 722351 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig taint nodes ha-198834-m02 node-role.kubernetes.io/control-plane:NoSchedule-
I0916 23:57:54.383356 722351 start.go:319] duration metric: took 17.126777498s to joinCluster
I0916 23:57:54.383433 722351 start.go:235] Will wait 6m0s for node &{Name:m02 IP:192.168.49.3 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}
I0916 23:57:54.383691 722351 config.go:182] Loaded profile config "ha-198834": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0916 23:57:54.385020 722351 out.go:179] * Verifying Kubernetes components...
I0916 23:57:54.386187 722351 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0916 23:57:54.491315 722351 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0916 23:57:54.505328 722351 kapi.go:59] client config for ha-198834: &rest.Config{Host:"https://192.168.49.254:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/client.crt", KeyFile:"/home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/client.key", CAFile:"/home/jenkins/minikube-integration/21550-661878/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(n
il)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x27f4620), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), WarningHandlerWithContext:rest.WarningHandlerWithContext(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
W0916 23:57:54.505398 722351 kubeadm.go:483] Overriding stale ClientConfig host https://192.168.49.254:8443 with https://192.168.49.2:8443
I0916 23:57:54.505659 722351 node_ready.go:35] waiting up to 6m0s for node "ha-198834-m02" to be "Ready" ...
I0916 23:57:56.508947 722351 node_ready.go:49] node "ha-198834-m02" is "Ready"
I0916 23:57:56.508979 722351 node_ready.go:38] duration metric: took 2.003299323s for node "ha-198834-m02" to be "Ready" ...
I0916 23:57:56.508998 722351 api_server.go:52] waiting for apiserver process to appear ...
I0916 23:57:56.509065 722351 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0916 23:57:56.521258 722351 api_server.go:72] duration metric: took 2.137779117s to wait for apiserver process to appear ...
I0916 23:57:56.521298 722351 api_server.go:88] waiting for apiserver healthz status ...
I0916 23:57:56.521326 722351 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
I0916 23:57:56.527086 722351 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
ok
I0916 23:57:56.528055 722351 api_server.go:141] control plane version: v1.34.0
I0916 23:57:56.528078 722351 api_server.go:131] duration metric: took 6.77168ms to wait for apiserver health ...
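The health wait above polls /healthz on the API server over TLS using the cluster CA until it answers 200 "ok". A small sketch of such a probe; the endpoint and CA path mirror the log, while the retry loop itself is an assumption.

package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io"
	"net/http"
	"os"
	"time"
)

func main() {
	caPEM, err := os.ReadFile("/var/lib/minikube/certs/ca.crt")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	pool := x509.NewCertPool()
	pool.AppendCertsFromPEM(caPEM)
	client := &http.Client{
		Timeout:   5 * time.Second,
		Transport: &http.Transport{TLSClientConfig: &tls.Config{RootCAs: pool}},
	}
	for i := 0; i < 30; i++ {
		resp, err := client.Get("https://192.168.49.2:8443/healthz")
		if err == nil {
			body, _ := io.ReadAll(resp.Body)
			resp.Body.Close()
			if resp.StatusCode == 200 {
				fmt.Printf("healthz: %d %s\n", resp.StatusCode, body)
				return
			}
		}
		time.Sleep(2 * time.Second)
	}
	fmt.Fprintln(os.Stderr, "apiserver never became healthy")
}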
I0916 23:57:56.528087 722351 system_pods.go:43] waiting for kube-system pods to appear ...
I0916 23:57:56.534412 722351 system_pods.go:59] 19 kube-system pods found
I0916 23:57:56.534478 722351 system_pods.go:61] "coredns-66bc5c9577-5wx4k" [6f279fd8-dd3c-49a5-863d-a53124ecf1f5] Running
I0916 23:57:56.534486 722351 system_pods.go:61] "coredns-66bc5c9577-mjbz6" [c918625f-be11-44bf-8b82-d4c21b8993d1] Running
I0916 23:57:56.534497 722351 system_pods.go:61] "etcd-ha-198834" [8374ebf7-cb1d-422e-8768-584e07b2dcab] Running
I0916 23:57:56.534503 722351 system_pods.go:61] "etcd-ha-198834-m02" [222eaa2a-824e-4087-a614-f5f5a6de8e98] Pending
I0916 23:57:56.534515 722351 system_pods.go:61] "kindnet-2vbn5" [acd8be88-6ee7-4832-830f-c98aaabacd81] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kindnet-2vbn5": pod kindnet-2vbn5 is already assigned to node "ha-198834-m02")
I0916 23:57:56.534524 722351 system_pods.go:61] "kindnet-h28vp" [6c51d39f-7e43-461b-a021-13ddf0cb9845] Running
I0916 23:57:56.534535 722351 system_pods.go:61] "kindnet-mh8pf" [4bbbea44-3bf9-4c36-b876-fb4390d15dfc] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kindnet-mh8pf": pod kindnet-mh8pf is already assigned to node "ha-198834-m02")
I0916 23:57:56.534541 722351 system_pods.go:61] "kube-apiserver-ha-198834" [5176645e-1819-4ab4-add3-9355f9a506ce] Running
I0916 23:57:56.534547 722351 system_pods.go:61] "kube-apiserver-ha-198834-m02" [a2c8eefd-3b40-484d-9939-74b5fdba7182] Pending
I0916 23:57:56.534559 722351 system_pods.go:61] "kube-controller-manager-ha-198834" [36327629-7bc1-440d-b760-3fdf88af1b03] Running
I0916 23:57:56.534564 722351 system_pods.go:61] "kube-controller-manager-ha-198834-m02" [434a65bb-a306-4798-81f9-9631313ba763] Pending
I0916 23:57:56.534667 722351 system_pods.go:61] "kube-proxy-5tkhn" [5edbfebe-2590-4d23-b80e-7496a4e9a5b6] Running
I0916 23:57:56.534716 722351 system_pods.go:61] "kube-proxy-h2fxd" [db1b17f7-7be8-46ef-8eb3-98432a2eec18] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kube-proxy-h2fxd": pod kube-proxy-h2fxd is already assigned to node "ha-198834-m02")
I0916 23:57:56.534725 722351 system_pods.go:61] "kube-proxy-ld4mc" [8b35ded7-d5ce-4805-8573-9dede265d002] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kube-proxy-ld4mc": pod kube-proxy-ld4mc is already assigned to node "ha-198834-m02")
I0916 23:57:56.534731 722351 system_pods.go:61] "kube-scheduler-ha-198834" [45afa1e0-273e-44fc-b170-bdc7a365273e] Running
I0916 23:57:56.534743 722351 system_pods.go:61] "kube-scheduler-ha-198834-m02" [633b0b32-1d8b-4301-85a5-8c36f53296e3] Pending
I0916 23:57:56.534748 722351 system_pods.go:61] "kube-vip-ha-198834" [cde651e3-1550-48cb-a5dc-09d55185429b] Running
I0916 23:57:56.534753 722351 system_pods.go:61] "kube-vip-ha-198834-m02" [a9e3b24d-529d-409c-982f-c72bd0cc4693] Pending
I0916 23:57:56.534758 722351 system_pods.go:61] "storage-provisioner" [6b6f64f3-2647-4e13-be41-47fcc6111f3e] Running
I0916 23:57:56.534765 722351 system_pods.go:74] duration metric: took 6.672375ms to wait for pod list to return data ...
I0916 23:57:56.534774 722351 default_sa.go:34] waiting for default service account to be created ...
I0916 23:57:56.538351 722351 default_sa.go:45] found service account: "default"
I0916 23:57:56.538385 722351 default_sa.go:55] duration metric: took 3.603096ms for default service account to be created ...
I0916 23:57:56.538399 722351 system_pods.go:116] waiting for k8s-apps to be running ...
I0916 23:57:56.542274 722351 system_pods.go:86] 19 kube-system pods found
I0916 23:57:56.542301 722351 system_pods.go:89] "coredns-66bc5c9577-5wx4k" [6f279fd8-dd3c-49a5-863d-a53124ecf1f5] Running
I0916 23:57:56.542307 722351 system_pods.go:89] "coredns-66bc5c9577-mjbz6" [c918625f-be11-44bf-8b82-d4c21b8993d1] Running
I0916 23:57:56.542311 722351 system_pods.go:89] "etcd-ha-198834" [8374ebf7-cb1d-422e-8768-584e07b2dcab] Running
I0916 23:57:56.542314 722351 system_pods.go:89] "etcd-ha-198834-m02" [222eaa2a-824e-4087-a614-f5f5a6de8e98] Pending
I0916 23:57:56.542321 722351 system_pods.go:89] "kindnet-2vbn5" [acd8be88-6ee7-4832-830f-c98aaabacd81] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kindnet-2vbn5": pod kindnet-2vbn5 is already assigned to node "ha-198834-m02")
I0916 23:57:56.542325 722351 system_pods.go:89] "kindnet-h28vp" [6c51d39f-7e43-461b-a021-13ddf0cb9845] Running
I0916 23:57:56.542330 722351 system_pods.go:89] "kindnet-mh8pf" [4bbbea44-3bf9-4c36-b876-fb4390d15dfc] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kindnet-mh8pf": pod kindnet-mh8pf is already assigned to node "ha-198834-m02")
I0916 23:57:56.542334 722351 system_pods.go:89] "kube-apiserver-ha-198834" [5176645e-1819-4ab4-add3-9355f9a506ce] Running
I0916 23:57:56.542338 722351 system_pods.go:89] "kube-apiserver-ha-198834-m02" [a2c8eefd-3b40-484d-9939-74b5fdba7182] Pending
I0916 23:57:56.542344 722351 system_pods.go:89] "kube-controller-manager-ha-198834" [36327629-7bc1-440d-b760-3fdf88af1b03] Running
I0916 23:57:56.542347 722351 system_pods.go:89] "kube-controller-manager-ha-198834-m02" [434a65bb-a306-4798-81f9-9631313ba763] Pending
I0916 23:57:56.542351 722351 system_pods.go:89] "kube-proxy-5tkhn" [5edbfebe-2590-4d23-b80e-7496a4e9a5b6] Running
I0916 23:57:56.542356 722351 system_pods.go:89] "kube-proxy-h2fxd" [db1b17f7-7be8-46ef-8eb3-98432a2eec18] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kube-proxy-h2fxd": pod kube-proxy-h2fxd is already assigned to node "ha-198834-m02")
I0916 23:57:56.542367 722351 system_pods.go:89] "kube-proxy-ld4mc" [8b35ded7-d5ce-4805-8573-9dede265d002] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kube-proxy-ld4mc": pod kube-proxy-ld4mc is already assigned to node "ha-198834-m02")
I0916 23:57:56.542371 722351 system_pods.go:89] "kube-scheduler-ha-198834" [45afa1e0-273e-44fc-b170-bdc7a365273e] Running
I0916 23:57:56.542375 722351 system_pods.go:89] "kube-scheduler-ha-198834-m02" [633b0b32-1d8b-4301-85a5-8c36f53296e3] Pending
I0916 23:57:56.542377 722351 system_pods.go:89] "kube-vip-ha-198834" [cde651e3-1550-48cb-a5dc-09d55185429b] Running
I0916 23:57:56.542380 722351 system_pods.go:89] "kube-vip-ha-198834-m02" [a9e3b24d-529d-409c-982f-c72bd0cc4693] Pending
I0916 23:57:56.542384 722351 system_pods.go:89] "storage-provisioner" [6b6f64f3-2647-4e13-be41-47fcc6111f3e] Running
I0916 23:57:56.542393 722351 system_pods.go:126] duration metric: took 3.988364ms to wait for k8s-apps to be running ...
I0916 23:57:56.542403 722351 system_svc.go:44] waiting for kubelet service to be running ....
I0916 23:57:56.542447 722351 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0916 23:57:56.554466 722351 system_svc.go:56] duration metric: took 12.054188ms WaitForService to wait for kubelet
I0916 23:57:56.554496 722351 kubeadm.go:578] duration metric: took 2.171026353s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0916 23:57:56.554519 722351 node_conditions.go:102] verifying NodePressure condition ...
I0916 23:57:56.557501 722351 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0916 23:57:56.557532 722351 node_conditions.go:123] node cpu capacity is 8
I0916 23:57:56.557552 722351 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0916 23:57:56.557557 722351 node_conditions.go:123] node cpu capacity is 8
I0916 23:57:56.557561 722351 node_conditions.go:105] duration metric: took 3.037317ms to run NodePressure ...
I0916 23:57:56.557575 722351 start.go:241] waiting for startup goroutines ...
I0916 23:57:56.557610 722351 start.go:255] writing updated cluster config ...
I0916 23:57:56.559549 722351 out.go:203]
I0916 23:57:56.561097 722351 config.go:182] Loaded profile config "ha-198834": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0916 23:57:56.561232 722351 profile.go:143] Saving config to /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/config.json ...
I0916 23:57:56.562855 722351 out.go:179] * Starting "ha-198834-m03" control-plane node in "ha-198834" cluster
I0916 23:57:56.563951 722351 cache.go:123] Beginning downloading kic base image for docker with docker
I0916 23:57:56.565051 722351 out.go:179] * Pulling base image v0.0.48 ...
I0916 23:57:56.566271 722351 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime docker
I0916 23:57:56.566290 722351 cache.go:58] Caching tarball of preloaded images
I0916 23:57:56.566373 722351 image.go:81] Checking for gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon
I0916 23:57:56.566383 722351 preload.go:172] Found /home/jenkins/minikube-integration/21550-661878/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-amd64.tar.lz4 in cache, skipping download
I0916 23:57:56.566485 722351 cache.go:61] Finished verifying existence of preloaded tar for v1.34.0 on docker
I0916 23:57:56.566581 722351 profile.go:143] Saving config to /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/config.json ...
I0916 23:57:56.586635 722351 image.go:100] Found gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon, skipping pull
I0916 23:57:56.586656 722351 cache.go:147] gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 exists in daemon, skipping load
I0916 23:57:56.586673 722351 cache.go:232] Successfully downloaded all kic artifacts
I0916 23:57:56.586704 722351 start.go:360] acquireMachinesLock for ha-198834-m03: {Name:mk4dabc098a240f7afab19054f40d0106bd7a469 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0916 23:57:56.586811 722351 start.go:364] duration metric: took 87.391µs to acquireMachinesLock for "ha-198834-m03"
I0916 23:57:56.586843 722351 start.go:93] Provisioning new machine with config: &{Name:ha-198834 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:ha-198834 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerN
ames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m02 IP:192.168.49.3 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m03 IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:false efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:fals
e kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:false storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetP
ath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name:m03 IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}
I0916 23:57:56.587003 722351 start.go:125] createHost starting for "m03" (driver="docker")
I0916 23:57:56.589063 722351 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I0916 23:57:56.589158 722351 start.go:159] libmachine.API.Create for "ha-198834" (driver="docker")
I0916 23:57:56.589187 722351 client.go:168] LocalClient.Create starting
I0916 23:57:56.589263 722351 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21550-661878/.minikube/certs/ca.pem
I0916 23:57:56.589299 722351 main.go:141] libmachine: Decoding PEM data...
I0916 23:57:56.589313 722351 main.go:141] libmachine: Parsing certificate...
I0916 23:57:56.589365 722351 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21550-661878/.minikube/certs/cert.pem
I0916 23:57:56.589385 722351 main.go:141] libmachine: Decoding PEM data...
I0916 23:57:56.589398 722351 main.go:141] libmachine: Parsing certificate...
I0916 23:57:56.589634 722351 cli_runner.go:164] Run: docker network inspect ha-198834 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0916 23:57:56.607248 722351 network_create.go:77] Found existing network {name:ha-198834 subnet:0xc001595440 gateway:[0 0 0 0 0 0 0 0 0 0 255 255 192 168 49 1] mtu:1500}
I0916 23:57:56.607297 722351 kic.go:121] calculated static IP "192.168.49.4" for the "ha-198834-m03" container
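The static IP for the new node follows the network's existing layout: the gateway holds .1, the primary node .2, m02 .3, so m03 gets 192.168.49.4. A hypothetical helper that derives the address the same way; it mirrors the result in the log, not minikube's kic.go.

package main

import (
	"fmt"
	"net/netip"
)

// nodeIP offsets from the subnet gateway: node 1 -> .2, node 2 -> .3, node 3 -> .4, and so on.
func nodeIP(gateway string, nodeIndex int) (netip.Addr, error) {
	addr, err := netip.ParseAddr(gateway) // e.g. 192.168.49.1
	if err != nil {
		return netip.Addr{}, err
	}
	for i := 0; i < nodeIndex; i++ {
		addr = addr.Next()
	}
	return addr, nil
}

func main() {
	ip, _ := nodeIP("192.168.49.1", 3)
	fmt.Println(ip) // 192.168.49.4
}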
I0916 23:57:56.607371 722351 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0916 23:57:56.624198 722351 cli_runner.go:164] Run: docker volume create ha-198834-m03 --label name.minikube.sigs.k8s.io=ha-198834-m03 --label created_by.minikube.sigs.k8s.io=true
I0916 23:57:56.642183 722351 oci.go:103] Successfully created a docker volume ha-198834-m03
I0916 23:57:56.642258 722351 cli_runner.go:164] Run: docker run --rm --name ha-198834-m03-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=ha-198834-m03 --entrypoint /usr/bin/test -v ha-198834-m03:/var gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -d /var/lib
I0916 23:57:57.021785 722351 oci.go:107] Successfully prepared a docker volume ha-198834-m03
I0916 23:57:57.021834 722351 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime docker
I0916 23:57:57.021864 722351 kic.go:194] Starting extracting preloaded images to volume ...
I0916 23:57:57.021952 722351 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21550-661878/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v ha-198834-m03:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -I lz4 -xf /preloaded.tar -C /extractDir
I0916 23:57:59.672995 722351 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21550-661878/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v ha-198834-m03:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -I lz4 -xf /preloaded.tar -C /extractDir: (2.650992477s)
I0916 23:57:59.673039 722351 kic.go:203] duration metric: took 2.651177157s to extract preloaded images to volume ...
W0916 23:57:59.673144 722351 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
W0916 23:57:59.673190 722351 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.
I0916 23:57:59.673255 722351 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0916 23:57:59.730169 722351 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname ha-198834-m03 --name ha-198834-m03 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=ha-198834-m03 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=ha-198834-m03 --network ha-198834 --ip 192.168.49.4 --volume ha-198834-m03:/var --security-opt apparmor=unconfined --memory=3072mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1
I0916 23:58:00.013728 722351 cli_runner.go:164] Run: docker container inspect ha-198834-m03 --format={{.State.Running}}
I0916 23:58:00.034076 722351 cli_runner.go:164] Run: docker container inspect ha-198834-m03 --format={{.State.Status}}
I0916 23:58:00.054832 722351 cli_runner.go:164] Run: docker exec ha-198834-m03 stat /var/lib/dpkg/alternatives/iptables
I0916 23:58:00.109517 722351 oci.go:144] the created container "ha-198834-m03" has a running status.
I0916 23:58:00.109546 722351 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21550-661878/.minikube/machines/ha-198834-m03/id_rsa...
I0916 23:58:00.621029 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/machines/ha-198834-m03/id_rsa.pub -> /home/docker/.ssh/authorized_keys
I0916 23:58:00.621097 722351 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21550-661878/.minikube/machines/ha-198834-m03/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0916 23:58:00.651614 722351 cli_runner.go:164] Run: docker container inspect ha-198834-m03 --format={{.State.Status}}
I0916 23:58:00.673435 722351 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0916 23:58:00.673460 722351 kic_runner.go:114] Args: [docker exec --privileged ha-198834-m03 chown docker:docker /home/docker/.ssh/authorized_keys]
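Key provisioning for the new machine amounts to generating an RSA keypair on the host, keeping the private half as id_rsa and installing the public half as the docker user's authorized_keys inside the container (then chown'd as above). A sketch under those assumptions, using golang.org/x/crypto/ssh to format the authorized_keys line; the output file name here is illustrative.

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"

	"golang.org/x/crypto/ssh"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	// Private key -> id_rsa (PKCS#1 PEM), written 0600 like any SSH key.
	privPEM := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)})
	_ = os.WriteFile("id_rsa", privPEM, 0600)

	// Public key -> one authorized_keys line for the "docker" user inside the container.
	pub, err := ssh.NewPublicKey(&key.PublicKey)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Print(string(ssh.MarshalAuthorizedKey(pub)))
}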
I0916 23:58:00.730412 722351 cli_runner.go:164] Run: docker container inspect ha-198834-m03 --format={{.State.Status}}
I0916 23:58:00.749865 722351 machine.go:93] provisionDockerMachine start ...
I0916 23:58:00.750006 722351 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-198834-m03
I0916 23:58:00.771445 722351 main.go:141] libmachine: Using SSH client type: native
I0916 23:58:00.771738 722351 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32793 <nil> <nil>}
I0916 23:58:00.771754 722351 main.go:141] libmachine: About to run SSH command:
hostname
I0916 23:58:00.920523 722351 main.go:141] libmachine: SSH cmd err, output: <nil>: ha-198834-m03
I0916 23:58:00.920553 722351 ubuntu.go:182] provisioning hostname "ha-198834-m03"
I0916 23:58:00.920616 722351 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-198834-m03
I0916 23:58:00.940561 722351 main.go:141] libmachine: Using SSH client type: native
I0916 23:58:00.940837 722351 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32793 <nil> <nil>}
I0916 23:58:00.940853 722351 main.go:141] libmachine: About to run SSH command:
sudo hostname ha-198834-m03 && echo "ha-198834-m03" | sudo tee /etc/hostname
I0916 23:58:01.103101 722351 main.go:141] libmachine: SSH cmd err, output: <nil>: ha-198834-m03
I0916 23:58:01.103204 722351 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-198834-m03
I0916 23:58:01.125182 722351 main.go:141] libmachine: Using SSH client type: native
I0916 23:58:01.125511 722351 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32793 <nil> <nil>}
I0916 23:58:01.125543 722351 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\sha-198834-m03' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 ha-198834-m03/g' /etc/hosts;
else
echo '127.0.1.1 ha-198834-m03' | sudo tee -a /etc/hosts;
fi
fi
I0916 23:58:01.275155 722351 main.go:141] libmachine: SSH cmd err, output: <nil>:
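Provisioning commands like the hostname script above are executed over the container's forwarded SSH port (127.0.0.1:32793) as the docker user. A hedged sketch of running one such command with golang.org/x/crypto/ssh; host-key checking and error handling are simplified here for a local test harness and do not reflect minikube's sshutil package exactly.

package main

import (
	"fmt"
	"os"

	"golang.org/x/crypto/ssh"
)

func main() {
	keyBytes, err := os.ReadFile("/home/jenkins/minikube-integration/21550-661878/.minikube/machines/ha-198834-m03/id_rsa")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	signer, err := ssh.ParsePrivateKey(keyBytes)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	cfg := &ssh.ClientConfig{
		User:            "docker",
		Auth:            []ssh.AuthMethod{ssh.PublicKeys(signer)},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // acceptable only against a local test container
	}
	client, err := ssh.Dial("tcp", "127.0.0.1:32793", cfg) // forwarded port 22 of ha-198834-m03
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer client.Close()
	sess, err := client.NewSession()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer sess.Close()
	out, err := sess.CombinedOutput(`sudo hostname ha-198834-m03 && echo "ha-198834-m03" | sudo tee /etc/hostname`)
	fmt.Print(string(out))
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}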
I0916 23:58:01.275201 722351 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21550-661878/.minikube CaCertPath:/home/jenkins/minikube-integration/21550-661878/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21550-661878/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21550-661878/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21550-661878/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21550-661878/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21550-661878/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21550-661878/.minikube}
I0916 23:58:01.275231 722351 ubuntu.go:190] setting up certificates
I0916 23:58:01.275246 722351 provision.go:84] configureAuth start
I0916 23:58:01.275318 722351 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-198834-m03
I0916 23:58:01.296305 722351 provision.go:143] copyHostCerts
I0916 23:58:01.296378 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/certs/ca.pem -> /home/jenkins/minikube-integration/21550-661878/.minikube/ca.pem
I0916 23:58:01.296426 722351 exec_runner.go:144] found /home/jenkins/minikube-integration/21550-661878/.minikube/ca.pem, removing ...
I0916 23:58:01.296439 722351 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21550-661878/.minikube/ca.pem
I0916 23:58:01.296527 722351 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21550-661878/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21550-661878/.minikube/ca.pem (1078 bytes)
I0916 23:58:01.296632 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/certs/cert.pem -> /home/jenkins/minikube-integration/21550-661878/.minikube/cert.pem
I0916 23:58:01.296656 722351 exec_runner.go:144] found /home/jenkins/minikube-integration/21550-661878/.minikube/cert.pem, removing ...
I0916 23:58:01.296682 722351 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21550-661878/.minikube/cert.pem
I0916 23:58:01.296726 722351 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21550-661878/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21550-661878/.minikube/cert.pem (1123 bytes)
I0916 23:58:01.296788 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/certs/key.pem -> /home/jenkins/minikube-integration/21550-661878/.minikube/key.pem
I0916 23:58:01.296825 722351 exec_runner.go:144] found /home/jenkins/minikube-integration/21550-661878/.minikube/key.pem, removing ...
I0916 23:58:01.296835 722351 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21550-661878/.minikube/key.pem
I0916 23:58:01.296924 722351 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21550-661878/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21550-661878/.minikube/key.pem (1679 bytes)
I0916 23:58:01.297040 722351 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21550-661878/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21550-661878/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21550-661878/.minikube/certs/ca-key.pem org=jenkins.ha-198834-m03 san=[127.0.0.1 192.168.49.4 ha-198834-m03 localhost minikube]
I0916 23:58:02.100987 722351 provision.go:177] copyRemoteCerts
I0916 23:58:02.101048 722351 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0916 23:58:02.101084 722351 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-198834-m03
I0916 23:58:02.119475 722351 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32793 SSHKeyPath:/home/jenkins/minikube-integration/21550-661878/.minikube/machines/ha-198834-m03/id_rsa Username:docker}
I0916 23:58:02.218802 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/machines/server.pem -> /etc/docker/server.pem
I0916 23:58:02.218870 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/machines/server.pem --> /etc/docker/server.pem (1208 bytes)
I0916 23:58:02.251628 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/machines/server-key.pem -> /etc/docker/server-key.pem
I0916 23:58:02.251700 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I0916 23:58:02.279052 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/certs/ca.pem -> /etc/docker/ca.pem
I0916 23:58:02.279124 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I0916 23:58:02.305168 722351 provision.go:87] duration metric: took 1.029902032s to configureAuth
I0916 23:58:02.305208 722351 ubuntu.go:206] setting minikube options for container-runtime
I0916 23:58:02.305440 722351 config.go:182] Loaded profile config "ha-198834": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0916 23:58:02.305491 722351 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-198834-m03
I0916 23:58:02.322139 722351 main.go:141] libmachine: Using SSH client type: native
I0916 23:58:02.322413 722351 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32793 <nil> <nil>}
I0916 23:58:02.322428 722351 main.go:141] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I0916 23:58:02.459594 722351 main.go:141] libmachine: SSH cmd err, output: <nil>: overlay
I0916 23:58:02.459629 722351 ubuntu.go:71] root file system type: overlay
I0916 23:58:02.459746 722351 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I0916 23:58:02.459804 722351 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-198834-m03
I0916 23:58:02.476657 722351 main.go:141] libmachine: Using SSH client type: native
I0916 23:58:02.476985 722351 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32793 <nil> <nil>}
I0916 23:58:02.477099 722351 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
Environment="NO_PROXY=192.168.49.2"
Environment="NO_PROXY=192.168.49.2,192.168.49.3"
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
-H fd:// --containerd=/run/containerd/containerd.sock \
-H unix:///var/run/docker.sock \
--default-ulimit=nofile=1048576:1048576 \
--tlsverify \
--tlscacert /etc/docker/ca.pem \
--tlscert /etc/docker/server.pem \
--tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I0916 23:58:02.633394 722351 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
Environment=NO_PROXY=192.168.49.2
Environment=NO_PROXY=192.168.49.2,192.168.49.3
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
I0916 23:58:02.633489 722351 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-198834-m03
I0916 23:58:02.651145 722351 main.go:141] libmachine: Using SSH client type: native
I0916 23:58:02.651390 722351 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32793 <nil> <nil>}
I0916 23:58:02.651410 722351 main.go:141] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I0916 23:58:03.800032 722351 main.go:141] libmachine: SSH cmd err, output: <nil>: --- /lib/systemd/system/docker.service 2025-09-03 20:55:49.000000000 +0000
+++ /lib/systemd/system/docker.service.new 2025-09-16 23:58:02.631485455 +0000
@@ -9,23 +9,36 @@
[Service]
Type=notify
-# the default is not to use systemd for cgroups because the delegate issues still
-# exists and systemd currently does not support the cgroup feature set required
-# for containers run by docker
-ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
-ExecReload=/bin/kill -s HUP $MAINPID
-TimeoutStartSec=0
-RestartSec=2
Restart=always
+Environment=NO_PROXY=192.168.49.2
+Environment=NO_PROXY=192.168.49.2,192.168.49.3
+
+
+# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
+# The base configuration already specifies an 'ExecStart=...' command. The first directive
+# here is to clear out that command inherited from the base configuration. Without this,
+# the command from the base configuration and the command specified here are treated as
+# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
+# will catch this invalid input and refuse to start the service with an error like:
+# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
+
+# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
+# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
+ExecStart=
+ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
+ExecReload=/bin/kill -s HUP $MAINPID
+
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
-# Comment TasksMax if your systemd version does not support it.
-# Only systemd 226 and above support this option.
+# Uncomment TasksMax if your systemd version supports it.
+# Only systemd 226 and above support this version.
TasksMax=infinity
+TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable docker
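Note: the "sudo diff -u old new || { sudo mv ...; systemctl daemon-reload && ... restart docker; }" step above is an idempotent apply: diff exits non-zero only when the rendered unit differs from the one already on disk (or the file is missing), and only then is the new unit moved into place and Docker re-enabled and restarted. A minimal local sketch of that idiom in Go follows; it is illustrative only, not minikube's own code, and the unit content and service name are placeholders.

package main

import (
	"bytes"
	"fmt"
	"os"
	"os/exec"
)

// applyUnit writes a freshly rendered systemd unit only when it differs from
// the copy already on disk, then reloads systemd and restarts the service.
func applyUnit(rendered []byte, path, service string) error {
	current, err := os.ReadFile(path)
	if err == nil && bytes.Equal(current, rendered) {
		return nil // unchanged: skip the disruptive restart
	}
	if err := os.WriteFile(path, rendered, 0o644); err != nil {
		return err
	}
	for _, args := range [][]string{
		{"daemon-reload"},
		{"enable", service},
		{"restart", service},
	} {
		if out, err := exec.Command("systemctl", args...).CombinedOutput(); err != nil {
			return fmt.Errorf("systemctl %v: %v: %s", args, err, out)
		}
	}
	return nil
}

func main() {
	unit := []byte("[Unit]\nDescription=example unit\n") // placeholder content
	if err := applyUnit(unit, "/tmp/example.service", "example"); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}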
I0916 23:58:03.800077 722351 machine.go:96] duration metric: took 3.050188223s to provisionDockerMachine
I0916 23:58:03.800094 722351 client.go:171] duration metric: took 7.210891992s to LocalClient.Create
I0916 23:58:03.800121 722351 start.go:167] duration metric: took 7.210962522s to libmachine.API.Create "ha-198834"
I0916 23:58:03.800131 722351 start.go:293] postStartSetup for "ha-198834-m03" (driver="docker")
I0916 23:58:03.800155 722351 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0916 23:58:03.800229 722351 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0916 23:58:03.800295 722351 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-198834-m03
I0916 23:58:03.817949 722351 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32793 SSHKeyPath:/home/jenkins/minikube-integration/21550-661878/.minikube/machines/ha-198834-m03/id_rsa Username:docker}
I0916 23:58:03.918038 722351 ssh_runner.go:195] Run: cat /etc/os-release
I0916 23:58:03.922382 722351 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0916 23:58:03.922420 722351 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0916 23:58:03.922430 722351 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0916 23:58:03.922438 722351 info.go:137] Remote host: Ubuntu 22.04.5 LTS
I0916 23:58:03.922452 722351 filesync.go:126] Scanning /home/jenkins/minikube-integration/21550-661878/.minikube/addons for local assets ...
I0916 23:58:03.922512 722351 filesync.go:126] Scanning /home/jenkins/minikube-integration/21550-661878/.minikube/files for local assets ...
I0916 23:58:03.922607 722351 filesync.go:149] local asset: /home/jenkins/minikube-integration/21550-661878/.minikube/files/etc/ssl/certs/6653992.pem -> 6653992.pem in /etc/ssl/certs
I0916 23:58:03.922620 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/files/etc/ssl/certs/6653992.pem -> /etc/ssl/certs/6653992.pem
I0916 23:58:03.922727 722351 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I0916 23:58:03.932298 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/files/etc/ssl/certs/6653992.pem --> /etc/ssl/certs/6653992.pem (1708 bytes)
I0916 23:58:03.961387 722351 start.go:296] duration metric: took 161.230642ms for postStartSetup
I0916 23:58:03.961811 722351 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-198834-m03
I0916 23:58:03.979123 722351 profile.go:143] Saving config to /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/config.json ...
I0916 23:58:03.979395 722351 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0916 23:58:03.979437 722351 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-198834-m03
I0916 23:58:03.997520 722351 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32793 SSHKeyPath:/home/jenkins/minikube-integration/21550-661878/.minikube/machines/ha-198834-m03/id_rsa Username:docker}
I0916 23:58:04.091253 722351 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0916 23:58:04.096537 722351 start.go:128] duration metric: took 7.509514126s to createHost
I0916 23:58:04.096585 722351 start.go:83] releasing machines lock for "ha-198834-m03", held for 7.509743952s
I0916 23:58:04.096660 722351 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-198834-m03
I0916 23:58:04.115702 722351 out.go:179] * Found network options:
I0916 23:58:04.117029 722351 out.go:179] - NO_PROXY=192.168.49.2,192.168.49.3
W0916 23:58:04.118232 722351 proxy.go:120] fail to check proxy env: Error ip not in block
W0916 23:58:04.118256 722351 proxy.go:120] fail to check proxy env: Error ip not in block
W0916 23:58:04.118281 722351 proxy.go:120] fail to check proxy env: Error ip not in block
W0916 23:58:04.118299 722351 proxy.go:120] fail to check proxy env: Error ip not in block
I0916 23:58:04.118395 722351 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0916 23:58:04.118441 722351 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-198834-m03
I0916 23:58:04.118449 722351 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0916 23:58:04.118515 722351 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-198834-m03
I0916 23:58:04.136875 722351 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32793 SSHKeyPath:/home/jenkins/minikube-integration/21550-661878/.minikube/machines/ha-198834-m03/id_rsa Username:docker}
I0916 23:58:04.137594 722351 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32793 SSHKeyPath:/home/jenkins/minikube-integration/21550-661878/.minikube/machines/ha-198834-m03/id_rsa Username:docker}
I0916 23:58:04.231418 722351 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0916 23:58:04.311016 722351 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0916 23:58:04.311108 722351 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0916 23:58:04.340810 722351 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
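Note: conflicting bridge/podman CNI configs are not deleted above; they are renamed with a ".mk_disabled" suffix so they can be restored later. A rough stand-alone sketch of that rename-to-disable step (assumed file layout, not the exact implementation):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Disable any bridge/podman CNI configs by renaming them, as in the log above.
	for _, pattern := range []string{"/etc/cni/net.d/*bridge*", "/etc/cni/net.d/*podman*"} {
		matches, err := filepath.Glob(pattern)
		if err != nil {
			continue
		}
		for _, m := range matches {
			if filepath.Ext(m) == ".mk_disabled" {
				continue // already disabled on a previous run
			}
			if err := os.Rename(m, m+".mk_disabled"); err != nil {
				fmt.Fprintln(os.Stderr, err)
			}
		}
	}
}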
I0916 23:58:04.340841 722351 start.go:495] detecting cgroup driver to use...
I0916 23:58:04.340871 722351 detect.go:190] detected "systemd" cgroup driver on host os
I0916 23:58:04.340997 722351 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0916 23:58:04.359059 722351 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I0916 23:58:04.371794 722351 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0916 23:58:04.383345 722351 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I0916 23:58:04.383421 722351 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I0916 23:58:04.394513 722351 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0916 23:58:04.405081 722351 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0916 23:58:04.415653 722351 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0916 23:58:04.426510 722351 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0916 23:58:04.436405 722351 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0916 23:58:04.447135 722351 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0916 23:58:04.457926 722351 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
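Note: the run of sed invocations above rewrites /etc/containerd/config.toml in place: it pins the pause image, sets SystemdCgroup = true to match the detected systemd cgroup driver, normalizes the runc runtime to io.containerd.runc.v2, points conf_dir at /etc/cni/net.d, and enables unprivileged ports. A minimal sketch of the SystemdCgroup edit done with Go's regexp instead of sed, operating on a string for clarity:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	config := `[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
  SystemdCgroup = false
`
	// Equivalent of: sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g'
	re := regexp.MustCompile(`(?m)^(\s*)SystemdCgroup = .*$`)
	fmt.Print(re.ReplaceAllString(config, "${1}SystemdCgroup = true"))
}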
I0916 23:58:04.469563 722351 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0916 23:58:04.478599 722351 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0916 23:58:04.488307 722351 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0916 23:58:04.557785 722351 ssh_runner.go:195] Run: sudo systemctl restart containerd
I0916 23:58:04.636805 722351 start.go:495] detecting cgroup driver to use...
I0916 23:58:04.636855 722351 detect.go:190] detected "systemd" cgroup driver on host os
I0916 23:58:04.636899 722351 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I0916 23:58:04.649865 722351 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0916 23:58:04.662323 722351 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I0916 23:58:04.680711 722351 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0916 23:58:04.693319 722351 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0916 23:58:04.705665 722351 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I0916 23:58:04.723842 722351 ssh_runner.go:195] Run: which cri-dockerd
I0916 23:58:04.727547 722351 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I0916 23:58:04.738845 722351 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
I0916 23:58:04.758974 722351 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I0916 23:58:04.830471 722351 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I0916 23:58:04.900429 722351 docker.go:575] configuring docker to use "systemd" as cgroup driver...
I0916 23:58:04.900482 722351 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (129 bytes)
I0916 23:58:04.920093 722351 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
I0916 23:58:04.931599 722351 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0916 23:58:05.002855 722351 ssh_runner.go:195] Run: sudo systemctl restart docker
I0916 23:58:05.807532 722351 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I0916 23:58:05.819728 722351 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I0916 23:58:05.832303 722351 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0916 23:58:05.844347 722351 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I0916 23:58:05.916277 722351 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I0916 23:58:05.988520 722351 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0916 23:58:06.055206 722351 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I0916 23:58:06.080490 722351 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I0916 23:58:06.092817 722351 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0916 23:58:06.162707 722351 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I0916 23:58:06.248276 722351 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0916 23:58:06.261931 722351 start.go:542] Will wait 60s for socket path /var/run/cri-dockerd.sock
I0916 23:58:06.262000 722351 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I0916 23:58:06.265868 722351 start.go:563] Will wait 60s for crictl version
I0916 23:58:06.265941 722351 ssh_runner.go:195] Run: which crictl
I0916 23:58:06.269385 722351 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0916 23:58:06.305058 722351 start.go:579] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 28.4.0
RuntimeApiVersion: v1
I0916 23:58:06.305139 722351 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0916 23:58:06.331725 722351 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0916 23:58:06.358446 722351 out.go:252] * Preparing Kubernetes v1.34.0 on Docker 28.4.0 ...
I0916 23:58:06.359714 722351 out.go:179] - env NO_PROXY=192.168.49.2
I0916 23:58:06.360964 722351 out.go:179] - env NO_PROXY=192.168.49.2,192.168.49.3
I0916 23:58:06.362187 722351 cli_runner.go:164] Run: docker network inspect ha-198834 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0916 23:58:06.379025 722351 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I0916 23:58:06.383173 722351 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
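Note: the /etc/hosts update above is idempotent: any existing line for host.minikube.internal is filtered out before the fresh 192.168.49.1 entry is appended, and the result is copied back over /etc/hosts. A small sketch of the same filter-and-append step (string-based for clarity; the hostname and IP are taken from this log):

package main

import (
	"fmt"
	"strings"
)

// upsertHost drops any line ending in "\t<name>" and appends "<ip>\t<name>".
func upsertHost(hosts, ip, name string) string {
	var kept []string
	for _, line := range strings.Split(strings.TrimRight(hosts, "\n"), "\n") {
		if strings.HasSuffix(line, "\t"+name) {
			continue
		}
		kept = append(kept, line)
	}
	kept = append(kept, ip+"\t"+name)
	return strings.Join(kept, "\n") + "\n"
}

func main() {
	hosts := "127.0.0.1\tlocalhost\n10.0.0.9\thost.minikube.internal\n"
	fmt.Print(upsertHost(hosts, "192.168.49.1", "host.minikube.internal"))
}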
I0916 23:58:06.394963 722351 mustload.go:65] Loading cluster: ha-198834
I0916 23:58:06.395208 722351 config.go:182] Loaded profile config "ha-198834": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0916 23:58:06.395415 722351 cli_runner.go:164] Run: docker container inspect ha-198834 --format={{.State.Status}}
I0916 23:58:06.412700 722351 host.go:66] Checking if "ha-198834" exists ...
I0916 23:58:06.412979 722351 certs.go:68] Setting up /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834 for IP: 192.168.49.4
I0916 23:58:06.412992 722351 certs.go:194] generating shared ca certs ...
I0916 23:58:06.413008 722351 certs.go:226] acquiring lock for ca certs: {Name:mk24ad2a96dc59b16a9413b27c57b0ccb7d8ca57 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 23:58:06.413150 722351 certs.go:235] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21550-661878/.minikube/ca.key
I0916 23:58:06.413202 722351 certs.go:235] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21550-661878/.minikube/proxy-client-ca.key
I0916 23:58:06.413213 722351 certs.go:256] generating profile certs ...
I0916 23:58:06.413290 722351 certs.go:359] skipping valid signed profile cert regeneration for "minikube-user": /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/client.key
I0916 23:58:06.413316 722351 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.key.49f70783
I0916 23:58:06.413331 722351 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.crt.49f70783 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2 192.168.49.3 192.168.49.4 192.168.49.254]
I0916 23:58:07.059616 722351 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.crt.49f70783 ...
I0916 23:58:07.059648 722351 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.crt.49f70783: {Name:mka6f3e20ae0db98330bce12c7c53c8ceb029f1c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 23:58:07.059850 722351 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.key.49f70783 ...
I0916 23:58:07.059873 722351 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.key.49f70783: {Name:mk88fba5116449476945068bb066a5fae095ca41 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 23:58:07.060019 722351 certs.go:381] copying /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.crt.49f70783 -> /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.crt
I0916 23:58:07.060173 722351 certs.go:385] copying /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.key.49f70783 -> /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.key
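Note: the apiserver serving certificate generated above carries every address a client might use to reach the control plane as IP SANs: the in-cluster service IP 10.96.0.1, loopback, all three node IPs, and the kube-vip VIP 192.168.49.254, so the same cert verifies regardless of which endpoint is hit. A condensed sketch of issuing such a cert with Go's crypto/x509 follows; it is self-signed for brevity (the real cert is signed by the cluster CA), and the 26280h lifetime mirrors the CertExpiration value in the cluster config below.

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"math/big"
	"net"
	"os"
	"time"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	tmpl := x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: "minikube"}, // placeholder subject
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(26280 * time.Hour),
		KeyUsage:     x509.KeyUsageDigitalSignature,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		// IP SANs matching the list logged above.
		IPAddresses: []net.IP{
			net.ParseIP("10.96.0.1"), net.ParseIP("127.0.0.1"), net.ParseIP("10.0.0.1"),
			net.ParseIP("192.168.49.2"), net.ParseIP("192.168.49.3"), net.ParseIP("192.168.49.4"),
			net.ParseIP("192.168.49.254"),
		},
	}
	der, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &key.PublicKey, key)
	if err != nil {
		panic(err)
	}
	pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: der})
}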
I0916 23:58:07.060303 722351 certs.go:359] skipping valid signed profile cert regeneration for "aggregator": /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/proxy-client.key
I0916 23:58:07.060320 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/ca.crt -> /var/lib/minikube/certs/ca.crt
I0916 23:58:07.060332 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/ca.key -> /var/lib/minikube/certs/ca.key
I0916 23:58:07.060346 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/proxy-client-ca.crt -> /var/lib/minikube/certs/proxy-client-ca.crt
I0916 23:58:07.060359 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/proxy-client-ca.key -> /var/lib/minikube/certs/proxy-client-ca.key
I0916 23:58:07.060371 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.crt -> /var/lib/minikube/certs/apiserver.crt
I0916 23:58:07.060383 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.key -> /var/lib/minikube/certs/apiserver.key
I0916 23:58:07.060395 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/proxy-client.crt -> /var/lib/minikube/certs/proxy-client.crt
I0916 23:58:07.060407 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/proxy-client.key -> /var/lib/minikube/certs/proxy-client.key
I0916 23:58:07.060462 722351 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-661878/.minikube/certs/665399.pem (1338 bytes)
W0916 23:58:07.060492 722351 certs.go:480] ignoring /home/jenkins/minikube-integration/21550-661878/.minikube/certs/665399_empty.pem, impossibly tiny 0 bytes
I0916 23:58:07.060502 722351 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-661878/.minikube/certs/ca-key.pem (1675 bytes)
I0916 23:58:07.060525 722351 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-661878/.minikube/certs/ca.pem (1078 bytes)
I0916 23:58:07.060546 722351 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-661878/.minikube/certs/cert.pem (1123 bytes)
I0916 23:58:07.060571 722351 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-661878/.minikube/certs/key.pem (1679 bytes)
I0916 23:58:07.060609 722351 certs.go:484] found cert: /home/jenkins/minikube-integration/21550-661878/.minikube/files/etc/ssl/certs/6653992.pem (1708 bytes)
I0916 23:58:07.060634 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/certs/665399.pem -> /usr/share/ca-certificates/665399.pem
I0916 23:58:07.060648 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/files/etc/ssl/certs/6653992.pem -> /usr/share/ca-certificates/6653992.pem
I0916 23:58:07.060666 722351 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21550-661878/.minikube/ca.crt -> /usr/share/ca-certificates/minikubeCA.pem
I0916 23:58:07.060725 722351 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-198834
I0916 23:58:07.077675 722351 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32783 SSHKeyPath:/home/jenkins/minikube-integration/21550-661878/.minikube/machines/ha-198834/id_rsa Username:docker}
I0916 23:58:07.167227 722351 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/sa.pub
I0916 23:58:07.171339 722351 ssh_runner.go:447] scp /var/lib/minikube/certs/sa.pub --> memory (451 bytes)
I0916 23:58:07.184631 722351 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/sa.key
I0916 23:58:07.188345 722351 ssh_runner.go:447] scp /var/lib/minikube/certs/sa.key --> memory (1675 bytes)
I0916 23:58:07.201195 722351 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/front-proxy-ca.crt
I0916 23:58:07.204727 722351 ssh_runner.go:447] scp /var/lib/minikube/certs/front-proxy-ca.crt --> memory (1123 bytes)
I0916 23:58:07.217344 722351 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/front-proxy-ca.key
I0916 23:58:07.220977 722351 ssh_runner.go:447] scp /var/lib/minikube/certs/front-proxy-ca.key --> memory (1675 bytes)
I0916 23:58:07.233804 722351 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/etcd/ca.crt
I0916 23:58:07.237296 722351 ssh_runner.go:447] scp /var/lib/minikube/certs/etcd/ca.crt --> memory (1094 bytes)
I0916 23:58:07.250936 722351 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/etcd/ca.key
I0916 23:58:07.254504 722351 ssh_runner.go:447] scp /var/lib/minikube/certs/etcd/ca.key --> memory (1675 bytes)
I0916 23:58:07.267513 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0916 23:58:07.293250 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I0916 23:58:07.319357 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0916 23:58:07.345045 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I0916 23:58:07.370793 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1444 bytes)
I0916 23:58:07.397411 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I0916 23:58:07.422329 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0916 23:58:07.447186 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I0916 23:58:07.472564 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/certs/665399.pem --> /usr/share/ca-certificates/665399.pem (1338 bytes)
I0916 23:58:07.500373 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/files/etc/ssl/certs/6653992.pem --> /usr/share/ca-certificates/6653992.pem (1708 bytes)
I0916 23:58:07.526598 722351 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21550-661878/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0916 23:58:07.552426 722351 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/sa.pub (451 bytes)
I0916 23:58:07.570062 722351 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/sa.key (1675 bytes)
I0916 23:58:07.589628 722351 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/front-proxy-ca.crt (1123 bytes)
I0916 23:58:07.609486 722351 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/front-proxy-ca.key (1675 bytes)
I0916 23:58:07.630629 722351 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/etcd/ca.crt (1094 bytes)
I0916 23:58:07.650280 722351 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/etcd/ca.key (1675 bytes)
I0916 23:58:07.669308 722351 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (744 bytes)
I0916 23:58:07.687700 722351 ssh_runner.go:195] Run: openssl version
I0916 23:58:07.694681 722351 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/665399.pem && ln -fs /usr/share/ca-certificates/665399.pem /etc/ssl/certs/665399.pem"
I0916 23:58:07.705784 722351 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/665399.pem
I0916 23:58:07.709662 722351 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Sep 16 23:53 /usr/share/ca-certificates/665399.pem
I0916 23:58:07.709739 722351 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/665399.pem
I0916 23:58:07.716649 722351 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/665399.pem /etc/ssl/certs/51391683.0"
I0916 23:58:07.726290 722351 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/6653992.pem && ln -fs /usr/share/ca-certificates/6653992.pem /etc/ssl/certs/6653992.pem"
I0916 23:58:07.736118 722351 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/6653992.pem
I0916 23:58:07.740041 722351 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Sep 16 23:53 /usr/share/ca-certificates/6653992.pem
I0916 23:58:07.740101 722351 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/6653992.pem
I0916 23:58:07.747081 722351 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/6653992.pem /etc/ssl/certs/3ec20f2e.0"
I0916 23:58:07.757480 722351 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0916 23:58:07.767310 722351 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0916 23:58:07.771054 722351 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Sep 16 23:48 /usr/share/ca-certificates/minikubeCA.pem
I0916 23:58:07.771114 722351 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0916 23:58:07.778013 722351 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I0916 23:58:07.788245 722351 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0916 23:58:07.792058 722351 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I0916 23:58:07.792123 722351 kubeadm.go:926] updating node {m03 192.168.49.4 8443 v1.34.0 docker true true} ...
I0916 23:58:07.792232 722351 kubeadm.go:938] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=ha-198834-m03 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.4
[Install]
config:
{KubernetesVersion:v1.34.0 ClusterName:ha-198834 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I0916 23:58:07.792263 722351 kube-vip.go:115] generating kube-vip config ...
I0916 23:58:07.792307 722351 ssh_runner.go:195] Run: sudo sh -c "lsmod | grep ip_vs"
I0916 23:58:07.805180 722351 kube-vip.go:163] giving up enabling control-plane load-balancing as ipvs kernel modules appears not to be available: sudo sh -c "lsmod | grep ip_vs": Process exited with status 1
stdout:
stderr:
I0916 23:58:07.805247 722351 kube-vip.go:137] kube-vip config:
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
name: kube-vip
namespace: kube-system
spec:
containers:
- args:
- manager
env:
- name: vip_arp
value: "true"
- name: port
value: "8443"
- name: vip_nodename
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: vip_interface
value: eth0
- name: vip_cidr
value: "32"
- name: dns_mode
value: first
- name: cp_enable
value: "true"
- name: cp_namespace
value: kube-system
- name: vip_leaderelection
value: "true"
- name: vip_leasename
value: plndr-cp-lock
- name: vip_leaseduration
value: "5"
- name: vip_renewdeadline
value: "3"
- name: vip_retryperiod
value: "1"
- name: address
value: 192.168.49.254
- name: prometheus_server
value: :2112
image: ghcr.io/kube-vip/kube-vip:v1.0.0
imagePullPolicy: IfNotPresent
name: kube-vip
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
- NET_RAW
volumeMounts:
- mountPath: /etc/kubernetes/admin.conf
name: kubeconfig
hostAliases:
- hostnames:
- kubernetes
ip: 127.0.0.1
hostNetwork: true
volumes:
- hostPath:
path: "/etc/kubernetes/admin.conf"
name: kubeconfig
status: {}
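Note: the generated kube-vip manifest runs as a static pod on each control-plane node; the instances elect a leader through the plndr-cp-lock lease in kube-system, and in ARP mode the current leader advertises the VIP 192.168.49.254 on eth0. The timing values follow the usual leader-election constraint that the lease duration (5s) must exceed the renew deadline (3s), which must in turn exceed the retry period (1s). A trivial consistency check of those numbers, purely for illustration:

package main

import "fmt"

func main() {
	leaseDuration, renewDeadline, retryPeriod := 5, 3, 1 // seconds, from the manifest above
	if leaseDuration > renewDeadline && renewDeadline > retryPeriod {
		fmt.Println("leader-election timings are consistent")
	} else {
		fmt.Println("invalid: need leaseDuration > renewDeadline > retryPeriod")
	}
}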
I0916 23:58:07.805296 722351 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.0
I0916 23:58:07.814610 722351 binaries.go:44] Found k8s binaries, skipping transfer
I0916 23:58:07.814678 722351 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /etc/kubernetes/manifests
I0916 23:58:07.825352 722351 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (312 bytes)
I0916 23:58:07.844047 722351 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0916 23:58:07.862757 722351 ssh_runner.go:362] scp memory --> /etc/kubernetes/manifests/kube-vip.yaml (1358 bytes)
I0916 23:58:07.883848 722351 ssh_runner.go:195] Run: grep 192.168.49.254 control-plane.minikube.internal$ /etc/hosts
I0916 23:58:07.887562 722351 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.254 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0916 23:58:07.899646 722351 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0916 23:58:07.974384 722351 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0916 23:58:08.004718 722351 host.go:66] Checking if "ha-198834" exists ...
I0916 23:58:08.005001 722351 start.go:317] joinCluster: &{Name:ha-198834 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:ha-198834 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[]
DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m02 IP:192.168.49.3 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m03 IP:192.168.49.4 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:false efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:f
alse logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:false storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticI
P: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0916 23:58:08.005124 722351 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm token create --print-join-command --ttl=0"
I0916 23:58:08.005169 722351 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-198834
I0916 23:58:08.024622 722351 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32783 SSHKeyPath:/home/jenkins/minikube-integration/21550-661878/.minikube/machines/ha-198834/id_rsa Username:docker}
I0916 23:58:08.169785 722351 start.go:343] trying to join control-plane node "m03" to cluster: &{Name:m03 IP:192.168.49.4 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}
I0916 23:58:08.169853 722351 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm join control-plane.minikube.internal:8443 --token 2dm2r7.tavul8zm4b55qd6q --discovery-token-ca-cert-hash sha256:aa81889111a47026234b9abf30001d51a9462bec006420d404163720ad63709e --ignore-preflight-errors=all --cri-socket unix:///var/run/cri-dockerd.sock --node-name=ha-198834-m03 --control-plane --apiserver-advertise-address=192.168.49.4 --apiserver-bind-port=8443"
I0916 23:58:25.708852 722351 ssh_runner.go:235] Completed: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm join control-plane.minikube.internal:8443 --token 2dm2r7.tavul8zm4b55qd6q --discovery-token-ca-cert-hash sha256:aa81889111a47026234b9abf30001d51a9462bec006420d404163720ad63709e --ignore-preflight-errors=all --cri-socket unix:///var/run/cri-dockerd.sock --node-name=ha-198834-m03 --control-plane --apiserver-advertise-address=192.168.49.4 --apiserver-bind-port=8443": (17.538975369s)
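Note: the --discovery-token-ca-cert-hash passed to kubeadm join above is the SHA-256 digest of the cluster CA's DER-encoded Subject Public Key Info, which lets the joining node pin the cluster identity before trusting anything it downloads. A short sketch of computing that hash from the CA certificate (the path is the one this log copies the CA to; the code is illustrative, not minikube's):

package main

import (
	"crypto/sha256"
	"crypto/x509"
	"encoding/hex"
	"encoding/pem"
	"fmt"
	"os"
)

func main() {
	data, err := os.ReadFile("/var/lib/minikube/certs/ca.crt")
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		panic("no PEM block found in ca.crt")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}
	// Hash the DER-encoded SubjectPublicKeyInfo, as kubeadm does for the discovery hash.
	spki, err := x509.MarshalPKIXPublicKey(cert.PublicKey)
	if err != nil {
		panic(err)
	}
	sum := sha256.Sum256(spki)
	fmt.Println("sha256:" + hex.EncodeToString(sum[:]))
}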
I0916 23:58:25.708884 722351 ssh_runner.go:195] Run: /bin/bash -c "sudo systemctl daemon-reload && sudo systemctl enable kubelet && sudo systemctl start kubelet"
I0916 23:58:25.930343 722351 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes ha-198834-m03 minikube.k8s.io/updated_at=2025_09_16T23_58_25_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=9829f0bc17c523e4378d28e0c25741106f24f00a minikube.k8s.io/name=ha-198834 minikube.k8s.io/primary=false
I0916 23:58:26.006016 722351 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig taint nodes ha-198834-m03 node-role.kubernetes.io/control-plane:NoSchedule-
I0916 23:58:26.089408 722351 start.go:319] duration metric: took 18.084403561s to joinCluster
I0916 23:58:26.089494 722351 start.go:235] Will wait 6m0s for node &{Name:m03 IP:192.168.49.4 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}
I0916 23:58:26.089805 722351 config.go:182] Loaded profile config "ha-198834": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0916 23:58:26.091004 722351 out.go:179] * Verifying Kubernetes components...
I0916 23:58:26.092246 722351 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0916 23:58:26.200675 722351 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0916 23:58:26.214424 722351 kapi.go:59] client config for ha-198834: &rest.Config{Host:"https://192.168.49.254:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/client.crt", KeyFile:"/home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/client.key", CAFile:"/home/jenkins/minikube-integration/21550-661878/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(n
il)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x27f4620), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), WarningHandlerWithContext:rest.WarningHandlerWithContext(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
W0916 23:58:26.214506 722351 kubeadm.go:483] Overriding stale ClientConfig host https://192.168.49.254:8443 with https://192.168.49.2:8443
I0916 23:58:26.214713 722351 node_ready.go:35] waiting up to 6m0s for node "ha-198834-m03" to be "Ready" ...
W0916 23:58:28.218137 722351 node_ready.go:57] node "ha-198834-m03" has "Ready":"False" status (will retry)
I0916 23:58:29.718579 722351 node_ready.go:49] node "ha-198834-m03" is "Ready"
I0916 23:58:29.718621 722351 node_ready.go:38] duration metric: took 3.503891029s for node "ha-198834-m03" to be "Ready" ...
I0916 23:58:29.718640 722351 api_server.go:52] waiting for apiserver process to appear ...
I0916 23:58:29.718688 722351 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0916 23:58:29.730821 722351 api_server.go:72] duration metric: took 3.641289304s to wait for apiserver process to appear ...
I0916 23:58:29.730847 722351 api_server.go:88] waiting for apiserver healthz status ...
I0916 23:58:29.730870 722351 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
I0916 23:58:29.736447 722351 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
ok
I0916 23:58:29.737363 722351 api_server.go:141] control plane version: v1.34.0
I0916 23:58:29.737382 722351 api_server.go:131] duration metric: took 6.528439ms to wait for apiserver health ...
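Note: the health check above is a plain HTTPS GET against /healthz on the first control-plane node, where a 200 response with body "ok" counts as healthy; only the cluster CA is needed to verify the server. A compact stand-alone probe in the same spirit (a sketch; the CA path and endpoint are taken from this log, the timeout is arbitrary):

package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io"
	"net/http"
	"os"
	"time"
)

func main() {
	caPEM, err := os.ReadFile("/home/jenkins/minikube-integration/21550-661878/.minikube/ca.crt")
	if err != nil {
		panic(err)
	}
	pool := x509.NewCertPool()
	pool.AppendCertsFromPEM(caPEM)
	client := &http.Client{
		Timeout:   5 * time.Second,
		Transport: &http.Transport{TLSClientConfig: &tls.Config{RootCAs: pool}},
	}
	resp, err := client.Get("https://192.168.49.2:8443/healthz")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Printf("%d %s\n", resp.StatusCode, body) // expect: 200 ok
}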
I0916 23:58:29.737390 722351 system_pods.go:43] waiting for kube-system pods to appear ...
I0916 23:58:29.743125 722351 system_pods.go:59] 27 kube-system pods found
I0916 23:58:29.743154 722351 system_pods.go:61] "coredns-66bc5c9577-5wx4k" [6f279fd8-dd3c-49a5-863d-a53124ecf1f5] Running
I0916 23:58:29.743159 722351 system_pods.go:61] "coredns-66bc5c9577-mjbz6" [c918625f-be11-44bf-8b82-d4c21b8993d1] Running
I0916 23:58:29.743162 722351 system_pods.go:61] "etcd-ha-198834" [8374ebf7-cb1d-422e-8768-584e07b2dcab] Running
I0916 23:58:29.743166 722351 system_pods.go:61] "etcd-ha-198834-m02" [222eaa2a-824e-4087-a614-f5f5a6de8e98] Running
I0916 23:58:29.743169 722351 system_pods.go:61] "etcd-ha-198834-m03" [07a1b36a-f633-4f93-a8c2-1bc7bc4ce072] Pending
I0916 23:58:29.743172 722351 system_pods.go:61] "kindnet-2vbn5" [acd8be88-6ee7-4832-830f-c98aaabacd81] Running
I0916 23:58:29.743179 722351 system_pods.go:61] "kindnet-8klgc" [a5699c22-8aa3-4159-bb6d-261cbb15bcd1] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kindnet-8klgc": pod kindnet-8klgc is already assigned to node "ha-198834-m03")
I0916 23:58:29.743182 722351 system_pods.go:61] "kindnet-h28vp" [6c51d39f-7e43-461b-a021-13ddf0cb9845] Running
I0916 23:58:29.743189 722351 system_pods.go:61] "kindnet-qmgt6" [dea81557-acc3-41e3-8160-712870aba14c] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kindnet-qmgt6": pod kindnet-qmgt6 is already assigned to node "ha-198834-m03")
I0916 23:58:29.743193 722351 system_pods.go:61] "kube-apiserver-ha-198834" [5176645e-1819-4ab4-add3-9355f9a506ce] Running
I0916 23:58:29.743198 722351 system_pods.go:61] "kube-apiserver-ha-198834-m02" [a2c8eefd-3b40-484d-9939-74b5fdba7182] Running
I0916 23:58:29.743202 722351 system_pods.go:61] "kube-apiserver-ha-198834-m03" [6b3daabc-2aec-427f-8ee1-b89cc599cfe1] Pending
I0916 23:58:29.743206 722351 system_pods.go:61] "kube-controller-manager-ha-198834" [36327629-7bc1-440d-b760-3fdf88af1b03] Running
I0916 23:58:29.743209 722351 system_pods.go:61] "kube-controller-manager-ha-198834-m02" [434a65bb-a306-4798-81f9-9631313ba763] Running
I0916 23:58:29.743212 722351 system_pods.go:61] "kube-controller-manager-ha-198834-m03" [bb6c5982-6f3f-4ac2-ad73-2044b6b73019] Pending
I0916 23:58:29.743216 722351 system_pods.go:61] "kube-proxy-5tkhn" [5edbfebe-2590-4d23-b80e-7496a4e9a5b6] Running
I0916 23:58:29.743220 722351 system_pods.go:61] "kube-proxy-d8brp" [00263ada-ca4e-4585-b712-19f6e60ce72b] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kube-proxy-d8brp": pod kube-proxy-d8brp is already assigned to node "ha-198834-m03")
I0916 23:58:29.743227 722351 system_pods.go:61] "kube-proxy-h2fxd" [db1b17f7-7be8-46ef-8eb3-98432a2eec18] Running
I0916 23:58:29.743231 722351 system_pods.go:61] "kube-proxy-nj7bh" [a5c775e6-81f4-47ce-966b-598b21714409] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kube-proxy-nj7bh": pod kube-proxy-nj7bh is already assigned to node "ha-198834-m03")
I0916 23:58:29.743236 722351 system_pods.go:61] "kube-proxy-q9ggj" [fdedb871-6b9e-4c4e-9ef7-337d04c8c30a] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kube-proxy-q9ggj": pod kube-proxy-q9ggj is already assigned to node "ha-198834-m03")
I0916 23:58:29.743241 722351 system_pods.go:61] "kube-scheduler-ha-198834" [45afa1e0-273e-44fc-b170-bdc7a365273e] Running
I0916 23:58:29.743245 722351 system_pods.go:61] "kube-scheduler-ha-198834-m02" [633b0b32-1d8b-4301-85a5-8c36f53296e3] Running
I0916 23:58:29.743248 722351 system_pods.go:61] "kube-scheduler-ha-198834-m03" [bc9f09c0-2af3-4108-b0d5-116e3d07d4b6] Pending
I0916 23:58:29.743251 722351 system_pods.go:61] "kube-vip-ha-198834" [cde651e3-1550-48cb-a5dc-09d55185429b] Running
I0916 23:58:29.743254 722351 system_pods.go:61] "kube-vip-ha-198834-m02" [a9e3b24d-529d-409c-982f-c72bd0cc4693] Running
I0916 23:58:29.743257 722351 system_pods.go:61] "kube-vip-ha-198834-m03" [608ad5d9-c8f7-4a62-a1f3-8cdac07ca388] Pending
I0916 23:58:29.743260 722351 system_pods.go:61] "storage-provisioner" [6b6f64f3-2647-4e13-be41-47fcc6111f3e] Running
I0916 23:58:29.743267 722351 system_pods.go:74] duration metric: took 5.871633ms to wait for pod list to return data ...
I0916 23:58:29.743275 722351 default_sa.go:34] waiting for default service account to be created ...
I0916 23:58:29.746038 722351 default_sa.go:45] found service account: "default"
I0916 23:58:29.746059 722351 default_sa.go:55] duration metric: took 2.77496ms for default service account to be created ...
I0916 23:58:29.746067 722351 system_pods.go:116] waiting for k8s-apps to be running ...
I0916 23:58:29.751428 722351 system_pods.go:86] 27 kube-system pods found
I0916 23:58:29.751454 722351 system_pods.go:89] "coredns-66bc5c9577-5wx4k" [6f279fd8-dd3c-49a5-863d-a53124ecf1f5] Running
I0916 23:58:29.751459 722351 system_pods.go:89] "coredns-66bc5c9577-mjbz6" [c918625f-be11-44bf-8b82-d4c21b8993d1] Running
I0916 23:58:29.751463 722351 system_pods.go:89] "etcd-ha-198834" [8374ebf7-cb1d-422e-8768-584e07b2dcab] Running
I0916 23:58:29.751466 722351 system_pods.go:89] "etcd-ha-198834-m02" [222eaa2a-824e-4087-a614-f5f5a6de8e98] Running
I0916 23:58:29.751469 722351 system_pods.go:89] "etcd-ha-198834-m03" [07a1b36a-f633-4f93-a8c2-1bc7bc4ce072] Pending
I0916 23:58:29.751472 722351 system_pods.go:89] "kindnet-2vbn5" [acd8be88-6ee7-4832-830f-c98aaabacd81] Running
I0916 23:58:29.751478 722351 system_pods.go:89] "kindnet-8klgc" [a5699c22-8aa3-4159-bb6d-261cbb15bcd1] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kindnet-8klgc": pod kindnet-8klgc is already assigned to node "ha-198834-m03")
I0916 23:58:29.751482 722351 system_pods.go:89] "kindnet-h28vp" [6c51d39f-7e43-461b-a021-13ddf0cb9845] Running
I0916 23:58:29.751490 722351 system_pods.go:89] "kindnet-qmgt6" [dea81557-acc3-41e3-8160-712870aba14c] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kindnet-qmgt6": pod kindnet-qmgt6 is already assigned to node "ha-198834-m03")
I0916 23:58:29.751494 722351 system_pods.go:89] "kube-apiserver-ha-198834" [5176645e-1819-4ab4-add3-9355f9a506ce] Running
I0916 23:58:29.751498 722351 system_pods.go:89] "kube-apiserver-ha-198834-m02" [a2c8eefd-3b40-484d-9939-74b5fdba7182] Running
I0916 23:58:29.751501 722351 system_pods.go:89] "kube-apiserver-ha-198834-m03" [6b3daabc-2aec-427f-8ee1-b89cc599cfe1] Pending
I0916 23:58:29.751504 722351 system_pods.go:89] "kube-controller-manager-ha-198834" [36327629-7bc1-440d-b760-3fdf88af1b03] Running
I0916 23:58:29.751508 722351 system_pods.go:89] "kube-controller-manager-ha-198834-m02" [434a65bb-a306-4798-81f9-9631313ba763] Running
I0916 23:58:29.751512 722351 system_pods.go:89] "kube-controller-manager-ha-198834-m03" [bb6c5982-6f3f-4ac2-ad73-2044b6b73019] Pending
I0916 23:58:29.751515 722351 system_pods.go:89] "kube-proxy-5tkhn" [5edbfebe-2590-4d23-b80e-7496a4e9a5b6] Running
I0916 23:58:29.751520 722351 system_pods.go:89] "kube-proxy-d8brp" [00263ada-ca4e-4585-b712-19f6e60ce72b] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kube-proxy-d8brp": pod kube-proxy-d8brp is already assigned to node "ha-198834-m03")
I0916 23:58:29.751526 722351 system_pods.go:89] "kube-proxy-h2fxd" [db1b17f7-7be8-46ef-8eb3-98432a2eec18] Running
I0916 23:58:29.751530 722351 system_pods.go:89] "kube-proxy-nj7bh" [a5c775e6-81f4-47ce-966b-598b21714409] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kube-proxy-nj7bh": pod kube-proxy-nj7bh is already assigned to node "ha-198834-m03")
I0916 23:58:29.751535 722351 system_pods.go:89] "kube-proxy-q9ggj" [fdedb871-6b9e-4c4e-9ef7-337d04c8c30a] Pending: PodScheduled:SchedulerError (running Bind plugin "DefaultBinder": Operation cannot be fulfilled on pods/binding "kube-proxy-q9ggj": pod kube-proxy-q9ggj is already assigned to node "ha-198834-m03")
I0916 23:58:29.751540 722351 system_pods.go:89] "kube-scheduler-ha-198834" [45afa1e0-273e-44fc-b170-bdc7a365273e] Running
I0916 23:58:29.751545 722351 system_pods.go:89] "kube-scheduler-ha-198834-m02" [633b0b32-1d8b-4301-85a5-8c36f53296e3] Running
I0916 23:58:29.751550 722351 system_pods.go:89] "kube-scheduler-ha-198834-m03" [bc9f09c0-2af3-4108-b0d5-116e3d07d4b6] Pending
I0916 23:58:29.751554 722351 system_pods.go:89] "kube-vip-ha-198834" [cde651e3-1550-48cb-a5dc-09d55185429b] Running
I0916 23:58:29.751558 722351 system_pods.go:89] "kube-vip-ha-198834-m02" [a9e3b24d-529d-409c-982f-c72bd0cc4693] Running
I0916 23:58:29.751563 722351 system_pods.go:89] "kube-vip-ha-198834-m03" [608ad5d9-c8f7-4a62-a1f3-8cdac07ca388] Pending
I0916 23:58:29.751569 722351 system_pods.go:89] "storage-provisioner" [6b6f64f3-2647-4e13-be41-47fcc6111f3e] Running
I0916 23:58:29.751577 722351 system_pods.go:126] duration metric: took 5.505301ms to wait for k8s-apps to be running ...
I0916 23:58:29.751587 722351 system_svc.go:44] waiting for kubelet service to be running ....
I0916 23:58:29.751637 722351 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0916 23:58:29.764067 722351 system_svc.go:56] duration metric: took 12.467532ms WaitForService to wait for kubelet
I0916 23:58:29.764102 722351 kubeadm.go:578] duration metric: took 3.674577242s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0916 23:58:29.764127 722351 node_conditions.go:102] verifying NodePressure condition ...
I0916 23:58:29.767676 722351 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0916 23:58:29.767699 722351 node_conditions.go:123] node cpu capacity is 8
I0916 23:58:29.767712 722351 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0916 23:58:29.767717 722351 node_conditions.go:123] node cpu capacity is 8
I0916 23:58:29.767721 722351 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0916 23:58:29.767724 722351 node_conditions.go:123] node cpu capacity is 8
I0916 23:58:29.767728 722351 node_conditions.go:105] duration metric: took 3.595861ms to run NodePressure ...
I0916 23:58:29.767739 722351 start.go:241] waiting for startup goroutines ...
I0916 23:58:29.767761 722351 start.go:255] writing updated cluster config ...
I0916 23:58:29.768076 722351 ssh_runner.go:195] Run: rm -f paused
I0916 23:58:29.772054 722351 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I0916 23:58:29.772528 722351 kapi.go:59] client config for ha-198834: &rest.Config{Host:"https://192.168.49.254:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/client.crt", KeyFile:"/home/jenkins/minikube-integration/21550-661878/.minikube/profiles/ha-198834/client.key", CAFile:"/home/jenkins/minikube-integration/21550-661878/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(n
il)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x27f4620), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), WarningHandlerWithContext:rest.WarningHandlerWithContext(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
I0916 23:58:29.776391 722351 pod_ready.go:83] waiting for pod "coredns-66bc5c9577-5wx4k" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:58:29.781517 722351 pod_ready.go:94] pod "coredns-66bc5c9577-5wx4k" is "Ready"
I0916 23:58:29.781544 722351 pod_ready.go:86] duration metric: took 5.128752ms for pod "coredns-66bc5c9577-5wx4k" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:58:29.781552 722351 pod_ready.go:83] waiting for pod "coredns-66bc5c9577-mjbz6" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:58:29.786524 722351 pod_ready.go:94] pod "coredns-66bc5c9577-mjbz6" is "Ready"
I0916 23:58:29.786549 722351 pod_ready.go:86] duration metric: took 4.991527ms for pod "coredns-66bc5c9577-mjbz6" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:58:29.789148 722351 pod_ready.go:83] waiting for pod "etcd-ha-198834" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:58:29.793593 722351 pod_ready.go:94] pod "etcd-ha-198834" is "Ready"
I0916 23:58:29.793614 722351 pod_ready.go:86] duration metric: took 4.43654ms for pod "etcd-ha-198834" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:58:29.793622 722351 pod_ready.go:83] waiting for pod "etcd-ha-198834-m02" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:58:29.797833 722351 pod_ready.go:94] pod "etcd-ha-198834-m02" is "Ready"
I0916 23:58:29.797856 722351 pod_ready.go:86] duration metric: took 4.228462ms for pod "etcd-ha-198834-m02" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:58:29.797864 722351 pod_ready.go:83] waiting for pod "etcd-ha-198834-m03" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:58:29.974055 722351 request.go:683] "Waited before sending request" delay="176.0853ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/etcd-ha-198834-m03"
I0916 23:58:30.173047 722351 request.go:683] "Waited before sending request" delay="193.205885ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-198834-m03"
I0916 23:58:30.373324 722351 request.go:683] "Waited before sending request" delay="74.260595ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/etcd-ha-198834-m03"
I0916 23:58:30.573189 722351 request.go:683] "Waited before sending request" delay="196.187075ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-198834-m03"
I0916 23:58:30.973960 722351 request.go:683] "Waited before sending request" delay="171.749825ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-198834-m03"
I0916 23:58:30.977519 722351 pod_ready.go:94] pod "etcd-ha-198834-m03" is "Ready"
I0916 23:58:30.977548 722351 pod_ready.go:86] duration metric: took 1.179678858s for pod "etcd-ha-198834-m03" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:58:31.172996 722351 request.go:683] "Waited before sending request" delay="195.270589ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods?labelSelector=component%3Dkube-apiserver"
I0916 23:58:31.176896 722351 pod_ready.go:83] waiting for pod "kube-apiserver-ha-198834" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:58:31.373184 722351 request.go:683] "Waited before sending request" delay="196.155083ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-apiserver-ha-198834"
I0916 23:58:31.573091 722351 request.go:683] "Waited before sending request" delay="196.292532ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-198834"
I0916 23:58:31.576254 722351 pod_ready.go:94] pod "kube-apiserver-ha-198834" is "Ready"
I0916 23:58:31.576280 722351 pod_ready.go:86] duration metric: took 399.33205ms for pod "kube-apiserver-ha-198834" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:58:31.576288 722351 pod_ready.go:83] waiting for pod "kube-apiserver-ha-198834-m02" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:58:31.773718 722351 request.go:683] "Waited before sending request" delay="197.34633ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-apiserver-ha-198834-m02"
I0916 23:58:31.973716 722351 request.go:683] "Waited before sending request" delay="196.477986ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-198834-m02"
I0916 23:58:31.978504 722351 pod_ready.go:94] pod "kube-apiserver-ha-198834-m02" is "Ready"
I0916 23:58:31.978555 722351 pod_ready.go:86] duration metric: took 402.258846ms for pod "kube-apiserver-ha-198834-m02" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:58:31.978567 722351 pod_ready.go:83] waiting for pod "kube-apiserver-ha-198834-m03" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:58:32.172964 722351 request.go:683] "Waited before sending request" delay="194.26238ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-apiserver-ha-198834-m03"
I0916 23:58:32.373491 722351 request.go:683] "Waited before sending request" delay="197.345263ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-198834-m03"
I0916 23:58:32.376525 722351 pod_ready.go:94] pod "kube-apiserver-ha-198834-m03" is "Ready"
I0916 23:58:32.376552 722351 pod_ready.go:86] duration metric: took 397.9768ms for pod "kube-apiserver-ha-198834-m03" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:58:32.573017 722351 request.go:683] "Waited before sending request" delay="196.299414ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods?labelSelector=component%3Dkube-controller-manager"
I0916 23:58:32.577487 722351 pod_ready.go:83] waiting for pod "kube-controller-manager-ha-198834" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:58:32.773969 722351 request.go:683] "Waited before sending request" delay="196.341624ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-198834"
I0916 23:58:32.973585 722351 request.go:683] "Waited before sending request" delay="196.346276ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-198834"
I0916 23:58:32.977689 722351 pod_ready.go:94] pod "kube-controller-manager-ha-198834" is "Ready"
I0916 23:58:32.977721 722351 pod_ready.go:86] duration metric: took 400.206125ms for pod "kube-controller-manager-ha-198834" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:58:32.977735 722351 pod_ready.go:83] waiting for pod "kube-controller-manager-ha-198834-m02" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:58:33.173032 722351 request.go:683] "Waited before sending request" delay="195.180271ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-198834-m02"
I0916 23:58:33.373811 722351 request.go:683] "Waited before sending request" delay="197.350717ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-198834-m02"
I0916 23:58:33.376722 722351 pod_ready.go:94] pod "kube-controller-manager-ha-198834-m02" is "Ready"
I0916 23:58:33.376747 722351 pod_ready.go:86] duration metric: took 399.004052ms for pod "kube-controller-manager-ha-198834-m02" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:58:33.376756 722351 pod_ready.go:83] waiting for pod "kube-controller-manager-ha-198834-m03" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:58:33.573048 722351 request.go:683] "Waited before sending request" delay="196.186349ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-198834-m03"
I0916 23:58:33.773733 722351 request.go:683] "Waited before sending request" delay="197.347012ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-198834-m03"
I0916 23:58:33.776944 722351 pod_ready.go:94] pod "kube-controller-manager-ha-198834-m03" is "Ready"
I0916 23:58:33.776972 722351 pod_ready.go:86] duration metric: took 400.209131ms for pod "kube-controller-manager-ha-198834-m03" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:58:33.973425 722351 request.go:683] "Waited before sending request" delay="196.344301ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods?labelSelector=k8s-app%3Dkube-proxy"
I0916 23:58:33.977203 722351 pod_ready.go:83] waiting for pod "kube-proxy-5tkhn" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:58:34.173688 722351 request.go:683] "Waited before sending request" delay="196.345801ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-proxy-5tkhn"
I0916 23:58:34.373026 722351 request.go:683] "Waited before sending request" delay="196.256084ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-198834"
I0916 23:58:34.376079 722351 pod_ready.go:94] pod "kube-proxy-5tkhn" is "Ready"
I0916 23:58:34.376106 722351 pod_ready.go:86] duration metric: took 398.875647ms for pod "kube-proxy-5tkhn" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:58:34.376114 722351 pod_ready.go:83] waiting for pod "kube-proxy-d8brp" in "kube-system" namespace to be "Ready" or be gone ...
I0916 23:58:34.573402 722351 request.go:683] "Waited before sending request" delay="197.174223ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-proxy-d8brp"
I0916 23:58:34.773022 722351 request.go:683] "Waited before sending request" delay="196.289258ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-198834-m03"
I0916 23:58:34.973958 722351 request.go:683] "Waited before sending request" delay="97.260541ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-proxy-d8brp"
I0916 23:58:35.173637 722351 request.go:683] "Waited before sending request" delay="196.407064ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-198834-m03"
I0916 23:58:35.573487 722351 request.go:683] "Waited before sending request" delay="193.254271ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-198834-m03"
I0916 23:58:35.973307 722351 request.go:683] "Waited before sending request" delay="93.259111ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-198834-m03"
W0916 23:58:36.383328 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0916 23:58:38.882062 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0916 23:58:40.882520 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0916 23:58:42.883194 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0916 23:58:45.382843 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0916 23:58:47.882744 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0916 23:58:49.882993 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0916 23:58:51.883265 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0916 23:58:54.383005 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0916 23:58:56.882555 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0916 23:58:59.382463 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0916 23:59:01.382897 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0916 23:59:03.883583 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0916 23:59:06.382581 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0916 23:59:08.882275 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0916 23:59:11.382224 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0916 23:59:13.382333 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0916 23:59:15.882727 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0916 23:59:18.383800 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0916 23:59:20.882547 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0916 23:59:22.883081 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0916 23:59:25.383627 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0916 23:59:27.882377 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0916 23:59:29.882787 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0916 23:59:31.884042 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0916 23:59:34.382932 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0916 23:59:36.882730 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0916 23:59:38.882959 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0916 23:59:40.883411 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0916 23:59:43.382771 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0916 23:59:45.882938 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0916 23:59:48.381607 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0916 23:59:50.382229 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0916 23:59:52.382889 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0916 23:59:54.882546 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0916 23:59:56.882802 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0916 23:59:58.882939 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0917 00:00:00.883550 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0917 00:00:03.382872 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0917 00:00:05.383021 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0917 00:00:07.384166 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0917 00:00:09.883064 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0917 00:00:11.884141 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0917 00:00:14.383248 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0917 00:00:16.883441 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0917 00:00:18.884438 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0917 00:00:21.383553 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0917 00:00:23.883713 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0917 00:00:26.383093 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0917 00:00:28.883552 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0917 00:00:31.383392 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0917 00:00:33.883626 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0917 00:00:35.883823 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0917 00:00:38.383553 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0917 00:00:40.883430 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0917 00:00:43.383026 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0917 00:00:45.883091 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0917 00:00:48.382865 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0917 00:00:50.882713 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0917 00:00:52.882989 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0917 00:00:55.383076 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0917 00:00:57.383555 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0917 00:00:59.882704 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0917 00:01:01.883495 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0917 00:01:04.382406 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0917 00:01:06.383424 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0917 00:01:08.883456 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0917 00:01:11.382988 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0917 00:01:13.882379 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0917 00:01:15.883651 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0917 00:01:18.382551 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0917 00:01:20.382997 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0917 00:01:22.882943 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0917 00:01:24.883256 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0917 00:01:27.383660 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0917 00:01:29.882955 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
W0917 00:01:32.383364 722351 pod_ready.go:104] pod "kube-proxy-d8brp" is not "Ready", error: <nil>
I0917 00:01:34.382530 722351 pod_ready.go:94] pod "kube-proxy-d8brp" is "Ready"
I0917 00:01:34.382562 722351 pod_ready.go:86] duration metric: took 3m0.006439942s for pod "kube-proxy-d8brp" in "kube-system" namespace to be "Ready" or be gone ...
I0917 00:01:34.382572 722351 pod_ready.go:83] waiting for pod "kube-proxy-h2fxd" in "kube-system" namespace to be "Ready" or be gone ...
I0917 00:01:34.387645 722351 pod_ready.go:94] pod "kube-proxy-h2fxd" is "Ready"
I0917 00:01:34.387677 722351 pod_ready.go:86] duration metric: took 5.098826ms for pod "kube-proxy-h2fxd" in "kube-system" namespace to be "Ready" or be gone ...
I0917 00:01:34.390707 722351 pod_ready.go:83] waiting for pod "kube-scheduler-ha-198834" in "kube-system" namespace to be "Ready" or be gone ...
I0917 00:01:34.396086 722351 pod_ready.go:94] pod "kube-scheduler-ha-198834" is "Ready"
I0917 00:01:34.396115 722351 pod_ready.go:86] duration metric: took 5.379692ms for pod "kube-scheduler-ha-198834" in "kube-system" namespace to be "Ready" or be gone ...
I0917 00:01:34.396126 722351 pod_ready.go:83] waiting for pod "kube-scheduler-ha-198834-m02" in "kube-system" namespace to be "Ready" or be gone ...
I0917 00:01:34.400646 722351 pod_ready.go:94] pod "kube-scheduler-ha-198834-m02" is "Ready"
I0917 00:01:34.400670 722351 pod_ready.go:86] duration metric: took 4.536355ms for pod "kube-scheduler-ha-198834-m02" in "kube-system" namespace to be "Ready" or be gone ...
I0917 00:01:34.400680 722351 pod_ready.go:83] waiting for pod "kube-scheduler-ha-198834-m03" in "kube-system" namespace to be "Ready" or be gone ...
I0917 00:01:34.577209 722351 request.go:683] "Waited before sending request" delay="174.117357ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-198834-m03"
I0917 00:01:34.580767 722351 pod_ready.go:94] pod "kube-scheduler-ha-198834-m03" is "Ready"
I0917 00:01:34.580796 722351 pod_ready.go:86] duration metric: took 180.109317ms for pod "kube-scheduler-ha-198834-m03" in "kube-system" namespace to be "Ready" or be gone ...
I0917 00:01:34.580808 722351 pod_ready.go:40] duration metric: took 3m4.808720134s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I0917 00:01:34.629691 722351 start.go:617] kubectl: 1.34.1, cluster: 1.34.0 (minor skew: 0)
I0917 00:01:34.631405 722351 out.go:179] * Done! kubectl is now configured to use "ha-198834" cluster and "default" namespace by default
==> Docker <==
Sep 16 23:57:25 ha-198834 cri-dockerd[1427]: time="2025-09-16T23:57:25Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-66bc5c9577-5wx4k_kube-system\": unexpected command output Device \"eth0\" does not exist.\n with error: exit status 1"
Sep 16 23:57:25 ha-198834 cri-dockerd[1427]: time="2025-09-16T23:57:25Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-66bc5c9577-mjbz6_kube-system\": unexpected command output Device \"eth0\" does not exist.\n with error: exit status 1"
Sep 16 23:57:25 ha-198834 cri-dockerd[1427]: time="2025-09-16T23:57:25Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/50aecbe9f874a63c5159d55af06211bca7903e623f01f1e603f267caaf6da9a7/resolv.conf as [nameserver 192.168.49.1 search local europe-west1-b.c.k8s-minikube.internal c.k8s-minikube.internal google.internal options trust-ad ndots:0 edns0]"
Sep 16 23:57:26 ha-198834 cri-dockerd[1427]: time="2025-09-16T23:57:26Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-66bc5c9577-5wx4k_kube-system\": unexpected command output Device \"eth0\" does not exist.\n with error: exit status 1"
Sep 16 23:57:26 ha-198834 cri-dockerd[1427]: time="2025-09-16T23:57:26Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-66bc5c9577-mjbz6_kube-system\": unexpected command output Device \"eth0\" does not exist.\n with error: exit status 1"
Sep 16 23:57:29 ha-198834 cri-dockerd[1427]: time="2025-09-16T23:57:29Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:10.244.0.0/24,},}"
Sep 16 23:57:29 ha-198834 cri-dockerd[1427]: time="2025-09-16T23:57:29Z" level=info msg="Stop pulling image docker.io/kindest/kindnetd:v20250512-df8de77b: Status: Downloaded newer image for kindest/kindnetd:v20250512-df8de77b"
Sep 16 23:57:38 ha-198834 dockerd[1122]: time="2025-09-16T23:57:38.259744438Z" level=info msg="ignoring event" container=fde474653f398ec39c3db826d18aef42dd96b2e13f969de6637124df51136f75 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 16 23:57:38 ha-198834 dockerd[1122]: time="2025-09-16T23:57:38.275867775Z" level=info msg="ignoring event" container=64da07c62c4a9952e882760e7e5b5c04eda9df5e202ce0e9c2bf6fc892deeeea module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 16 23:57:38 ha-198834 dockerd[1122]: time="2025-09-16T23:57:38.320870537Z" level=info msg="ignoring event" container=310e06fbf27552640b0b3a8e13bad59df698b55eb4f3fb6f18b12db35aa6c730 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 16 23:57:38 ha-198834 dockerd[1122]: time="2025-09-16T23:57:38.336829292Z" level=info msg="ignoring event" container=a9537db0dd134f5d54858edc93311297fbfcf0df7c8779512025918dcaa8fc3d module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 16 23:57:38 ha-198834 cri-dockerd[1427]: time="2025-09-16T23:57:38Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/870758f308362bc20e83047a4adf1621caf84b44c5752280d8fc86e4c48fbcab/resolv.conf as [nameserver 192.168.49.1 search local europe-west1-b.c.k8s-minikube.internal c.k8s-minikube.internal google.internal options edns0 trust-ad ndots:0]"
Sep 16 23:57:38 ha-198834 cri-dockerd[1427]: time="2025-09-16T23:57:38Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/bf6d6b59f24132f5ce3eeb0feb770948fcab77227dc0f50c12a706b85a62d850/resolv.conf as [nameserver 192.168.49.1 search local europe-west1-b.c.k8s-minikube.internal c.k8s-minikube.internal google.internal options edns0 trust-ad ndots:0]"
Sep 16 23:57:38 ha-198834 cri-dockerd[1427]: time="2025-09-16T23:57:38Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-66bc5c9577-5wx4k_kube-system\": unexpected command output Device \"eth0\" does not exist.\n with error: exit status 1"
Sep 16 23:57:38 ha-198834 cri-dockerd[1427]: time="2025-09-16T23:57:38Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-66bc5c9577-mjbz6_kube-system\": unexpected command output Device \"eth0\" does not exist.\n with error: exit status 1"
Sep 16 23:57:39 ha-198834 cri-dockerd[1427]: time="2025-09-16T23:57:39Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-66bc5c9577-5wx4k_kube-system\": unexpected command output Device \"eth0\" does not exist.\n with error: exit status 1"
Sep 16 23:57:39 ha-198834 cri-dockerd[1427]: time="2025-09-16T23:57:39Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-66bc5c9577-mjbz6_kube-system\": unexpected command output Device \"eth0\" does not exist.\n with error: exit status 1"
Sep 16 23:57:51 ha-198834 dockerd[1122]: time="2025-09-16T23:57:51.687384709Z" level=info msg="ignoring event" container=11889e34950f849cf7805c6d56f1957ad9d5af727f4810f2da728671398b9f6e module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 16 23:57:51 ha-198834 dockerd[1122]: time="2025-09-16T23:57:51.687719889Z" level=info msg="ignoring event" container=1ccdf9f33d5601763297f230a2f6e51620db2ed183e9f4b9179f4ccef579dfac module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 16 23:57:51 ha-198834 dockerd[1122]: time="2025-09-16T23:57:51.756623723Z" level=info msg="ignoring event" container=bf6d6b59f24132f5ce3eeb0feb770948fcab77227dc0f50c12a706b85a62d850 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 16 23:57:51 ha-198834 dockerd[1122]: time="2025-09-16T23:57:51.756673284Z" level=info msg="ignoring event" container=870758f308362bc20e83047a4adf1621caf84b44c5752280d8fc86e4c48fbcab module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 16 23:57:51 ha-198834 cri-dockerd[1427]: time="2025-09-16T23:57:51Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/bfd4ac8a61c79e2858011e2eb2ea54afa70f0971e7fa5ea4f41775c0a5fcbba2/resolv.conf as [nameserver 192.168.49.1 search local europe-west1-b.c.k8s-minikube.internal c.k8s-minikube.internal google.internal options ndots:0 edns0 trust-ad]"
Sep 16 23:57:51 ha-198834 cri-dockerd[1427]: time="2025-09-16T23:57:51Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/7b09bb6b1db0bb2224a5805349fc4d6295cace54536b13098ccf873075486000/resolv.conf as [nameserver 192.168.49.1 search local europe-west1-b.c.k8s-minikube.internal c.k8s-minikube.internal google.internal options edns0 trust-ad ndots:0]"
Sep 17 00:01:36 ha-198834 cri-dockerd[1427]: time="2025-09-17T00:01:36Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/2ab50e090d466ea69554546e2e97ae7ed7a7527c0e0d169e99a4862ba0516a41/resolv.conf as [nameserver 10.96.0.10 search default.svc.cluster.local svc.cluster.local cluster.local local europe-west1-b.c.k8s-minikube.internal c.k8s-minikube.internal google.internal options ndots:5]"
Sep 17 00:01:37 ha-198834 cri-dockerd[1427]: time="2025-09-17T00:01:37Z" level=info msg="Stop pulling image gcr.io/k8s-minikube/busybox:1.28: Status: Downloaded newer image for gcr.io/k8s-minikube/busybox:1.28"
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD
43ce744921507 gcr.io/k8s-minikube/busybox@sha256:9afb80db71730dbb303fe00765cbf34bddbdc6b66e49897fc2e1861967584b12 About a minute ago Running busybox 0 2ab50e090d466 busybox-7b57f96db7-pstjp
f4f7ea59034e3 52546a367cc9e 5 minutes ago Running coredns 2 7b09bb6b1db0b coredns-66bc5c9577-mjbz6
9a9eb43950f05 52546a367cc9e 5 minutes ago Running coredns 2 bfd4ac8a61c79 coredns-66bc5c9577-5wx4k
1ccdf9f33d560 52546a367cc9e 5 minutes ago Exited coredns 1 bf6d6b59f2413 coredns-66bc5c9577-mjbz6
11889e34950f8 52546a367cc9e 5 minutes ago Exited coredns 1 870758f308362 coredns-66bc5c9577-5wx4k
470c5aeb0143c kindest/kindnetd@sha256:07a4b3fe0077a0ae606cc0a200fc25a28fa64dcc30b8d311b461089969449f9a 5 minutes ago Running kindnet-cni 0 f541f878be896 kindnet-h28vp
b16ddbbc469c5 6e38f40d628db 5 minutes ago Running storage-provisioner 0 50aecbe9f874a storage-provisioner
2da683f529549 df0860106674d 5 minutes ago Running kube-proxy 0 b04f554fbbf03 kube-proxy-5tkhn
8a32665f7e3e4 ghcr.io/kube-vip/kube-vip@sha256:4f256554a83a6d824ea9c5307450a2c3fd132e09c52b339326f94fefaf67155c 6 minutes ago Running kube-vip 0 5e4aed7a38e18 kube-vip-ha-198834
4f536df8f44eb a0af72f2ec6d6 6 minutes ago Running kube-controller-manager 0 3f97e150fa11b kube-controller-manager-ha-198834
ea129c2b5408a 90550c43ad2bc 6 minutes ago Running kube-apiserver 0 364803df34eb0 kube-apiserver-ha-198834
69601afa8d5b0 5f1f5298c888d 6 minutes ago Running etcd 0 d6bbb58cc14ca etcd-ha-198834
82a99d0c7744a 46169d968e920 6 minutes ago Running kube-scheduler 0 7ffde546949d7 kube-scheduler-ha-198834
==> coredns [11889e34950f] <==
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.32.3/tools/cache/reflector.go:251: failed to list *v1.Service: Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: connect: network is unreachable
[ERROR] plugin/kubernetes: Unhandled Error
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.32.3/tools/cache/reflector.go:251: failed to list *v1.EndpointSlice: Get "https://10.96.0.1:443/apis/discovery.k8s.io/v1/endpointslices?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: connect: network is unreachable
[ERROR] plugin/kubernetes: Unhandled Error
[ERROR] plugin/kubernetes: Unhandled Error
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] SIGTERM: Shutting down servers then terminating
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[WARNING] plugin/kubernetes: starting server with unsynced Kubernetes API
.:53
[INFO] plugin/reload: Running configuration SHA512 = 9e2996f8cb67ac53e0259ab1f8d615d07d1beb0bd07e6a1e39769c3bf486a905bb991cc47f8d2f14d0d3a90a87dfc625a0b4c524fed169d8158c40657c0694b1
CoreDNS-1.12.1
linux/amd64, go1.24.1, 707c7c1
[INFO] plugin/health: Going into lameduck mode for 5s
[INFO] 127.0.0.1:50107 - 45856 "HINFO IN 4510730421515958928.8365162867102253976. udp 57 false 512" - - 0 5.000165011s
[ERROR] plugin/errors: 2 4510730421515958928.8365162867102253976. HINFO: dial udp 192.168.49.1:53: connect: network is unreachable
[INFO] 127.0.0.1:50484 - 7509 "HINFO IN 4510730421515958928.8365162867102253976. udp 57 false 512" - - 0 5.000096464s
[ERROR] plugin/errors: 2 4510730421515958928.8365162867102253976. HINFO: dial udp 192.168.49.1:53: connect: network is unreachable
==> coredns [1ccdf9f33d56] <==
[ERROR] plugin/kubernetes: Unhandled Error
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.32.3/tools/cache/reflector.go:251: failed to list *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: connect: network is unreachable
[ERROR] plugin/kubernetes: Unhandled Error
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.32.3/tools/cache/reflector.go:251: failed to list *v1.Service: Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: connect: network is unreachable
[ERROR] plugin/kubernetes: Unhandled Error
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] SIGTERM: Shutting down servers then terminating
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[WARNING] plugin/kubernetes: starting server with unsynced Kubernetes API
.:53
[INFO] plugin/reload: Running configuration SHA512 = 9e2996f8cb67ac53e0259ab1f8d615d07d1beb0bd07e6a1e39769c3bf486a905bb991cc47f8d2f14d0d3a90a87dfc625a0b4c524fed169d8158c40657c0694b1
CoreDNS-1.12.1
linux/amd64, go1.24.1, 707c7c1
[INFO] plugin/health: Going into lameduck mode for 5s
[INFO] 127.0.0.1:49262 - 38359 "HINFO IN 3627584456028797286.2821467008707036685. udp 57 false 512" - - 0 5.000112146s
[ERROR] plugin/errors: 2 3627584456028797286.2821467008707036685. HINFO: dial udp 192.168.49.1:53: connect: network is unreachable
[INFO] 127.0.0.1:51442 - 41164 "HINFO IN 3627584456028797286.2821467008707036685. udp 57 false 512" - - 0 5.000125545s
[ERROR] plugin/errors: 2 3627584456028797286.2821467008707036685. HINFO: dial udp 192.168.49.1:53: connect: network is unreachable
==> coredns [9a9eb43950f0] <==
maxprocs: Leaving GOMAXPROCS=8: CPU quota undefined
.:53
[INFO] plugin/reload: Running configuration SHA512 = 9e2996f8cb67ac53e0259ab1f8d615d07d1beb0bd07e6a1e39769c3bf486a905bb991cc47f8d2f14d0d3a90a87dfc625a0b4c524fed169d8158c40657c0694b1
CoreDNS-1.12.1
linux/amd64, go1.24.1, 707c7c1
[INFO] 127.0.0.1:48462 - 46874 "HINFO IN 5273252588524494281.7165436024008789767. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.039842483s
[INFO] 10.244.1.2:57104 - 4 "A IN kubernetes.io. udp 31 false 512" NOERROR qr,rd,ra 89 0.003506199s
[INFO] 10.244.1.2:35085 - 6 "PTR IN 135.186.33.3.in-addr.arpa. udp 43 false 512" NOERROR qr,rd,ra 124 0.022178595s
[INFO] 10.244.0.4:51301 - 3 "AAAA IN kubernetes.io. udp 31 false 512" NOERROR qr,rd,ra 140 0.001619426s
[INFO] 10.244.1.2:53849 - 2 "PTR IN 10.0.96.10.in-addr.arpa. udp 41 false 512" NOERROR qr,aa,rd 116 0.000259853s
[INFO] 10.244.1.2:45188 - 5 "AAAA IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 147 0.000162256s
[INFO] 10.244.1.2:47534 - 6 "A IN kubernetes.default. udp 36 false 512" NXDOMAIN qr,rd,ra 111 0.012152721s
[INFO] 10.244.1.2:52406 - 8 "A IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 106 0.000144842s
[INFO] 10.244.0.4:34463 - 3 "AAAA IN kubernetes.default. udp 36 false 512" NXDOMAIN qr,rd,ra 111 0.015354953s
[INFO] 10.244.0.4:44729 - 4 "AAAA IN kubernetes.default.default.svc.cluster.local. udp 62 false 512" NXDOMAIN qr,aa,rd 155 0.000186473s
[INFO] 10.244.0.4:49846 - 5 "AAAA IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 147 0.000119774s
[INFO] 10.244.0.4:48015 - 7 "A IN kubernetes.default.default.svc.cluster.local. udp 62 false 512" NXDOMAIN qr,aa,rd 155 0.000170848s
[INFO] 10.244.1.2:54294 - 3 "AAAA IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 147 0.00022712s
[INFO] 10.244.1.2:41177 - 4 "A IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 106 0.000176943s
[INFO] 10.244.1.2:35431 - 5 "PTR IN 1.0.96.10.in-addr.arpa. udp 40 false 512" NOERROR qr,aa,rd 112 0.000141918s
[INFO] 10.244.0.4:42357 - 3 "AAAA IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 147 0.000222475s
[INFO] 10.244.0.4:38639 - 5 "PTR IN 1.0.96.10.in-addr.arpa. udp 40 false 512" NOERROR qr,aa,rd 112 0.000076388s
==> coredns [f4f7ea59034e] <==
maxprocs: Leaving GOMAXPROCS=8: CPU quota undefined
.:53
[INFO] plugin/reload: Running configuration SHA512 = 9e2996f8cb67ac53e0259ab1f8d615d07d1beb0bd07e6a1e39769c3bf486a905bb991cc47f8d2f14d0d3a90a87dfc625a0b4c524fed169d8158c40657c0694b1
CoreDNS-1.12.1
linux/amd64, go1.24.1, 707c7c1
[INFO] 127.0.0.1:55965 - 29875 "HINFO IN 6625775143588404920.46653605595863863. udp 55 false 512" NXDOMAIN qr,rd,ra 130 0.021009667s
[INFO] 10.244.1.2:48391 - 2 "PTR IN 10.0.96.10.in-addr.arpa. udp 41 false 512" NOERROR qr,aa,rd 116 0.000347519s
[INFO] 10.244.1.2:52968 - 3 "AAAA IN kubernetes.io. udp 31 false 512" NOERROR qr,rd,ra 140 0.006268731s
[INFO] 10.244.1.2:37064 - 5 "PTR IN 90.167.197.15.in-addr.arpa. udp 44 false 512" NOERROR qr,rd,ra 126 0.01625905s
[INFO] 10.244.0.4:38724 - 2 "PTR IN 10.0.96.10.in-addr.arpa. udp 41 false 512" NOERROR qr,aa,rd 116 0.00015051s
[INFO] 10.244.0.4:54867 - 4 "A IN kubernetes.io. udp 31 false 512" NOERROR qr,rd,ra 89 0.000812822s
[INFO] 10.244.0.4:36556 - 5 "PTR IN 135.186.33.3.in-addr.arpa. udp 43 false 512" NOERROR qr,rd,ra 124 0.000823234s
[INFO] 10.244.0.4:49673 - 6 "PTR IN 90.167.197.15.in-addr.arpa. udp 44 false 512" NOERROR qr,aa,rd,ra 126 0.000092222s
[INFO] 10.244.1.2:54588 - 3 "AAAA IN kubernetes.default. udp 36 false 512" NXDOMAIN qr,rd,ra 111 0.010881123s
[INFO] 10.244.1.2:37311 - 4 "AAAA IN kubernetes.default.default.svc.cluster.local. udp 62 false 512" NXDOMAIN qr,aa,rd 155 0.000184317s
[INFO] 10.244.1.2:34776 - 7 "A IN kubernetes.default.default.svc.cluster.local. udp 62 false 512" NXDOMAIN qr,aa,rd 155 0.000214951s
[INFO] 10.244.1.2:60592 - 9 "PTR IN 1.0.96.10.in-addr.arpa. udp 40 false 512" NOERROR qr,aa,rd 112 0.000142928s
[INFO] 10.244.0.4:49014 - 2 "PTR IN 10.0.96.10.in-addr.arpa. udp 41 false 512" NOERROR qr,aa,rd 116 0.00014497s
[INFO] 10.244.0.4:49266 - 6 "A IN kubernetes.default. udp 36 false 512" NXDOMAIN qr,rd,ra 111 0.004264282s
[INFO] 10.244.0.4:42048 - 8 "A IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 106 0.000128777s
[INFO] 10.244.0.4:37542 - 9 "PTR IN 1.0.96.10.in-addr.arpa. udp 40 false 512" NOERROR qr,aa,rd 112 0.000071541s
[INFO] 10.244.1.2:43417 - 2 "PTR IN 10.0.96.10.in-addr.arpa. udp 41 false 512" NOERROR qr,aa,rd 116 0.000165726s
[INFO] 10.244.0.4:54211 - 2 "PTR IN 10.0.96.10.in-addr.arpa. udp 41 false 512" NOERROR qr,aa,rd 116 0.000155228s
[INFO] 10.244.0.4:54131 - 4 "A IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 106 0.000161968s
==> describe nodes <==
Name: ha-198834
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=ha-198834
kubernetes.io/os=linux
minikube.k8s.io/commit=9829f0bc17c523e4378d28e0c25741106f24f00a
minikube.k8s.io/name=ha-198834
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_09_16T23_57_19_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Tue, 16 Sep 2025 23:57:16 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: ha-198834
AcquireTime: <unset>
RenewTime: Wed, 17 Sep 2025 00:03:16 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Wed, 17 Sep 2025 00:01:53 +0000 Tue, 16 Sep 2025 23:57:15 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Wed, 17 Sep 2025 00:01:53 +0000 Tue, 16 Sep 2025 23:57:15 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Wed, 17 Sep 2025 00:01:53 +0000 Tue, 16 Sep 2025 23:57:15 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Wed, 17 Sep 2025 00:01:53 +0000 Tue, 16 Sep 2025 23:57:16 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.2
Hostname: ha-198834
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863448Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863448Ki
pods: 110
System Info:
Machine ID: 3525bf030f0d49c1ab057441433c477c
System UUID: 70b73bcc-60ff-4343-a209-12ec7b2f4c5a
Boot ID: 38a5d1c6-3b0d-42c9-b748-79065a969107
Kernel Version: 6.8.0-1037-gcp
OS Image: Ubuntu 22.04.5 LTS
Operating System: linux
Architecture: amd64
Container Runtime Version: docker://28.4.0
Kubelet Version: v1.34.0
Kube-Proxy Version:
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (11 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
default busybox-7b57f96db7-pstjp 0 (0%) 0 (0%) 0 (0%) 0 (0%) 106s
kube-system coredns-66bc5c9577-5wx4k 100m (1%) 0 (0%) 70Mi (0%) 170Mi (0%) 5m57s
kube-system coredns-66bc5c9577-mjbz6 100m (1%) 0 (0%) 70Mi (0%) 170Mi (0%) 5m57s
kube-system etcd-ha-198834 100m (1%) 0 (0%) 100Mi (0%) 0 (0%) 6m3s
kube-system kindnet-h28vp 100m (1%) 100m (1%) 50Mi (0%) 50Mi (0%) 5m57s
kube-system kube-apiserver-ha-198834 250m (3%) 0 (0%) 0 (0%) 0 (0%) 6m3s
kube-system kube-controller-manager-ha-198834 200m (2%) 0 (0%) 0 (0%) 0 (0%) 6m3s
kube-system kube-proxy-5tkhn 0 (0%) 0 (0%) 0 (0%) 0 (0%) 5m57s
kube-system kube-scheduler-ha-198834 100m (1%) 0 (0%) 0 (0%) 0 (0%) 6m3s
kube-system kube-vip-ha-198834 0 (0%) 0 (0%) 0 (0%) 0 (0%) 6m5s
kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 5m56s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 950m (11%) 100m (1%)
memory 290Mi (0%) 390Mi (1%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 5m56s kube-proxy
Normal Starting 6m3s kubelet Starting kubelet.
Normal NodeAllocatableEnforced 6m3s kubelet Updated Node Allocatable limit across pods
Normal NodeHasSufficientMemory 6m3s kubelet Node ha-198834 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 6m3s kubelet Node ha-198834 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 6m3s kubelet Node ha-198834 status is now: NodeHasSufficientPID
Normal RegisteredNode 5m58s node-controller Node ha-198834 event: Registered Node ha-198834 in Controller
Normal RegisteredNode 5m29s node-controller Node ha-198834 event: Registered Node ha-198834 in Controller
Normal RegisteredNode 4m58s node-controller Node ha-198834 event: Registered Node ha-198834 in Controller
Name: ha-198834-m02
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=ha-198834-m02
kubernetes.io/os=linux
minikube.k8s.io/commit=9829f0bc17c523e4378d28e0c25741106f24f00a
minikube.k8s.io/name=ha-198834
minikube.k8s.io/primary=false
minikube.k8s.io/updated_at=2025_09_16T23_57_54_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Tue, 16 Sep 2025 23:57:53 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: ha-198834-m02
AcquireTime: <unset>
RenewTime: Wed, 17 Sep 2025 00:03:19 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Wed, 17 Sep 2025 00:01:48 +0000 Tue, 16 Sep 2025 23:57:53 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Wed, 17 Sep 2025 00:01:48 +0000 Tue, 16 Sep 2025 23:57:53 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Wed, 17 Sep 2025 00:01:48 +0000 Tue, 16 Sep 2025 23:57:53 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Wed, 17 Sep 2025 00:01:48 +0000 Tue, 16 Sep 2025 23:57:56 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.3
Hostname: ha-198834-m02
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863448Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863448Ki
pods: 110
System Info:
Machine ID: 35caf7934a824e33949ce426f7316bfd
System UUID: 0c81ae9e-e051-426a-b3a5-724dde7bd0d3
Boot ID: 38a5d1c6-3b0d-42c9-b748-79065a969107
Kernel Version: 6.8.0-1037-gcp
OS Image: Ubuntu 22.04.5 LTS
Operating System: linux
Architecture: amd64
Container Runtime Version: docker://28.4.0
Kubelet Version: v1.34.0
Kube-Proxy Version:
PodCIDR: 10.244.1.0/24
PodCIDRs: 10.244.1.0/24
Non-terminated Pods: (8 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
default busybox-7b57f96db7-kg4q6 0 (0%) 0 (0%) 0 (0%) 0 (0%) 106s
kube-system etcd-ha-198834-m02 100m (1%) 0 (0%) 100Mi (0%) 0 (0%) 5m25s
kube-system kindnet-2vbn5 100m (1%) 100m (1%) 50Mi (0%) 50Mi (0%) 5m28s
kube-system kube-apiserver-ha-198834-m02 250m (3%) 0 (0%) 0 (0%) 0 (0%) 5m25s
kube-system kube-controller-manager-ha-198834-m02 200m (2%) 0 (0%) 0 (0%) 0 (0%) 5m25s
kube-system kube-proxy-h2fxd 0 (0%) 0 (0%) 0 (0%) 0 (0%) 5m28s
kube-system kube-scheduler-ha-198834-m02 100m (1%) 0 (0%) 0 (0%) 0 (0%) 5m25s
kube-system kube-vip-ha-198834-m02 0 (0%) 0 (0%) 0 (0%) 0 (0%) 5m25s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 750m (9%) 100m (1%)
memory 150Mi (0%) 50Mi (0%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 5m21s kube-proxy
Normal RegisteredNode 5m24s node-controller Node ha-198834-m02 event: Registered Node ha-198834-m02 in Controller
Normal RegisteredNode 5m23s node-controller Node ha-198834-m02 event: Registered Node ha-198834-m02 in Controller
Normal RegisteredNode 4m58s node-controller Node ha-198834-m02 event: Registered Node ha-198834-m02 in Controller
Name: ha-198834-m03
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=ha-198834-m03
kubernetes.io/os=linux
minikube.k8s.io/commit=9829f0bc17c523e4378d28e0c25741106f24f00a
minikube.k8s.io/name=ha-198834
minikube.k8s.io/primary=false
minikube.k8s.io/updated_at=2025_09_16T23_58_25_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Tue, 16 Sep 2025 23:58:25 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: ha-198834-m03
AcquireTime: <unset>
RenewTime: Wed, 17 Sep 2025 00:03:11 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Wed, 17 Sep 2025 00:01:48 +0000 Tue, 16 Sep 2025 23:58:25 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Wed, 17 Sep 2025 00:01:48 +0000 Tue, 16 Sep 2025 23:58:25 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Wed, 17 Sep 2025 00:01:48 +0000 Tue, 16 Sep 2025 23:58:25 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Wed, 17 Sep 2025 00:01:48 +0000 Tue, 16 Sep 2025 23:58:29 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.4
Hostname: ha-198834-m03
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863448Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863448Ki
pods: 110
System Info:
Machine ID: 32c4e7dc065e4fa49595825994457b8e
System UUID: 6f810798-3461-44d1-91c3-d55b483ec842
Boot ID: 38a5d1c6-3b0d-42c9-b748-79065a969107
Kernel Version: 6.8.0-1037-gcp
OS Image: Ubuntu 22.04.5 LTS
Operating System: linux
Architecture: amd64
Container Runtime Version: docker://28.4.0
Kubelet Version: v1.34.0
Kube-Proxy Version:
PodCIDR: 10.244.2.0/24
PodCIDRs: 10.244.2.0/24
Non-terminated Pods: (8 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
default busybox-7b57f96db7-l2jn5 0 (0%) 0 (0%) 0 (0%) 0 (0%) 106s
kube-system etcd-ha-198834-m03 100m (1%) 0 (0%) 100Mi (0%) 0 (0%) 4m52s
kube-system kindnet-67fn9 100m (1%) 100m (1%) 50Mi (0%) 50Mi (0%) 4m47s
kube-system kube-apiserver-ha-198834-m03 250m (3%) 0 (0%) 0 (0%) 0 (0%) 4m52s
kube-system kube-controller-manager-ha-198834-m03 200m (2%) 0 (0%) 0 (0%) 0 (0%) 4m52s
kube-system kube-proxy-d8brp 0 (0%) 0 (0%) 0 (0%) 0 (0%) 4m56s
kube-system kube-scheduler-ha-198834-m03 100m (1%) 0 (0%) 0 (0%) 0 (0%) 4m52s
kube-system kube-vip-ha-198834-m03 0 (0%) 0 (0%) 0 (0%) 0 (0%) 4m52s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 750m (9%) 100m (1%)
memory 150Mi (0%) 50Mi (0%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal RegisteredNode 4m54s node-controller Node ha-198834-m03 event: Registered Node ha-198834-m03 in Controller
Normal RegisteredNode 4m53s node-controller Node ha-198834-m03 event: Registered Node ha-198834-m03 in Controller
Normal RegisteredNode 4m53s node-controller Node ha-198834-m03 event: Registered Node ha-198834-m03 in Controller
==> dmesg <==
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff 12 d7 9b ce 2e 89 08 06
[ +4.978924] IPv4: martian source 10.244.0.1 from 10.244.0.28, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff d6 72 94 9b 14 ba 08 06
[ +0.000493] IPv4: martian source 10.244.0.28 from 10.244.0.5, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff 5a 36 22 af 75 97 08 06
[Sep16 23:51] IPv4: martian source 10.244.0.1 from 10.244.0.32, on dev eth0
[ +0.000009] ll header: 00000000: ff ff ff ff ff ff ae a4 31 55 21 41 08 06
[ +0.000514] IPv4: martian source 10.244.0.32 from 10.244.0.5, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff 5a 36 22 af 75 97 08 06
[ +0.000564] IPv4: martian source 10.244.0.32 from 10.244.0.9, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff 72 7f 09 ee 64 b6 08 06
[Sep16 23:52] IPv4: martian source 10.244.0.33 from 10.244.0.27, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff 12 d7 9b ce 2e 89 08 06
[ +0.314795] IPv4: martian source 10.244.0.27 from 10.244.0.5, on dev eth0
[ +0.000009] ll header: 00000000: ff ff ff ff ff ff 5a 36 22 af 75 97 08 06
[Sep16 23:54] IPv4: martian source 10.244.0.1 from 10.244.0.2, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff a2 29 1f 42 ac 54 08 06
[ +0.101248] IPv4: martian source 10.244.0.1 from 10.244.0.3, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff 02 b5 d6 ff 8d 76 08 06
[ +45.338162] IPv4: martian source 10.244.0.1 from 10.244.0.4, on dev eth0
[ +0.000009] ll header: 00000000: ff ff ff ff ff ff 16 e6 31 2b 22 43 08 06
[Sep16 23:55] IPv4: martian source 10.244.0.1 from 10.244.0.6, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff d2 f1 85 bd 7a a7 08 06
[Sep16 23:56] IPv4: martian source 10.244.0.1 from 10.244.0.15, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff 6a 1c d5 f1 cd b8 08 06
==> etcd [69601afa8d5b] <==
{"level":"info","ts":"2025-09-16T23:58:12.665306Z","caller":"rafthttp/stream.go:273","msg":"established TCP streaming connection with remote peer","stream-writer-type":"stream MsgApp v2","local-member-id":"aec36adc501070cc","remote-peer-id":"b3d041dbb5a11c89"}
{"level":"info","ts":"2025-09-16T23:58:12.670540Z","caller":"rafthttp/stream.go:248","msg":"set message encoder","from":"aec36adc501070cc","to":"b3d041dbb5a11c89","stream-type":"stream Message"}
{"level":"info","ts":"2025-09-16T23:58:12.671162Z","caller":"rafthttp/stream.go:273","msg":"established TCP streaming connection with remote peer","stream-writer-type":"stream Message","local-member-id":"aec36adc501070cc","remote-peer-id":"b3d041dbb5a11c89"}
{"level":"info","ts":"2025-09-16T23:58:12.670991Z","caller":"etcdserver/snapshot_merge.go:64","msg":"sent database snapshot to writer","bytes":1384448,"size":"1.4 MB"}
{"level":"info","ts":"2025-09-16T23:58:12.677546Z","caller":"rafthttp/snapshot_sender.go:131","msg":"sent database snapshot","snapshot-index":686,"remote-peer-id":"b3d041dbb5a11c89","bytes":1393601,"size":"1.4 MB"}
{"level":"warn","ts":"2025-09-16T23:58:12.688158Z","caller":"rafthttp/stream.go:420","msg":"lost TCP streaming connection with remote peer","stream-reader-type":"stream MsgApp v2","local-member-id":"aec36adc501070cc","remote-peer-id":"b3d041dbb5a11c89","error":"EOF"}
{"level":"warn","ts":"2025-09-16T23:58:12.688674Z","caller":"rafthttp/stream.go:420","msg":"lost TCP streaming connection with remote peer","stream-reader-type":"stream Message","local-member-id":"aec36adc501070cc","remote-peer-id":"b3d041dbb5a11c89","error":"EOF"}
{"level":"info","ts":"2025-09-16T23:58:12.699050Z","caller":"rafthttp/stream.go:248","msg":"set message encoder","from":"aec36adc501070cc","to":"b3d041dbb5a11c89","stream-type":"stream MsgApp v2"}
{"level":"warn","ts":"2025-09-16T23:58:12.699094Z","caller":"rafthttp/stream.go:264","msg":"closed TCP streaming connection with remote peer","stream-writer-type":"stream MsgApp v2","local-member-id":"aec36adc501070cc","remote-peer-id":"b3d041dbb5a11c89"}
{"level":"info","ts":"2025-09-16T23:58:12.699108Z","caller":"rafthttp/stream.go:273","msg":"established TCP streaming connection with remote peer","stream-writer-type":"stream MsgApp v2","local-member-id":"aec36adc501070cc","remote-peer-id":"b3d041dbb5a11c89"}
{"level":"info","ts":"2025-09-16T23:58:12.702028Z","caller":"rafthttp/stream.go:248","msg":"set message encoder","from":"aec36adc501070cc","to":"b3d041dbb5a11c89","stream-type":"stream Message"}
{"level":"warn","ts":"2025-09-16T23:58:12.702080Z","caller":"rafthttp/stream.go:264","msg":"closed TCP streaming connection with remote peer","stream-writer-type":"stream Message","local-member-id":"aec36adc501070cc","remote-peer-id":"b3d041dbb5a11c89"}
{"level":"info","ts":"2025-09-16T23:58:12.702094Z","caller":"rafthttp/stream.go:273","msg":"established TCP streaming connection with remote peer","stream-writer-type":"stream Message","local-member-id":"aec36adc501070cc","remote-peer-id":"b3d041dbb5a11c89"}
{"level":"info","ts":"2025-09-16T23:58:12.733438Z","caller":"rafthttp/stream.go:411","msg":"established TCP streaming connection with remote peer","stream-reader-type":"stream MsgApp v2","local-member-id":"aec36adc501070cc","remote-peer-id":"b3d041dbb5a11c89"}
{"level":"info","ts":"2025-09-16T23:58:12.736369Z","caller":"rafthttp/stream.go:411","msg":"established TCP streaming connection with remote peer","stream-reader-type":"stream Message","local-member-id":"aec36adc501070cc","remote-peer-id":"b3d041dbb5a11c89"}
{"level":"warn","ts":"2025-09-16T23:58:12.759123Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"192.168.49.4:34222","server-name":"","error":"EOF"}
{"level":"info","ts":"2025-09-16T23:58:12.760774Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1981","msg":"aec36adc501070cc switched to configuration voters=(5981864578030751937 12593026477526642892 12956928539845794953)"}
{"level":"info","ts":"2025-09-16T23:58:12.760967Z","caller":"membership/cluster.go:550","msg":"promote member","cluster-id":"fa54960ea34d58be","local-member-id":"aec36adc501070cc","promoted-member-id":"b3d041dbb5a11c89"}
{"level":"info","ts":"2025-09-16T23:58:12.761007Z","caller":"etcdserver/server.go:1752","msg":"applied a configuration change through raft","local-member-id":"aec36adc501070cc","raft-conf-change":"ConfChangeAddNode","raft-conf-change-node-id":"b3d041dbb5a11c89"}
{"level":"info","ts":"2025-09-16T23:58:19.991223Z","caller":"etcdserver/server.go:2246","msg":"skip compaction since there is an inflight snapshot"}
{"level":"info","ts":"2025-09-16T23:58:25.496900Z","caller":"etcdserver/server.go:2246","msg":"skip compaction since there is an inflight snapshot"}
{"level":"info","ts":"2025-09-16T23:58:30.072550Z","caller":"etcdserver/server.go:2246","msg":"skip compaction since there is an inflight snapshot"}
{"level":"info","ts":"2025-09-16T23:58:32.068856Z","caller":"etcdserver/server.go:2246","msg":"skip compaction since there is an inflight snapshot"}
{"level":"info","ts":"2025-09-16T23:58:40.123997Z","caller":"etcdserver/server.go:2246","msg":"skip compaction since there is an inflight snapshot"}
{"level":"info","ts":"2025-09-16T23:58:42.678047Z","caller":"etcdserver/server.go:1856","msg":"sent merged snapshot","from":"aec36adc501070cc","to":"b3d041dbb5a11c89","bytes":1393601,"size":"1.4 MB","took":"30.013494343s"}
==> kernel <==
00:03:21 up 2:45, 0 users, load average: 2.29, 1.39, 1.12
Linux ha-198834 6.8.0-1037-gcp #39~22.04.1-Ubuntu SMP Thu Aug 21 17:29:24 UTC 2025 x86_64 x86_64 x86_64 GNU/Linux
PRETTY_NAME="Ubuntu 22.04.5 LTS"
==> kindnet [470c5aeb0143] <==
I0917 00:02:40.420211 1 main.go:324] Node ha-198834-m03 has CIDR [10.244.2.0/24]
I0917 00:02:50.424337 1 main.go:297] Handling node with IPs: map[192.168.49.3:{}]
I0917 00:02:50.424386 1 main.go:324] Node ha-198834-m02 has CIDR [10.244.1.0/24]
I0917 00:02:50.424593 1 main.go:297] Handling node with IPs: map[192.168.49.4:{}]
I0917 00:02:50.424610 1 main.go:324] Node ha-198834-m03 has CIDR [10.244.2.0/24]
I0917 00:02:50.424745 1 main.go:297] Handling node with IPs: map[192.168.49.2:{}]
I0917 00:02:50.424758 1 main.go:301] handling current node
I0917 00:03:00.418533 1 main.go:297] Handling node with IPs: map[192.168.49.3:{}]
I0917 00:03:00.418581 1 main.go:324] Node ha-198834-m02 has CIDR [10.244.1.0/24]
I0917 00:03:00.418801 1 main.go:297] Handling node with IPs: map[192.168.49.4:{}]
I0917 00:03:00.418814 1 main.go:324] Node ha-198834-m03 has CIDR [10.244.2.0/24]
I0917 00:03:00.418930 1 main.go:297] Handling node with IPs: map[192.168.49.2:{}]
I0917 00:03:00.418942 1 main.go:301] handling current node
I0917 00:03:10.423193 1 main.go:297] Handling node with IPs: map[192.168.49.3:{}]
I0917 00:03:10.423225 1 main.go:324] Node ha-198834-m02 has CIDR [10.244.1.0/24]
I0917 00:03:10.423436 1 main.go:297] Handling node with IPs: map[192.168.49.4:{}]
I0917 00:03:10.423448 1 main.go:324] Node ha-198834-m03 has CIDR [10.244.2.0/24]
I0917 00:03:10.423551 1 main.go:297] Handling node with IPs: map[192.168.49.2:{}]
I0917 00:03:10.423559 1 main.go:301] handling current node
I0917 00:03:20.423023 1 main.go:297] Handling node with IPs: map[192.168.49.2:{}]
I0917 00:03:20.423063 1 main.go:301] handling current node
I0917 00:03:20.423080 1 main.go:297] Handling node with IPs: map[192.168.49.3:{}]
I0917 00:03:20.423085 1 main.go:324] Node ha-198834-m02 has CIDR [10.244.1.0/24]
I0917 00:03:20.423378 1 main.go:297] Handling node with IPs: map[192.168.49.4:{}]
I0917 00:03:20.423393 1 main.go:324] Node ha-198834-m03 has CIDR [10.244.2.0/24]
==> kube-apiserver [ea129c2b5408] <==
I0916 23:57:18.340630 1 controller.go:667] quota admission added evaluator for: serviceaccounts
I0916 23:57:19.016197 1 controller.go:667] quota admission added evaluator for: deployments.apps
I0916 23:57:19.025253 1 alloc.go:328] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I0916 23:57:19.032951 1 controller.go:667] quota admission added evaluator for: daemonsets.apps
I0916 23:57:23.344022 1 controller.go:667] quota admission added evaluator for: replicasets.apps
I0916 23:57:24.194840 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12
I0916 23:57:24.200277 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12
I0916 23:57:24.242655 1 controller.go:667] quota admission added evaluator for: controllerrevisions.apps
I0916 23:58:29.048843 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0916 23:58:34.361323 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0916 23:59:36.632983 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0917 00:00:02.667929 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0917 00:00:58.976838 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0917 00:01:19.218755 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0917 00:02:15.644338 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0917 00:02:43.338268 1 stats.go:136] "Error getting keys" err="empty key: \"\""
E0917 00:03:18.851078 1 conn.go:339] Error on socket receive: read tcp 192.168.49.254:8443->192.168.49.1:58262: use of closed network connection
E0917 00:03:19.024113 1 conn.go:339] Error on socket receive: read tcp 192.168.49.254:8443->192.168.49.1:58282: use of closed network connection
E0917 00:03:19.194951 1 conn.go:339] Error on socket receive: read tcp 192.168.49.254:8443->192.168.49.1:58306: use of closed network connection
E0917 00:03:19.388722 1 conn.go:339] Error on socket receive: read tcp 192.168.49.254:8443->192.168.49.1:58332: use of closed network connection
E0917 00:03:19.557698 1 conn.go:339] Error on socket receive: read tcp 192.168.49.254:8443->192.168.49.1:58342: use of closed network connection
E0917 00:03:19.744687 1 conn.go:339] Error on socket receive: read tcp 192.168.49.254:8443->192.168.49.1:58348: use of closed network connection
E0917 00:03:19.919836 1 conn.go:339] Error on socket receive: read tcp 192.168.49.254:8443->192.168.49.1:58362: use of closed network connection
E0917 00:03:20.087518 1 conn.go:339] Error on socket receive: read tcp 192.168.49.254:8443->192.168.49.1:58376: use of closed network connection
E0917 00:03:20.254024 1 conn.go:339] Error on socket receive: read tcp 192.168.49.254:8443->192.168.49.1:58398: use of closed network connection
==> kube-controller-manager [4f536df8f44e] <==
I0916 23:57:23.340737 1 shared_informer.go:356] "Caches are synced" controller="legacy-service-account-token-cleaner"
I0916 23:57:23.340876 1 shared_informer.go:356] "Caches are synced" controller="PVC protection"
I0916 23:57:23.341125 1 shared_informer.go:356] "Caches are synced" controller="ClusterRoleAggregator"
I0916 23:57:23.341625 1 shared_informer.go:356] "Caches are synced" controller="PV protection"
I0916 23:57:23.341694 1 shared_informer.go:356] "Caches are synced" controller="cronjob"
I0916 23:57:23.342559 1 shared_informer.go:356] "Caches are synced" controller="endpoint_slice_mirroring"
I0916 23:57:23.344828 1 shared_informer.go:356] "Caches are synced" controller="namespace"
I0916 23:57:23.344975 1 shared_informer.go:356] "Caches are synced" controller="node"
I0916 23:57:23.345054 1 range_allocator.go:177] "Sending events to api server" logger="node-ipam-controller"
I0916 23:57:23.345095 1 range_allocator.go:183] "Starting range CIDR allocator" logger="node-ipam-controller"
I0916 23:57:23.345107 1 shared_informer.go:349] "Waiting for caches to sync" controller="cidrallocator"
I0916 23:57:23.345114 1 shared_informer.go:356] "Caches are synced" controller="cidrallocator"
I0916 23:57:23.346125 1 shared_informer.go:356] "Caches are synced" controller="resource quota"
I0916 23:57:23.351186 1 range_allocator.go:428] "Set node PodCIDR" logger="node-ipam-controller" node="ha-198834" podCIDRs=["10.244.0.0/24"]
I0916 23:57:23.356557 1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
I0916 23:57:23.360087 1 shared_informer.go:356] "Caches are synced" controller="resource quota"
I0916 23:57:53.917484 1 actual_state_of_world.go:541] "Failed to update statusUpdateNeeded field in actual state of world" logger="persistentvolume-attach-detach-controller" err="Failed to set statusUpdateNeeded to needed true, because nodeName=\"ha-198834-m02\" does not exist"
I0916 23:57:53.927329 1 range_allocator.go:428] "Set node PodCIDR" logger="node-ipam-controller" node="ha-198834-m02" podCIDRs=["10.244.1.0/24"]
I0916 23:57:58.295579 1 node_lifecycle_controller.go:873] "Missing timestamp for Node. Assuming now as a timestamp" logger="node-lifecycle-controller" node="ha-198834-m02"
E0916 23:58:24.690329 1 certificate_controller.go:151] "Unhandled Error" err="Sync csr-89jfn failed with : error updating approval for csr: Operation cannot be fulfilled on certificatesigningrequests.certificates.k8s.io \"csr-89jfn\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError"
E0916 23:58:24.703047 1 certificate_controller.go:151] "Unhandled Error" err="Sync csr-89jfn failed with : error updating signature for csr: Operation cannot be fulfilled on certificatesigningrequests.certificates.k8s.io \"csr-89jfn\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError"
I0916 23:58:25.387067 1 actual_state_of_world.go:541] "Failed to update statusUpdateNeeded field in actual state of world" logger="persistentvolume-attach-detach-controller" err="Failed to set statusUpdateNeeded to needed true, because nodeName=\"ha-198834-m03\" does not exist"
I0916 23:58:25.397154 1 range_allocator.go:428] "Set node PodCIDR" logger="node-ipam-controller" node="ha-198834-m03" podCIDRs=["10.244.2.0/24"]
I0916 23:58:28.308323 1 node_lifecycle_controller.go:873] "Missing timestamp for Node. Assuming now as a timestamp" logger="node-lifecycle-controller" node="ha-198834-m03"
E0917 00:01:35.727697 1 replica_set.go:587] "Unhandled Error" err="sync \"default/busybox-7b57f96db7\" failed with Operation cannot be fulfilled on replicasets.apps \"busybox-7b57f96db7\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError"
==> kube-proxy [2da683f52954] <==
I0916 23:57:24.932824 1 server_linux.go:53] "Using iptables proxy"
I0916 23:57:25.001436 1 shared_informer.go:349] "Waiting for caches to sync" controller="node informer cache"
I0916 23:57:25.102414 1 shared_informer.go:356] "Caches are synced" controller="node informer cache"
I0916 23:57:25.102449 1 server.go:219] "Successfully retrieved NodeIPs" NodeIPs=["192.168.49.2"]
E0916 23:57:25.102563 1 server.go:256] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I0916 23:57:25.131540 1 server.go:265] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I0916 23:57:25.131604 1 server_linux.go:132] "Using iptables Proxier"
I0916 23:57:25.138482 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I0916 23:57:25.139006 1 server.go:527] "Version info" version="v1.34.0"
I0916 23:57:25.139079 1 server.go:529] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0916 23:57:25.143232 1 config.go:403] "Starting serviceCIDR config controller"
I0916 23:57:25.143254 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
I0916 23:57:25.143282 1 config.go:200] "Starting service config controller"
I0916 23:57:25.143288 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
I0916 23:57:25.143298 1 config.go:106] "Starting endpoint slice config controller"
I0916 23:57:25.143304 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
I0916 23:57:25.144514 1 config.go:309] "Starting node config controller"
I0916 23:57:25.144540 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
I0916 23:57:25.144548 1 shared_informer.go:356] "Caches are synced" controller="node config"
I0916 23:57:25.243772 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
I0916 23:57:25.243822 1 shared_informer.go:356] "Caches are synced" controller="service config"
I0916 23:57:25.243822 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
==> kube-scheduler [82a99d0c7744] <==
E0916 23:58:30.036759 1 framework.go:1400] "Plugin Failed" err="Operation cannot be fulfilled on pods/binding \"kindnet-rwc59\": pod kindnet-rwc59 is already assigned to node \"ha-198834-m03\"" plugin="DefaultBinder" pod="kube-system/kindnet-rwc59" node="ha-198834-m03"
E0916 23:58:30.036813 1 schedule_one.go:379] "scheduler cache ForgetPod failed" err="pod 5897933c-61bc-4eef-8922-66c37ba68c57(kube-system/kindnet-rwc59) wasn't assumed so cannot be forgotten" logger="UnhandledError" pod="kube-system/kindnet-rwc59"
E0916 23:58:30.036834 1 schedule_one.go:1079] "Error scheduling pod; retrying" err="running Bind plugin \"DefaultBinder\": Operation cannot be fulfilled on pods/binding \"kindnet-rwc59\": pod kindnet-rwc59 is already assigned to node \"ha-198834-m03\"" logger="UnhandledError" pod="kube-system/kindnet-rwc59"
I0916 23:58:30.038109 1 schedule_one.go:1092] "Pod has been assigned to node. Abort adding it back to queue." pod="kube-system/kindnet-rwc59" node="ha-198834-m03"
E0916 23:58:30.048424 1 framework.go:1400] "Plugin Failed" err="Operation cannot be fulfilled on pods/binding \"kindnet-lpn5v\": pod kindnet-lpn5v is already assigned to node \"ha-198834-m03\"" plugin="DefaultBinder" pod="kube-system/kindnet-lpn5v" node="ha-198834-m03"
E0916 23:58:30.048665 1 schedule_one.go:379] "scheduler cache ForgetPod failed" err="pod 4edbf3a1-360c-4f5c-81a3-aa63deb9a159(kube-system/kindnet-lpn5v) wasn't assumed so cannot be forgotten" logger="UnhandledError" pod="kube-system/kindnet-lpn5v"
E0916 23:58:30.048751 1 schedule_one.go:1079] "Error scheduling pod; retrying" err="running Bind plugin \"DefaultBinder\": Operation cannot be fulfilled on pods/binding \"kindnet-lpn5v\": pod kindnet-lpn5v is already assigned to node \"ha-198834-m03\"" logger="UnhandledError" pod="kube-system/kindnet-lpn5v"
I0916 23:58:30.051563 1 schedule_one.go:1092] "Pod has been assigned to node. Abort adding it back to queue." pod="kube-system/kindnet-lpn5v" node="ha-198834-m03"
E0916 23:58:32.033373 1 framework.go:1400] "Plugin Failed" err="Operation cannot be fulfilled on pods/binding \"kindnet-wklkh\": pod kindnet-wklkh is already assigned to node \"ha-198834-m03\"" plugin="DefaultBinder" pod="kube-system/kindnet-wklkh" node="ha-198834-m03"
E0916 23:58:32.033442 1 schedule_one.go:379] "scheduler cache ForgetPod failed" err="pod b547a423-84fb-45ae-be85-ebd5ae31cede(kube-system/kindnet-wklkh) wasn't assumed so cannot be forgotten" logger="UnhandledError" pod="kube-system/kindnet-wklkh"
E0916 23:58:32.033468 1 schedule_one.go:1079] "Error scheduling pod; retrying" err="running Bind plugin \"DefaultBinder\": Operation cannot be fulfilled on pods/binding \"kindnet-wklkh\": pod kindnet-wklkh is already assigned to node \"ha-198834-m03\"" logger="UnhandledError" pod="kube-system/kindnet-wklkh"
I0916 23:58:32.034562 1 schedule_one.go:1092] "Pod has been assigned to node. Abort adding it back to queue." pod="kube-system/kindnet-wklkh" node="ha-198834-m03"
E0916 23:58:34.059741 1 framework.go:1400] "Plugin Failed" err="Operation cannot be fulfilled on pods/binding \"kindnet-cdptd\": pod kindnet-cdptd is already assigned to node \"ha-198834-m03\"" plugin="DefaultBinder" pod="kube-system/kindnet-cdptd" node="ha-198834-m03"
E0916 23:58:34.059840 1 schedule_one.go:379] "scheduler cache ForgetPod failed" err="pod ceef8152-3e11-4bf0-99dc-43470c027544(kube-system/kindnet-cdptd) wasn't assumed so cannot be forgotten" logger="UnhandledError" pod="kube-system/kindnet-cdptd"
E0916 23:58:34.059869 1 schedule_one.go:1079] "Error scheduling pod; retrying" err="running Bind plugin \"DefaultBinder\": Operation cannot be fulfilled on pods/binding \"kindnet-cdptd\": pod kindnet-cdptd is already assigned to node \"ha-198834-m03\"" logger="UnhandledError" pod="kube-system/kindnet-cdptd"
E0916 23:58:34.060293 1 framework.go:1400] "Plugin Failed" err="Operation cannot be fulfilled on pods/binding \"kindnet-8t8pb\": pod kindnet-8t8pb is already assigned to node \"ha-198834-m03\"" plugin="DefaultBinder" pod="kube-system/kindnet-8t8pb" node="ha-198834-m03"
E0916 23:58:34.060658 1 schedule_one.go:379] "scheduler cache ForgetPod failed" err="pod c8813f5f-dfaf-4be1-a0ba-e444bcb2e943(kube-system/kindnet-8t8pb) wasn't assumed so cannot be forgotten" logger="UnhandledError" pod="kube-system/kindnet-8t8pb"
E0916 23:58:34.061375 1 schedule_one.go:1079] "Error scheduling pod; retrying" err="running Bind plugin \"DefaultBinder\": Operation cannot be fulfilled on pods/binding \"kindnet-8t8pb\": pod kindnet-8t8pb is already assigned to node \"ha-198834-m03\"" logger="UnhandledError" pod="kube-system/kindnet-8t8pb"
I0916 23:58:34.061557 1 schedule_one.go:1092] "Pod has been assigned to node. Abort adding it back to queue." pod="kube-system/kindnet-cdptd" node="ha-198834-m03"
I0916 23:58:34.062640 1 schedule_one.go:1092] "Pod has been assigned to node. Abort adding it back to queue." pod="kube-system/kindnet-8t8pb" node="ha-198834-m03"
I0917 00:01:35.538693 1 cache.go:512] "Pod was added to a different node than it was assumed" podKey="ecad6988-1efb-4cc6-8920-902b41d3f3ed" pod="default/busybox-7b57f96db7-kg4q6" assumedNode="ha-198834-m02" currentNode="ha-198834-m03"
E0917 00:01:35.544474 1 framework.go:1400] "Plugin Failed" err="Operation cannot be fulfilled on pods/binding \"busybox-7b57f96db7-kg4q6\": pod busybox-7b57f96db7-kg4q6 is already assigned to node \"ha-198834-m02\"" plugin="DefaultBinder" pod="default/busybox-7b57f96db7-kg4q6" node="ha-198834-m03"
E0917 00:01:35.546366 1 schedule_one.go:379] "scheduler cache ForgetPod failed" err="pod ecad6988-1efb-4cc6-8920-902b41d3f3ed(default/busybox-7b57f96db7-kg4q6) was assumed on ha-198834-m03 but assigned to ha-198834-m02" logger="UnhandledError" pod="default/busybox-7b57f96db7-kg4q6"
E0917 00:01:35.546583 1 schedule_one.go:1079] "Error scheduling pod; retrying" err="running Bind plugin \"DefaultBinder\": Operation cannot be fulfilled on pods/binding \"busybox-7b57f96db7-kg4q6\": pod busybox-7b57f96db7-kg4q6 is already assigned to node \"ha-198834-m02\"" logger="UnhandledError" pod="default/busybox-7b57f96db7-kg4q6"
I0917 00:01:35.548055 1 schedule_one.go:1092] "Pod has been assigned to node. Abort adding it back to queue." pod="default/busybox-7b57f96db7-kg4q6" node="ha-198834-m02"
==> kubelet <==
Sep 16 23:57:24 ha-198834 kubelet[2468]: I0916 23:57:24.349086 2468 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/6c51d39f-7e43-461b-a021-13ddf0cb9845-lib-modules\") pod \"kindnet-h28vp\" (UID: \"6c51d39f-7e43-461b-a021-13ddf0cb9845\") " pod="kube-system/kindnet-h28vp"
Sep 16 23:57:24 ha-198834 kubelet[2468]: I0916 23:57:24.349103 2468 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/5edbfebe-2590-4d23-b80e-7496a4e9a5b6-xtables-lock\") pod \"kube-proxy-5tkhn\" (UID: \"5edbfebe-2590-4d23-b80e-7496a4e9a5b6\") " pod="kube-system/kube-proxy-5tkhn"
Sep 16 23:57:24 ha-198834 kubelet[2468]: I0916 23:57:24.349123 2468 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-84n49\" (UniqueName: \"kubernetes.io/projected/5edbfebe-2590-4d23-b80e-7496a4e9a5b6-kube-api-access-84n49\") pod \"kube-proxy-5tkhn\" (UID: \"5edbfebe-2590-4d23-b80e-7496a4e9a5b6\") " pod="kube-system/kube-proxy-5tkhn"
Sep 16 23:57:24 ha-198834 kubelet[2468]: I0916 23:57:24.650251 2468 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6f279fd8-dd3c-49a5-863d-a53124ecf1f5-config-volume\") pod \"coredns-66bc5c9577-5wx4k\" (UID: \"6f279fd8-dd3c-49a5-863d-a53124ecf1f5\") " pod="kube-system/coredns-66bc5c9577-5wx4k"
Sep 16 23:57:24 ha-198834 kubelet[2468]: I0916 23:57:24.650425 2468 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-th5ns\" (UniqueName: \"kubernetes.io/projected/c918625f-be11-44bf-8b82-d4c21b8993d1-kube-api-access-th5ns\") pod \"coredns-66bc5c9577-mjbz6\" (UID: \"c918625f-be11-44bf-8b82-d4c21b8993d1\") " pod="kube-system/coredns-66bc5c9577-mjbz6"
Sep 16 23:57:24 ha-198834 kubelet[2468]: I0916 23:57:24.650660 2468 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c918625f-be11-44bf-8b82-d4c21b8993d1-config-volume\") pod \"coredns-66bc5c9577-mjbz6\" (UID: \"c918625f-be11-44bf-8b82-d4c21b8993d1\") " pod="kube-system/coredns-66bc5c9577-mjbz6"
Sep 16 23:57:24 ha-198834 kubelet[2468]: I0916 23:57:24.650701 2468 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xhmb4\" (UniqueName: \"kubernetes.io/projected/6f279fd8-dd3c-49a5-863d-a53124ecf1f5-kube-api-access-xhmb4\") pod \"coredns-66bc5c9577-5wx4k\" (UID: \"6f279fd8-dd3c-49a5-863d-a53124ecf1f5\") " pod="kube-system/coredns-66bc5c9577-5wx4k"
Sep 16 23:57:25 ha-198834 kubelet[2468]: I0916 23:57:25.014693 2468 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-proxy-5tkhn" podStartSLOduration=1.014665687 podStartE2EDuration="1.014665687s" podCreationTimestamp="2025-09-16 23:57:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-16 23:57:24.932304069 +0000 UTC m=+6.176281069" watchObservedRunningTime="2025-09-16 23:57:25.014665687 +0000 UTC m=+6.258642688"
Sep 16 23:57:25 ha-198834 kubelet[2468]: I0916 23:57:25.042478 2468 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="310e06fbf27552640b0b3a8e13bad59df698b55eb4f3fb6f18b12db35aa6c730"
Sep 16 23:57:25 ha-198834 kubelet[2468]: I0916 23:57:25.046332 2468 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f541f878be89694936d8219d8e7fc682a8a169d9edf6417f067927aa4748c0ae"
Sep 16 23:57:25 ha-198834 kubelet[2468]: I0916 23:57:25.153403 2468 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jqrvp\" (UniqueName: \"kubernetes.io/projected/6b6f64f3-2647-4e13-be41-47fcc6111f3e-kube-api-access-jqrvp\") pod \"storage-provisioner\" (UID: \"6b6f64f3-2647-4e13-be41-47fcc6111f3e\") " pod="kube-system/storage-provisioner"
Sep 16 23:57:25 ha-198834 kubelet[2468]: I0916 23:57:25.153458 2468 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/6b6f64f3-2647-4e13-be41-47fcc6111f3e-tmp\") pod \"storage-provisioner\" (UID: \"6b6f64f3-2647-4e13-be41-47fcc6111f3e\") " pod="kube-system/storage-provisioner"
Sep 16 23:57:26 ha-198834 kubelet[2468]: I0916 23:57:26.098005 2468 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/coredns-66bc5c9577-5wx4k" podStartSLOduration=2.097979793 podStartE2EDuration="2.097979793s" podCreationTimestamp="2025-09-16 23:57:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-16 23:57:26.086842117 +0000 UTC m=+7.330819118" watchObservedRunningTime="2025-09-16 23:57:26.097979793 +0000 UTC m=+7.341956793"
Sep 16 23:57:26 ha-198834 kubelet[2468]: I0916 23:57:26.098130 2468 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=1.098124108 podStartE2EDuration="1.098124108s" podCreationTimestamp="2025-09-16 23:57:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-16 23:57:26.097817254 +0000 UTC m=+7.341794256" watchObservedRunningTime="2025-09-16 23:57:26.098124108 +0000 UTC m=+7.342101108"
Sep 16 23:57:29 ha-198834 kubelet[2468]: I0916 23:57:29.159968 2468 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/coredns-66bc5c9577-mjbz6" podStartSLOduration=5.159946005 podStartE2EDuration="5.159946005s" podCreationTimestamp="2025-09-16 23:57:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-16 23:57:26.124330373 +0000 UTC m=+7.368307374" watchObservedRunningTime="2025-09-16 23:57:29.159946005 +0000 UTC m=+10.403923006"
Sep 16 23:57:29 ha-198834 kubelet[2468]: I0916 23:57:29.193262 2468 kuberuntime_manager.go:1828] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/24"
Sep 16 23:57:29 ha-198834 kubelet[2468]: I0916 23:57:29.194144 2468 kubelet_network.go:47] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24"
Sep 16 23:57:30 ha-198834 kubelet[2468]: I0916 23:57:30.158085 2468 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kindnet-h28vp" podStartSLOduration=1.342825895 podStartE2EDuration="6.158061718s" podCreationTimestamp="2025-09-16 23:57:24 +0000 UTC" firstStartedPulling="2025-09-16 23:57:24.955662014 +0000 UTC m=+6.199639012" lastFinishedPulling="2025-09-16 23:57:29.770897851 +0000 UTC m=+11.014874835" observedRunningTime="2025-09-16 23:57:30.157595407 +0000 UTC m=+11.401572408" watchObservedRunningTime="2025-09-16 23:57:30.158061718 +0000 UTC m=+11.402038720"
Sep 16 23:57:39 ha-198834 kubelet[2468]: I0916 23:57:39.230434 2468 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="310e06fbf27552640b0b3a8e13bad59df698b55eb4f3fb6f18b12db35aa6c730"
Sep 16 23:57:39 ha-198834 kubelet[2468]: I0916 23:57:39.258365 2468 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a9537db0dd134f5d54858edc93311297fbfcf0df7c8779512025918dcaa8fc3d"
Sep 16 23:57:52 ha-198834 kubelet[2468]: I0916 23:57:52.370599 2468 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="870758f308362bc20e83047a4adf1621caf84b44c5752280d8fc86e4c48fbcab"
Sep 16 23:57:52 ha-198834 kubelet[2468]: I0916 23:57:52.370662 2468 scope.go:117] "RemoveContainer" containerID="fde474653f398ec39c3db826d18aef42dd96b2e13f969de6637124df51136f75"
Sep 16 23:57:52 ha-198834 kubelet[2468]: I0916 23:57:52.388953 2468 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bf6d6b59f24132f5ce3eeb0feb770948fcab77227dc0f50c12a706b85a62d850"
Sep 16 23:57:52 ha-198834 kubelet[2468]: I0916 23:57:52.389033 2468 scope.go:117] "RemoveContainer" containerID="64da07c62c4a9952e882760e7e5b5c04eda9df5e202ce0e9c2bf6fc892deeeea"
Sep 17 00:01:35 ha-198834 kubelet[2468]: I0917 00:01:35.703764 2468 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vt5r6\" (UniqueName: \"kubernetes.io/projected/a7cf1231-2a12-4247-a01a-2c2f02f5f2d8-kube-api-access-vt5r6\") pod \"busybox-7b57f96db7-pstjp\" (UID: \"a7cf1231-2a12-4247-a01a-2c2f02f5f2d8\") " pod="default/busybox-7b57f96db7-pstjp"
-- /stdout --
helpers_test.go:262: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p ha-198834 -n ha-198834
helpers_test.go:269: (dbg) Run: kubectl --context ha-198834 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:293: <<< TestMultiControlPlane/serial/DeployApp FAILED: end of post-mortem logs <<<
helpers_test.go:294: ---------------------/post-mortem---------------------------------
--- FAIL: TestMultiControlPlane/serial/DeployApp (106.80s)