=== RUN TestStartStop/group/old-k8s-version/serial/DeployApp
start_stop_delete_test.go:194: (dbg) Run: kubectl --context old-k8s-version-071895 create -f testdata/busybox.yaml
start_stop_delete_test.go:194: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:352: "busybox" [3abcbd08-d7c4-4a13-b94c-6f6424975411] Pending
helpers_test.go:352: "busybox" [3abcbd08-d7c4-4a13-b94c-6f6424975411] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
helpers_test.go:352: "busybox" [3abcbd08-d7c4-4a13-b94c-6f6424975411] Running
start_stop_delete_test.go:194: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: integration-test=busybox healthy within 11.008889486s
start_stop_delete_test.go:194: (dbg) Run: kubectl --context old-k8s-version-071895 exec busybox -- /bin/sh -c "ulimit -n"
start_stop_delete_test.go:194: 'ulimit -n' returned 1024, expected 1048576
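The pod itself came up healthy within 11s; only the open-file-limit check failed. A minimal Go sketch of the assertion behind the message above, assuming nothing beyond the kubectl invocation shown in the log (illustrative, not the actual test source):

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	// Same command the test ran: read the NOFILE soft limit inside the pod.
	out, err := exec.Command("kubectl", "--context", "old-k8s-version-071895",
		"exec", "busybox", "--", "/bin/sh", "-c", "ulimit -n").CombinedOutput()
	if err != nil {
		fmt.Println("kubectl exec failed:", err)
		return
	}
	got := strings.TrimSpace(string(out))
	if got != "1048576" {
		fmt.Printf("'ulimit -n' returned %s, expected 1048576\n", got)
	}
}

1024 is the usual Linux default soft limit, which suggests the NOFILE ulimit the runtime was expected to apply never reached the container.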
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: docker inspect <======
helpers_test.go:239: (dbg) Run: docker inspect old-k8s-version-071895
helpers_test.go:243: (dbg) docker inspect old-k8s-version-071895:
-- stdout --
[
{
"Id": "cb39490005387f20e45d85449d7cd3926a38c4f6954c93fdb4e9a9d8c1dd56c0",
"Created": "2025-11-29T09:19:35.843753446Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 219639,
"ExitCode": 0,
"Error": "",
"StartedAt": "2025-11-29T09:19:35.922684387Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:572c983e466f1f784136812eef5cc59ac623db764bc7704d3676c4643993fd08",
"ResolvConfPath": "/var/lib/docker/containers/cb39490005387f20e45d85449d7cd3926a38c4f6954c93fdb4e9a9d8c1dd56c0/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/cb39490005387f20e45d85449d7cd3926a38c4f6954c93fdb4e9a9d8c1dd56c0/hostname",
"HostsPath": "/var/lib/docker/containers/cb39490005387f20e45d85449d7cd3926a38c4f6954c93fdb4e9a9d8c1dd56c0/hosts",
"LogPath": "/var/lib/docker/containers/cb39490005387f20e45d85449d7cd3926a38c4f6954c93fdb4e9a9d8c1dd56c0/cb39490005387f20e45d85449d7cd3926a38c4f6954c93fdb4e9a9d8c1dd56c0-json.log",
"Name": "/old-k8s-version-071895",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"old-k8s-version-071895:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {}
},
"NetworkMode": "old-k8s-version-071895",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 3221225472,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 6442450944,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"ID": "cb39490005387f20e45d85449d7cd3926a38c4f6954c93fdb4e9a9d8c1dd56c0",
"LowerDir": "/var/lib/docker/overlay2/39dddc1dab2647088ef22e0a22ddfff676f8c9bdc540988436a11252cc093aa5-init/diff:/var/lib/docker/overlay2/fc2ab0019906b90b3f033fa414f560878b73f7ff0ebdf77a0b554a40813009d9/diff",
"MergedDir": "/var/lib/docker/overlay2/39dddc1dab2647088ef22e0a22ddfff676f8c9bdc540988436a11252cc093aa5/merged",
"UpperDir": "/var/lib/docker/overlay2/39dddc1dab2647088ef22e0a22ddfff676f8c9bdc540988436a11252cc093aa5/diff",
"WorkDir": "/var/lib/docker/overlay2/39dddc1dab2647088ef22e0a22ddfff676f8c9bdc540988436a11252cc093aa5/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "volume",
"Name": "old-k8s-version-071895",
"Source": "/var/lib/docker/volumes/old-k8s-version-071895/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
},
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
}
],
"Config": {
"Hostname": "old-k8s-version-071895",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "old-k8s-version-071895",
"name.minikube.sigs.k8s.io": "old-k8s-version-071895",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "60a614c2d74d8f721c5d191b45e8f8728a313afe9d5488b154acf3a0ac189fb9",
"SandboxKey": "/var/run/docker/netns/60a614c2d74d",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33053"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33054"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33057"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33055"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33056"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"old-k8s-version-071895": {
"IPAMConfig": {
"IPv4Address": "192.168.76.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "56:be:6c:06:cc:ee",
"DriverOpts": null,
"GwPriority": 0,
"NetworkID": "46e34ec2f3d70587bfaede542f848856d8f0dbb2dcdc34fe102884ad13766b95",
"EndpointID": "2663a5dbde2357e0d7269cf1f8d9d8bb11ffe6e49aa8754901238cb93acbbf02",
"Gateway": "192.168.76.1",
"IPAddress": "192.168.76.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"old-k8s-version-071895",
"cb3949000538"
]
}
}
}
}
]
-- /stdout --
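Everything the post-mortem needs from this JSON can also be pulled field-by-field with docker's Go templates; the harness itself does exactly that later in this log to find the published SSH port (33053). A sketch, runnable against the container above:

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	// Same Go template the harness uses below to locate the published SSH port.
	tmpl := `{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}`
	out, err := exec.Command("docker", "container", "inspect",
		"-f", tmpl, "old-k8s-version-071895").Output()
	if err != nil {
		fmt.Println("docker inspect failed:", err)
		return
	}
	// With the inspect output above, this prints 127.0.0.1:33053.
	fmt.Println("ssh endpoint: 127.0.0.1:" + strings.TrimSpace(string(out)))
}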
helpers_test.go:247: (dbg) Run: out/minikube-linux-arm64 status --format={{.Host}} -p old-k8s-version-071895 -n old-k8s-version-071895
helpers_test.go:252: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-arm64 -p old-k8s-version-071895 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-arm64 -p old-k8s-version-071895 logs -n 25: (1.765314466s)
helpers_test.go:260: TestStartStop/group/old-k8s-version/serial/DeployApp logs:
-- stdout --
==> Audit <==
┌─────────┬─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬──────────────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼──────────────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ ssh │ -p cilium-420729 sudo cat /usr/lib/systemd/system/cri-docker.service │ cilium-420729 │ jenkins │ v1.37.0 │ 29 Nov 25 09:15 UTC │ │
│ ssh │ -p cilium-420729 sudo cri-dockerd --version │ cilium-420729 │ jenkins │ v1.37.0 │ 29 Nov 25 09:15 UTC │ │
│ ssh │ -p cilium-420729 sudo systemctl status containerd --all --full --no-pager │ cilium-420729 │ jenkins │ v1.37.0 │ 29 Nov 25 09:15 UTC │ │
│ ssh │ -p cilium-420729 sudo systemctl cat containerd --no-pager │ cilium-420729 │ jenkins │ v1.37.0 │ 29 Nov 25 09:15 UTC │ │
│ ssh │ -p cilium-420729 sudo cat /lib/systemd/system/containerd.service │ cilium-420729 │ jenkins │ v1.37.0 │ 29 Nov 25 09:15 UTC │ │
│ ssh │ -p cilium-420729 sudo cat /etc/containerd/config.toml │ cilium-420729 │ jenkins │ v1.37.0 │ 29 Nov 25 09:15 UTC │ │
│ ssh │ -p cilium-420729 sudo containerd config dump │ cilium-420729 │ jenkins │ v1.37.0 │ 29 Nov 25 09:15 UTC │ │
│ ssh │ -p cilium-420729 sudo systemctl status crio --all --full --no-pager │ cilium-420729 │ jenkins │ v1.37.0 │ 29 Nov 25 09:15 UTC │ │
│ ssh │ -p cilium-420729 sudo systemctl cat crio --no-pager │ cilium-420729 │ jenkins │ v1.37.0 │ 29 Nov 25 09:15 UTC │ │
│ ssh │ -p cilium-420729 sudo find /etc/crio -type f -exec sh -c 'echo {}; cat {}' \; │ cilium-420729 │ jenkins │ v1.37.0 │ 29 Nov 25 09:15 UTC │ │
│ ssh │ -p cilium-420729 sudo crio config │ cilium-420729 │ jenkins │ v1.37.0 │ 29 Nov 25 09:15 UTC │ │
│ delete │ -p cilium-420729 │ cilium-420729 │ jenkins │ v1.37.0 │ 29 Nov 25 09:15 UTC │ 29 Nov 25 09:15 UTC │
│ start │ -p force-systemd-env-559836 --memory=3072 --alsologtostderr -v=5 --driver=docker --container-runtime=containerd │ force-systemd-env-559836 │ jenkins │ v1.37.0 │ 29 Nov 25 09:15 UTC │ 29 Nov 25 09:16 UTC │
│ ssh │ force-systemd-env-559836 ssh cat /etc/containerd/config.toml │ force-systemd-env-559836 │ jenkins │ v1.37.0 │ 29 Nov 25 09:16 UTC │ 29 Nov 25 09:16 UTC │
│ delete │ -p force-systemd-env-559836 │ force-systemd-env-559836 │ jenkins │ v1.37.0 │ 29 Nov 25 09:16 UTC │ 29 Nov 25 09:16 UTC │
│ start │ -p cert-expiration-592440 --memory=3072 --cert-expiration=3m --driver=docker --container-runtime=containerd │ cert-expiration-592440 │ jenkins │ v1.37.0 │ 29 Nov 25 09:16 UTC │ 29 Nov 25 09:17 UTC │
│ delete │ -p running-upgrade-115889 │ running-upgrade-115889 │ jenkins │ v1.37.0 │ 29 Nov 25 09:18 UTC │ 29 Nov 25 09:18 UTC │
│ start │ -p cert-options-515442 --memory=3072 --apiserver-ips=127.0.0.1 --apiserver-ips=192.168.15.15 --apiserver-names=localhost --apiserver-names=www.google.com --apiserver-port=8555 --driver=docker --container-runtime=containerd │ cert-options-515442 │ jenkins │ v1.37.0 │ 29 Nov 25 09:18 UTC │ 29 Nov 25 09:19 UTC │
│ ssh │ cert-options-515442 ssh openssl x509 -text -noout -in /var/lib/minikube/certs/apiserver.crt │ cert-options-515442 │ jenkins │ v1.37.0 │ 29 Nov 25 09:19 UTC │ 29 Nov 25 09:19 UTC │
│ ssh │ -p cert-options-515442 -- sudo cat /etc/kubernetes/admin.conf │ cert-options-515442 │ jenkins │ v1.37.0 │ 29 Nov 25 09:19 UTC │ 29 Nov 25 09:19 UTC │
│ delete │ -p cert-options-515442 │ cert-options-515442 │ jenkins │ v1.37.0 │ 29 Nov 25 09:19 UTC │ 29 Nov 25 09:19 UTC │
│ start │ -p old-k8s-version-071895 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.28.0 │ old-k8s-version-071895 │ jenkins │ v1.37.0 │ 29 Nov 25 09:19 UTC │ 29 Nov 25 09:20 UTC │
│ start │ -p cert-expiration-592440 --memory=3072 --cert-expiration=8760h --driver=docker --container-runtime=containerd │ cert-expiration-592440 │ jenkins │ v1.37.0 │ 29 Nov 25 09:20 UTC │ 29 Nov 25 09:20 UTC │
│ delete │ -p cert-expiration-592440 │ cert-expiration-592440 │ jenkins │ v1.37.0 │ 29 Nov 25 09:20 UTC │ 29 Nov 25 09:20 UTC │
│ start │ -p no-preload-230403 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.34.1 │ no-preload-230403 │ jenkins │ v1.37.0 │ 29 Nov 25 09:20 UTC │ │
└─────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/11/29 09:20:12
Running on machine: ip-172-31-24-2
Binary: Built with gc go1.25.3 for linux/arm64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1129 09:20:12.939624 222878 out.go:360] Setting OutFile to fd 1 ...
I1129 09:20:12.939853 222878 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1129 09:20:12.939881 222878 out.go:374] Setting ErrFile to fd 2...
I1129 09:20:12.939901 222878 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1129 09:20:12.940241 222878 root.go:338] Updating PATH: /home/jenkins/minikube-integration/22000-2317/.minikube/bin
I1129 09:20:12.940820 222878 out.go:368] Setting JSON to false
I1129 09:20:12.941892 222878 start.go:133] hostinfo: {"hostname":"ip-172-31-24-2","uptime":3764,"bootTime":1764404249,"procs":190,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1084-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"6d436adf-771e-4269-b9a3-c25fd4fca4f5"}
I1129 09:20:12.941996 222878 start.go:143] virtualization:
I1129 09:20:12.947843 222878 out.go:179] * [no-preload-230403] minikube v1.37.0 on Ubuntu 20.04 (arm64)
I1129 09:20:12.951543 222878 out.go:179] - MINIKUBE_LOCATION=22000
I1129 09:20:12.951778 222878 notify.go:221] Checking for updates...
I1129 09:20:12.959740 222878 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1129 09:20:12.963748 222878 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/22000-2317/kubeconfig
I1129 09:20:12.967028 222878 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/22000-2317/.minikube
I1129 09:20:12.970194 222878 out.go:179] - MINIKUBE_BIN=out/minikube-linux-arm64
I1129 09:20:12.973266 222878 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1129 09:20:12.976789 222878 config.go:182] Loaded profile config "old-k8s-version-071895": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1129 09:20:12.976879 222878 driver.go:422] Setting default libvirt URI to qemu:///system
I1129 09:20:13.015916 222878 docker.go:124] docker version: linux-28.1.1:Docker Engine - Community
I1129 09:20:13.016116 222878 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1129 09:20:13.089040 222878 info.go:266] docker info: {ID:J4M5:W6MX:GOX4:4LAQ:VI7E:VJNF:J3OP:OPBH:GF7G:PPY4:WQWD:7N4L Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:36 OomKillDisable:true NGoroutines:52 SystemTime:2025-11-29 09:20:13.078615429 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214831104 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-24-2 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I1129 09:20:13.089149 222878 docker.go:319] overlay module found
I1129 09:20:13.094585 222878 out.go:179] * Using the docker driver based on user configuration
I1129 09:20:13.101060 222878 start.go:309] selected driver: docker
I1129 09:20:13.101087 222878 start.go:927] validating driver "docker" against <nil>
I1129 09:20:13.101110 222878 start.go:938] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1129 09:20:13.101860 222878 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1129 09:20:13.162298 222878 info.go:266] docker info: {ID:J4M5:W6MX:GOX4:4LAQ:VI7E:VJNF:J3OP:OPBH:GF7G:PPY4:WQWD:7N4L Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:36 OomKillDisable:true NGoroutines:52 SystemTime:2025-11-29 09:20:13.152737541 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214831104 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-24-2 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I1129 09:20:13.162462 222878 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I1129 09:20:13.162689 222878 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1129 09:20:13.165689 222878 out.go:179] * Using Docker driver with root privileges
I1129 09:20:13.168555 222878 cni.go:84] Creating CNI manager for ""
I1129 09:20:13.168702 222878 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1129 09:20:13.168717 222878 start_flags.go:336] Found "CNI" CNI - setting NetworkPlugin=cni
I1129 09:20:13.168799 222878 start.go:353] cluster config:
{Name:no-preload-230403 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-230403 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1129 09:20:13.171944 222878 out.go:179] * Starting "no-preload-230403" primary control-plane node in "no-preload-230403" cluster
I1129 09:20:13.174795 222878 cache.go:134] Beginning downloading kic base image for docker with containerd
I1129 09:20:13.177867 222878 out.go:179] * Pulling base image v0.0.48-1763789673-21948 ...
I1129 09:20:13.180600 222878 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
I1129 09:20:13.180815 222878 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f in local docker daemon
I1129 09:20:13.180863 222878 cache.go:107] acquiring lock: {Name:mkc9ca05df03f187ae0239342774baa6ad8c9aea Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1129 09:20:13.180958 222878 cache.go:107] acquiring lock: {Name:mk1a5c919477c9b6035d1da624b0b2445dbe0e73 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1129 09:20:13.181026 222878 cache.go:115] /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-apiserver_v1.34.1 exists
I1129 09:20:13.181043 222878 cache.go:96] cache image "registry.k8s.io/kube-apiserver:v1.34.1" -> "/home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-apiserver_v1.34.1" took 86.212µs
I1129 09:20:13.181062 222878 cache.go:80] save to tar file registry.k8s.io/kube-apiserver:v1.34.1 -> /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-apiserver_v1.34.1 succeeded
I1129 09:20:13.181080 222878 cache.go:107] acquiring lock: {Name:mk74fc1ce0ee5a4f599a03d41c7dab91b2a2e933 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1129 09:20:13.181115 222878 cache.go:115] /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-controller-manager_v1.34.1 exists
I1129 09:20:13.181125 222878 cache.go:96] cache image "registry.k8s.io/kube-controller-manager:v1.34.1" -> "/home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-controller-manager_v1.34.1" took 46.598µs
I1129 09:20:13.181131 222878 cache.go:80] save to tar file registry.k8s.io/kube-controller-manager:v1.34.1 -> /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-controller-manager_v1.34.1 succeeded
I1129 09:20:13.181141 222878 cache.go:107] acquiring lock: {Name:mk8695629c5903582c523a837d766d417499d914 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1129 09:20:13.181179 222878 cache.go:115] /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-scheduler_v1.34.1 exists
I1129 09:20:13.181189 222878 cache.go:96] cache image "registry.k8s.io/kube-scheduler:v1.34.1" -> "/home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-scheduler_v1.34.1" took 49.445µs
I1129 09:20:13.181196 222878 cache.go:80] save to tar file registry.k8s.io/kube-scheduler:v1.34.1 -> /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-scheduler_v1.34.1 succeeded
I1129 09:20:13.181205 222878 cache.go:107] acquiring lock: {Name:mk6962b4fc4c58f41448580e388a757daf8f6018 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1129 09:20:13.181239 222878 cache.go:115] /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-proxy_v1.34.1 exists
I1129 09:20:13.181249 222878 cache.go:96] cache image "registry.k8s.io/kube-proxy:v1.34.1" -> "/home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-proxy_v1.34.1" took 44.94µs
I1129 09:20:13.181255 222878 cache.go:80] save to tar file registry.k8s.io/kube-proxy:v1.34.1 -> /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-proxy_v1.34.1 succeeded
I1129 09:20:13.181269 222878 cache.go:107] acquiring lock: {Name:mk75f52747e0531666c302459e925614b33b76b2 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1129 09:20:13.181314 222878 cache.go:115] /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/pause_3.10.1 exists
I1129 09:20:13.181323 222878 cache.go:96] cache image "registry.k8s.io/pause:3.10.1" -> "/home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/pause_3.10.1" took 55.639µs
I1129 09:20:13.181332 222878 cache.go:80] save to tar file registry.k8s.io/pause:3.10.1 -> /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/pause_3.10.1 succeeded
I1129 09:20:13.181345 222878 cache.go:107] acquiring lock: {Name:mke59d5887f27460b7717e6fa1d7c7be222b2ad7 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1129 09:20:13.181380 222878 cache.go:115] /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/etcd_3.6.4-0 exists
I1129 09:20:13.181391 222878 cache.go:96] cache image "registry.k8s.io/etcd:3.6.4-0" -> "/home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/etcd_3.6.4-0" took 46.433µs
I1129 09:20:13.181396 222878 cache.go:80] save to tar file registry.k8s.io/etcd:3.6.4-0 -> /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/etcd_3.6.4-0 succeeded
I1129 09:20:13.181409 222878 cache.go:107] acquiring lock: {Name:mkece740ade6508db73b1e245e73f976785e2996 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1129 09:20:13.181442 222878 cache.go:115] /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/coredns/coredns_v1.12.1 exists
I1129 09:20:13.181450 222878 cache.go:96] cache image "registry.k8s.io/coredns/coredns:v1.12.1" -> "/home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/coredns/coredns_v1.12.1" took 45.654µs
I1129 09:20:13.181455 222878 cache.go:80] save to tar file registry.k8s.io/coredns/coredns:v1.12.1 -> /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/coredns/coredns_v1.12.1 succeeded
I1129 09:20:13.181552 222878 profile.go:143] Saving config to /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/config.json ...
I1129 09:20:13.181573 222878 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/config.json: {Name:mkedfced3d2b7fa7d1f9faae9aecd4cdc6897bf4 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:20:13.181779 222878 cache.go:115] /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/gcr.io/k8s-minikube/storage-provisioner_v5 exists
I1129 09:20:13.181796 222878 cache.go:96] cache image "gcr.io/k8s-minikube/storage-provisioner:v5" -> "/home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/gcr.io/k8s-minikube/storage-provisioner_v5" took 942.365µs
I1129 09:20:13.181804 222878 cache.go:80] save to tar file gcr.io/k8s-minikube/storage-provisioner:v5 -> /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/gcr.io/k8s-minikube/storage-provisioner_v5 succeeded
I1129 09:20:13.181857 222878 cache.go:87] Successfully saved all images to host disk.
I1129 09:20:13.201388 222878 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f in local docker daemon, skipping pull
I1129 09:20:13.201410 222878 cache.go:158] gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f exists in daemon, skipping load
I1129 09:20:13.201431 222878 cache.go:243] Successfully downloaded all kic artifacts
I1129 09:20:13.201462 222878 start.go:360] acquireMachinesLock for no-preload-230403: {Name:mk2a91c20925489376678f93ce44b3d1de57601f Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1129 09:20:13.201622 222878 start.go:364] duration metric: took 139.242µs to acquireMachinesLock for "no-preload-230403"
I1129 09:20:13.201663 222878 start.go:93] Provisioning new machine with config: &{Name:no-preload-230403 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-230403 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1129 09:20:13.201746 222878 start.go:125] createHost starting for "" (driver="docker")
I1129 09:20:09.378511 219229 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:20:09.878391 219229 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:20:10.379008 219229 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:20:10.879016 219229 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:20:11.378477 219229 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:20:11.879067 219229 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:20:12.378498 219229 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:20:12.878370 219229 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:20:13.378426 219229 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:20:13.879213 219229 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:20:14.378760 219229 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:20:14.880612 219229 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:20:15.379061 219229 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:20:15.530412 219229 kubeadm.go:1114] duration metric: took 11.369681639s to wait for elevateKubeSystemPrivileges
I1129 09:20:15.530446 219229 kubeadm.go:403] duration metric: took 31.525981112s to StartCluster
I1129 09:20:15.530463 219229 settings.go:142] acquiring lock: {Name:mk44917d1324740eeda65bf3aa312ad1561d3ed4 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:20:15.530529 219229 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/22000-2317/kubeconfig
I1129 09:20:15.531211 219229 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-2317/kubeconfig: {Name:mk3c09eb9158ba85342a695b6ac4b1a5f69e1b04 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:20:15.531425 219229 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1129 09:20:15.531520 219229 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1129 09:20:15.531760 219229 config.go:182] Loaded profile config "old-k8s-version-071895": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1129 09:20:15.531752 219229 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1129 09:20:15.531869 219229 addons.go:70] Setting storage-provisioner=true in profile "old-k8s-version-071895"
I1129 09:20:15.531886 219229 addons.go:239] Setting addon storage-provisioner=true in "old-k8s-version-071895"
I1129 09:20:15.531914 219229 host.go:66] Checking if "old-k8s-version-071895" exists ...
I1129 09:20:15.532442 219229 cli_runner.go:164] Run: docker container inspect old-k8s-version-071895 --format={{.State.Status}}
I1129 09:20:15.532702 219229 addons.go:70] Setting default-storageclass=true in profile "old-k8s-version-071895"
I1129 09:20:15.532736 219229 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "old-k8s-version-071895"
I1129 09:20:15.533094 219229 cli_runner.go:164] Run: docker container inspect old-k8s-version-071895 --format={{.State.Status}}
I1129 09:20:15.536113 219229 out.go:179] * Verifying Kubernetes components...
I1129 09:20:15.539443 219229 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1129 09:20:15.574128 219229 addons.go:239] Setting addon default-storageclass=true in "old-k8s-version-071895"
I1129 09:20:15.574169 219229 host.go:66] Checking if "old-k8s-version-071895" exists ...
I1129 09:20:15.574614 219229 cli_runner.go:164] Run: docker container inspect old-k8s-version-071895 --format={{.State.Status}}
I1129 09:20:15.575661 219229 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1129 09:20:15.578616 219229 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1129 09:20:15.578636 219229 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1129 09:20:15.578703 219229 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-071895
I1129 09:20:15.596399 219229 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1129 09:20:15.596427 219229 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1129 09:20:15.596503 219229 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-071895
I1129 09:20:15.630157 219229 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33053 SSHKeyPath:/home/jenkins/minikube-integration/22000-2317/.minikube/machines/old-k8s-version-071895/id_rsa Username:docker}
I1129 09:20:15.639128 219229 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33053 SSHKeyPath:/home/jenkins/minikube-integration/22000-2317/.minikube/machines/old-k8s-version-071895/id_rsa Username:docker}
I1129 09:20:15.896152 219229 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1129 09:20:15.896336 219229 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.76.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I1129 09:20:16.015161 219229 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1129 09:20:16.026843 219229 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1129 09:20:17.194520 219229 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.76.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -": (1.298139458s)
I1129 09:20:17.194560 219229 start.go:977] {"host.minikube.internal": 192.168.76.1} host record injected into CoreDNS's ConfigMap
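The sed pipeline just completed rewrites the coredns ConfigMap in place: it inserts a log directive ahead of the errors plugin and, ahead of the forward plugin, a hosts block. Reconstructed from the sed expression itself, the injected stanza is:

   hosts {
      192.168.76.1 host.minikube.internal
      fallthrough
   }

which is what lets pods resolve host.minikube.internal to the docker network gateway (192.168.76.1, per the inspect output earlier).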
I1129 09:20:17.195641 219229 ssh_runner.go:235] Completed: sudo systemctl start kubelet: (1.299459942s)
I1129 09:20:17.196336 219229 node_ready.go:35] waiting up to 6m0s for node "old-k8s-version-071895" to be "Ready" ...
I1129 09:20:17.598641 219229 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (1.583439516s)
I1129 09:20:17.598752 219229 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: (1.571873758s)
I1129 09:20:17.633446 219229 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
I1129 09:20:13.207006 222878 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I1129 09:20:13.207293 222878 start.go:159] libmachine.API.Create for "no-preload-230403" (driver="docker")
I1129 09:20:13.207340 222878 client.go:173] LocalClient.Create starting
I1129 09:20:13.207488 222878 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/22000-2317/.minikube/certs/ca.pem
I1129 09:20:13.207529 222878 main.go:143] libmachine: Decoding PEM data...
I1129 09:20:13.207573 222878 main.go:143] libmachine: Parsing certificate...
I1129 09:20:13.207655 222878 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/22000-2317/.minikube/certs/cert.pem
I1129 09:20:13.207690 222878 main.go:143] libmachine: Decoding PEM data...
I1129 09:20:13.207710 222878 main.go:143] libmachine: Parsing certificate...
I1129 09:20:13.208128 222878 cli_runner.go:164] Run: docker network inspect no-preload-230403 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1129 09:20:13.227770 222878 cli_runner.go:211] docker network inspect no-preload-230403 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1129 09:20:13.227856 222878 network_create.go:284] running [docker network inspect no-preload-230403] to gather additional debugging logs...
I1129 09:20:13.227880 222878 cli_runner.go:164] Run: docker network inspect no-preload-230403
W1129 09:20:13.250504 222878 cli_runner.go:211] docker network inspect no-preload-230403 returned with exit code 1
I1129 09:20:13.250537 222878 network_create.go:287] error running [docker network inspect no-preload-230403]: docker network inspect no-preload-230403: exit status 1
stdout:
[]
stderr:
Error response from daemon: network no-preload-230403 not found
I1129 09:20:13.250551 222878 network_create.go:289] output of [docker network inspect no-preload-230403]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network no-preload-230403 not found
** /stderr **
I1129 09:20:13.250655 222878 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1129 09:20:13.269213 222878 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-8664e809540f IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:c2:5a:a5:48:89:fb} reservation:<nil>}
I1129 09:20:13.269665 222878 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-fe5a1fed3d29 IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:8e:0c:ca:69:14:77} reservation:<nil>}
I1129 09:20:13.270007 222878 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-c3b36bc67c6b IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:02:2d:06:dd:2d:03} reservation:<nil>}
I1129 09:20:13.270333 222878 network.go:211] skipping subnet 192.168.76.0/24 that is taken: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName:br-46e34ec2f3d7 IfaceIPv4:192.168.76.1 IfaceMTU:1500 IfaceMAC:7a:63:b9:c9:b8:a0} reservation:<nil>}
I1129 09:20:13.270853 222878 network.go:206] using free private subnet 192.168.85.0/24: &{IP:192.168.85.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.85.0/24 Gateway:192.168.85.1 ClientMin:192.168.85.2 ClientMax:192.168.85.254 Broadcast:192.168.85.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0x4001a000e0}
I1129 09:20:13.270885 222878 network_create.go:124] attempt to create docker network no-preload-230403 192.168.85.0/24 with gateway 192.168.85.1 and MTU of 1500 ...
I1129 09:20:13.270944 222878 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.85.0/24 --gateway=192.168.85.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=no-preload-230403 no-preload-230403
I1129 09:20:13.339116 222878 network_create.go:108] docker network no-preload-230403 192.168.85.0/24 created
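The subnet scan above is mechanical: candidate ranges step the third octet by 9 starting from 192.168.49.0/24, ranges already claimed by an existing bridge are skipped, and the first free one wins (192.168.85.0/24 here). A Go sketch of that walk, assuming the fixed start and step visible in this log rather than minikube's actual network package:

package main

import "fmt"

func main() {
	// Octets already claimed by the bridges the log reported
	// (br-8664e809540f, br-fe5a1fed3d29, br-c3b36bc67c6b, br-46e34ec2f3d7).
	taken := map[int]bool{49: true, 58: true, 67: true, 76: true}
	for octet := 49; octet <= 254; octet += 9 {
		subnet := fmt.Sprintf("192.168.%d.0/24", octet)
		if taken[octet] {
			fmt.Println("skipping subnet", subnet, "that is taken")
			continue
		}
		fmt.Println("using free private subnet", subnet)
		break
	}
}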
I1129 09:20:13.339148 222878 kic.go:121] calculated static IP "192.168.85.2" for the "no-preload-230403" container
I1129 09:20:13.339222 222878 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1129 09:20:13.358931 222878 cli_runner.go:164] Run: docker volume create no-preload-230403 --label name.minikube.sigs.k8s.io=no-preload-230403 --label created_by.minikube.sigs.k8s.io=true
I1129 09:20:13.376848 222878 oci.go:103] Successfully created a docker volume no-preload-230403
I1129 09:20:13.376977 222878 cli_runner.go:164] Run: docker run --rm --name no-preload-230403-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=no-preload-230403 --entrypoint /usr/bin/test -v no-preload-230403:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -d /var/lib
I1129 09:20:13.960824 222878 oci.go:107] Successfully prepared a docker volume no-preload-230403
I1129 09:20:13.960886 222878 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
W1129 09:20:13.961020 222878 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I1129 09:20:13.961137 222878 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1129 09:20:14.052602 222878 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname no-preload-230403 --name no-preload-230403 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=no-preload-230403 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=no-preload-230403 --network no-preload-230403 --ip 192.168.85.2 --volume no-preload-230403:/var --security-opt apparmor=unconfined --memory=3072mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f
I1129 09:20:14.434508 222878 cli_runner.go:164] Run: docker container inspect no-preload-230403 --format={{.State.Running}}
I1129 09:20:14.469095 222878 cli_runner.go:164] Run: docker container inspect no-preload-230403 --format={{.State.Status}}
I1129 09:20:14.505837 222878 cli_runner.go:164] Run: docker exec no-preload-230403 stat /var/lib/dpkg/alternatives/iptables
I1129 09:20:14.574820 222878 oci.go:144] the created container "no-preload-230403" has a running status.
I1129 09:20:14.574847 222878 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/22000-2317/.minikube/machines/no-preload-230403/id_rsa...
I1129 09:20:14.765899 222878 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/22000-2317/.minikube/machines/no-preload-230403/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1129 09:20:14.803197 222878 cli_runner.go:164] Run: docker container inspect no-preload-230403 --format={{.State.Status}}
I1129 09:20:14.838341 222878 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1129 09:20:14.838366 222878 kic_runner.go:114] Args: [docker exec --privileged no-preload-230403 chown docker:docker /home/docker/.ssh/authorized_keys]
I1129 09:20:14.971747 222878 cli_runner.go:164] Run: docker container inspect no-preload-230403 --format={{.State.Status}}
I1129 09:20:14.997195 222878 machine.go:94] provisionDockerMachine start ...
I1129 09:20:14.997331 222878 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-230403
I1129 09:20:15.036227 222878 main.go:143] libmachine: Using SSH client type: native
I1129 09:20:15.036638 222878 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3dad70] 0x3dd270 <nil> [] 0s} 127.0.0.1 33058 <nil> <nil>}
I1129 09:20:15.036651 222878 main.go:143] libmachine: About to run SSH command:
hostname
I1129 09:20:15.042876 222878 main.go:143] libmachine: Error dialing TCP: ssh: handshake failed: EOF
I1129 09:20:17.636479 219229 addons.go:530] duration metric: took 2.104720222s for enable addons: enabled=[storage-provisioner default-storageclass]
I1129 09:20:17.699584 219229 kapi.go:214] "coredns" deployment in "kube-system" namespace and "old-k8s-version-071895" context rescaled to 1 replicas
W1129 09:20:19.201224 219229 node_ready.go:57] node "old-k8s-version-071895" has "Ready":"False" status (will retry)
I1129 09:20:18.208511 222878 main.go:143] libmachine: SSH cmd err, output: <nil>: no-preload-230403
I1129 09:20:18.208576 222878 ubuntu.go:182] provisioning hostname "no-preload-230403"
I1129 09:20:18.208750 222878 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-230403
I1129 09:20:18.231955 222878 main.go:143] libmachine: Using SSH client type: native
I1129 09:20:18.232303 222878 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3dad70] 0x3dd270 <nil> [] 0s} 127.0.0.1 33058 <nil> <nil>}
I1129 09:20:18.232314 222878 main.go:143] libmachine: About to run SSH command:
sudo hostname no-preload-230403 && echo "no-preload-230403" | sudo tee /etc/hostname
I1129 09:20:18.417308 222878 main.go:143] libmachine: SSH cmd err, output: <nil>: no-preload-230403
I1129 09:20:18.417502 222878 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-230403
I1129 09:20:18.446833 222878 main.go:143] libmachine: Using SSH client type: native
I1129 09:20:18.447196 222878 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3dad70] 0x3dd270 <nil> [] 0s} 127.0.0.1 33058 <nil> <nil>}
I1129 09:20:18.447217 222878 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sno-preload-230403' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 no-preload-230403/g' /etc/hosts;
else
echo '127.0.1.1 no-preload-230403' | sudo tee -a /etc/hosts;
fi
fi
I1129 09:20:18.609294 222878 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1129 09:20:18.609323 222878 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/22000-2317/.minikube CaCertPath:/home/jenkins/minikube-integration/22000-2317/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/22000-2317/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/22000-2317/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/22000-2317/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/22000-2317/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/22000-2317/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/22000-2317/.minikube}
I1129 09:20:18.609357 222878 ubuntu.go:190] setting up certificates
I1129 09:20:18.609367 222878 provision.go:84] configureAuth start
I1129 09:20:18.609424 222878 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" no-preload-230403
I1129 09:20:18.633658 222878 provision.go:143] copyHostCerts
I1129 09:20:18.633724 222878 exec_runner.go:144] found /home/jenkins/minikube-integration/22000-2317/.minikube/ca.pem, removing ...
I1129 09:20:18.633733 222878 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22000-2317/.minikube/ca.pem
I1129 09:20:18.633804 222878 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22000-2317/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/22000-2317/.minikube/ca.pem (1082 bytes)
I1129 09:20:18.633884 222878 exec_runner.go:144] found /home/jenkins/minikube-integration/22000-2317/.minikube/cert.pem, removing ...
I1129 09:20:18.633890 222878 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22000-2317/.minikube/cert.pem
I1129 09:20:18.633917 222878 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22000-2317/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/22000-2317/.minikube/cert.pem (1123 bytes)
I1129 09:20:18.633975 222878 exec_runner.go:144] found /home/jenkins/minikube-integration/22000-2317/.minikube/key.pem, removing ...
I1129 09:20:18.633979 222878 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22000-2317/.minikube/key.pem
I1129 09:20:18.634022 222878 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22000-2317/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/22000-2317/.minikube/key.pem (1679 bytes)
I1129 09:20:18.634072 222878 provision.go:117] generating server cert: /home/jenkins/minikube-integration/22000-2317/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/22000-2317/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/22000-2317/.minikube/certs/ca-key.pem org=jenkins.no-preload-230403 san=[127.0.0.1 192.168.85.2 localhost minikube no-preload-230403]
I1129 09:20:18.830643 222878 provision.go:177] copyRemoteCerts
I1129 09:20:18.830732 222878 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1129 09:20:18.830804 222878 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-230403
I1129 09:20:18.849046 222878 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33058 SSHKeyPath:/home/jenkins/minikube-integration/22000-2317/.minikube/machines/no-preload-230403/id_rsa Username:docker}
I1129 09:20:18.957503 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1129 09:20:18.982683 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/machines/server.pem --> /etc/docker/server.pem (1220 bytes)
I1129 09:20:19.017142 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I1129 09:20:19.036354 222878 provision.go:87] duration metric: took 426.964935ms to configureAuth
I1129 09:20:19.036391 222878 ubuntu.go:206] setting minikube options for container-runtime
I1129 09:20:19.036594 222878 config.go:182] Loaded profile config "no-preload-230403": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1129 09:20:19.036608 222878 machine.go:97] duration metric: took 4.039383275s to provisionDockerMachine
I1129 09:20:19.036705 222878 client.go:176] duration metric: took 5.829342348s to LocalClient.Create
I1129 09:20:19.036723 222878 start.go:167] duration metric: took 5.829433418s to libmachine.API.Create "no-preload-230403"
I1129 09:20:19.036733 222878 start.go:293] postStartSetup for "no-preload-230403" (driver="docker")
I1129 09:20:19.036744 222878 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1129 09:20:19.036810 222878 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1129 09:20:19.036863 222878 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-230403
I1129 09:20:19.054558 222878 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33058 SSHKeyPath:/home/jenkins/minikube-integration/22000-2317/.minikube/machines/no-preload-230403/id_rsa Username:docker}
I1129 09:20:19.161154 222878 ssh_runner.go:195] Run: cat /etc/os-release
I1129 09:20:19.165056 222878 main.go:143] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1129 09:20:19.165086 222878 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1129 09:20:19.165116 222878 filesync.go:126] Scanning /home/jenkins/minikube-integration/22000-2317/.minikube/addons for local assets ...
I1129 09:20:19.165196 222878 filesync.go:126] Scanning /home/jenkins/minikube-integration/22000-2317/.minikube/files for local assets ...
I1129 09:20:19.165294 222878 filesync.go:149] local asset: /home/jenkins/minikube-integration/22000-2317/.minikube/files/etc/ssl/certs/41372.pem -> 41372.pem in /etc/ssl/certs
I1129 09:20:19.165459 222878 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1129 09:20:19.175008 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/files/etc/ssl/certs/41372.pem --> /etc/ssl/certs/41372.pem (1708 bytes)
I1129 09:20:19.202166 222878 start.go:296] duration metric: took 165.419871ms for postStartSetup
I1129 09:20:19.202535 222878 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" no-preload-230403
I1129 09:20:19.222107 222878 profile.go:143] Saving config to /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/config.json ...
I1129 09:20:19.222396 222878 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1129 09:20:19.222436 222878 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-230403
I1129 09:20:19.240201 222878 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33058 SSHKeyPath:/home/jenkins/minikube-integration/22000-2317/.minikube/machines/no-preload-230403/id_rsa Username:docker}
I1129 09:20:19.346358 222878 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1129 09:20:19.351907 222878 start.go:128] duration metric: took 6.150146246s to createHost
I1129 09:20:19.351975 222878 start.go:83] releasing machines lock for "no-preload-230403", held for 6.150337057s
I1129 09:20:19.352082 222878 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" no-preload-230403
I1129 09:20:19.369647 222878 ssh_runner.go:195] Run: cat /version.json
I1129 09:20:19.369701 222878 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-230403
I1129 09:20:19.369794 222878 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1129 09:20:19.369854 222878 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-230403
I1129 09:20:19.412764 222878 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33058 SSHKeyPath:/home/jenkins/minikube-integration/22000-2317/.minikube/machines/no-preload-230403/id_rsa Username:docker}
I1129 09:20:19.422423 222878 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33058 SSHKeyPath:/home/jenkins/minikube-integration/22000-2317/.minikube/machines/no-preload-230403/id_rsa Username:docker}
I1129 09:20:19.618519 222878 ssh_runner.go:195] Run: systemctl --version
I1129 09:20:19.626187 222878 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1129 09:20:19.630590 222878 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1129 09:20:19.630681 222878 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1129 09:20:19.659536 222878 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/10-crio-bridge.conflist.disabled] bridge cni config(s)
I1129 09:20:19.659559 222878 start.go:496] detecting cgroup driver to use...
I1129 09:20:19.659594 222878 detect.go:187] detected "cgroupfs" cgroup driver on host os
I1129 09:20:19.659644 222878 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1129 09:20:19.675641 222878 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1129 09:20:19.690722 222878 docker.go:218] disabling cri-docker service (if available) ...
I1129 09:20:19.690795 222878 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1129 09:20:19.710602 222878 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1129 09:20:19.735104 222878 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1129 09:20:19.862098 222878 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1129 09:20:20.020548 222878 docker.go:234] disabling docker service ...
I1129 09:20:20.020764 222878 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1129 09:20:20.049579 222878 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1129 09:20:20.066560 222878 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1129 09:20:20.195869 222878 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1129 09:20:20.317681 222878 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1129 09:20:20.332092 222878 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1129 09:20:20.348128 222878 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I1129 09:20:20.359261 222878 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1129 09:20:20.369657 222878 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I1129 09:20:20.369726 222878 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I1129 09:20:20.379235 222878 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1129 09:20:20.388089 222878 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1129 09:20:20.397442 222878 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1129 09:20:20.406391 222878 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1129 09:20:20.414674 222878 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1129 09:20:20.423896 222878 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1129 09:20:20.432684 222878 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1129 09:20:20.441584 222878 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1129 09:20:20.449626 222878 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1129 09:20:20.458580 222878 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1129 09:20:20.578649 222878 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1129 09:20:20.669910 222878 start.go:543] Will wait 60s for socket path /run/containerd/containerd.sock
I1129 09:20:20.670001 222878 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I1129 09:20:20.674049 222878 start.go:564] Will wait 60s for crictl version
I1129 09:20:20.674121 222878 ssh_runner.go:195] Run: which crictl
I1129 09:20:20.677882 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1129 09:20:20.711552 222878 start.go:580] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: v2.1.5
RuntimeApiVersion: v1
I1129 09:20:20.711620 222878 ssh_runner.go:195] Run: containerd --version
I1129 09:20:20.734338 222878 ssh_runner.go:195] Run: containerd --version
I1129 09:20:20.760452 222878 out.go:179] * Preparing Kubernetes v1.34.1 on containerd 2.1.5 ...
I1129 09:20:20.763394 222878 cli_runner.go:164] Run: docker network inspect no-preload-230403 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1129 09:20:20.779886 222878 ssh_runner.go:195] Run: grep 192.168.85.1 host.minikube.internal$ /etc/hosts
I1129 09:20:20.783617 222878 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.85.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1129 09:20:20.793588 222878 kubeadm.go:884] updating cluster {Name:no-preload-230403 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-230403 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1129 09:20:20.793740 222878 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
I1129 09:20:20.793820 222878 ssh_runner.go:195] Run: sudo crictl images --output json
I1129 09:20:20.818996 222878 containerd.go:623] couldn't find preloaded image for "registry.k8s.io/kube-apiserver:v1.34.1". assuming images are not preloaded.
I1129 09:20:20.819021 222878 cache_images.go:90] LoadCachedImages start: [registry.k8s.io/kube-apiserver:v1.34.1 registry.k8s.io/kube-controller-manager:v1.34.1 registry.k8s.io/kube-scheduler:v1.34.1 registry.k8s.io/kube-proxy:v1.34.1 registry.k8s.io/pause:3.10.1 registry.k8s.io/etcd:3.6.4-0 registry.k8s.io/coredns/coredns:v1.12.1 gcr.io/k8s-minikube/storage-provisioner:v5]
I1129 09:20:20.819075 222878 image.go:138] retrieving image: gcr.io/k8s-minikube/storage-provisioner:v5
I1129 09:20:20.819290 222878 image.go:138] retrieving image: registry.k8s.io/kube-apiserver:v1.34.1
I1129 09:20:20.819377 222878 image.go:138] retrieving image: registry.k8s.io/kube-controller-manager:v1.34.1
I1129 09:20:20.819472 222878 image.go:138] retrieving image: registry.k8s.io/kube-scheduler:v1.34.1
I1129 09:20:20.819580 222878 image.go:138] retrieving image: registry.k8s.io/kube-proxy:v1.34.1
I1129 09:20:20.819670 222878 image.go:138] retrieving image: registry.k8s.io/pause:3.10.1
I1129 09:20:20.819757 222878 image.go:138] retrieving image: registry.k8s.io/etcd:3.6.4-0
I1129 09:20:20.819836 222878 image.go:138] retrieving image: registry.k8s.io/coredns/coredns:v1.12.1
I1129 09:20:20.820993 222878 image.go:181] daemon lookup for registry.k8s.io/kube-scheduler:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-scheduler:v1.34.1
I1129 09:20:20.821570 222878 image.go:181] daemon lookup for registry.k8s.io/kube-apiserver:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-apiserver:v1.34.1
I1129 09:20:20.821829 222878 image.go:181] daemon lookup for registry.k8s.io/kube-controller-manager:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-controller-manager:v1.34.1
I1129 09:20:20.821983 222878 image.go:181] daemon lookup for gcr.io/k8s-minikube/storage-provisioner:v5: Error response from daemon: No such image: gcr.io/k8s-minikube/storage-provisioner:v5
I1129 09:20:20.822235 222878 image.go:181] daemon lookup for registry.k8s.io/etcd:3.6.4-0: Error response from daemon: No such image: registry.k8s.io/etcd:3.6.4-0
I1129 09:20:20.822385 222878 image.go:181] daemon lookup for registry.k8s.io/coredns/coredns:v1.12.1: Error response from daemon: No such image: registry.k8s.io/coredns/coredns:v1.12.1
I1129 09:20:20.822667 222878 image.go:181] daemon lookup for registry.k8s.io/pause:3.10.1: Error response from daemon: No such image: registry.k8s.io/pause:3.10.1
I1129 09:20:20.823079 222878 image.go:181] daemon lookup for registry.k8s.io/kube-proxy:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-proxy:v1.34.1
I1129 09:20:21.122603 222878 containerd.go:267] Checking existence of image with name "registry.k8s.io/pause:3.10.1" and sha "d7b100cd9a77ba782c5e428c8dd5a1df4a1e79d4cb6294acd7d01290ab3babbd"
I1129 09:20:21.122681 222878 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/pause:3.10.1
I1129 09:20:21.142272 222878 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-controller-manager:v1.34.1" and sha "7eb2c6ff0c5a768fd309321bc2ade0e4e11afcf4f2017ef1d0ff00d91fdf992a"
I1129 09:20:21.142372 222878 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-controller-manager:v1.34.1
I1129 09:20:21.156765 222878 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-scheduler:v1.34.1" and sha "b5f57ec6b98676d815366685a0422bd164ecf0732540b79ac51b1186cef97ff0"
I1129 09:20:21.156842 222878 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-scheduler:v1.34.1
I1129 09:20:21.158253 222878 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-apiserver:v1.34.1" and sha "43911e833d64d4f30460862fc0c54bb61999d60bc7d063feca71e9fc610d5196"
I1129 09:20:21.158318 222878 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-apiserver:v1.34.1
I1129 09:20:21.159304 222878 containerd.go:267] Checking existence of image with name "registry.k8s.io/coredns/coredns:v1.12.1" and sha "138784d87c9c50f8e59412544da4cf4928d61ccbaf93b9f5898a3ba406871bfc"
I1129 09:20:21.159366 222878 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/coredns/coredns:v1.12.1
I1129 09:20:21.163083 222878 containerd.go:267] Checking existence of image with name "registry.k8s.io/etcd:3.6.4-0" and sha "a1894772a478e07c67a56e8bf32335fdbe1dd4ec96976a5987083164bd00bc0e"
I1129 09:20:21.163151 222878 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/etcd:3.6.4-0
I1129 09:20:21.163275 222878 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-proxy:v1.34.1" and sha "05baa95f5142d87797a2bc1d3d11edfb0bf0a9236d436243d15061fae8b58cb9"
I1129 09:20:21.163342 222878 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-proxy:v1.34.1
I1129 09:20:21.165618 222878 cache_images.go:118] "registry.k8s.io/pause:3.10.1" needs transfer: "registry.k8s.io/pause:3.10.1" does not exist at hash "d7b100cd9a77ba782c5e428c8dd5a1df4a1e79d4cb6294acd7d01290ab3babbd" in container runtime
I1129 09:20:21.165704 222878 cri.go:218] Removing image: registry.k8s.io/pause:3.10.1
I1129 09:20:21.165791 222878 ssh_runner.go:195] Run: which crictl
I1129 09:20:21.179345 222878 cache_images.go:118] "registry.k8s.io/kube-controller-manager:v1.34.1" needs transfer: "registry.k8s.io/kube-controller-manager:v1.34.1" does not exist at hash "7eb2c6ff0c5a768fd309321bc2ade0e4e11afcf4f2017ef1d0ff00d91fdf992a" in container runtime
I1129 09:20:21.179432 222878 cri.go:218] Removing image: registry.k8s.io/kube-controller-manager:v1.34.1
I1129 09:20:21.179520 222878 ssh_runner.go:195] Run: which crictl
I1129 09:20:21.225665 222878 cache_images.go:118] "registry.k8s.io/kube-scheduler:v1.34.1" needs transfer: "registry.k8s.io/kube-scheduler:v1.34.1" does not exist at hash "b5f57ec6b98676d815366685a0422bd164ecf0732540b79ac51b1186cef97ff0" in container runtime
I1129 09:20:21.225755 222878 cri.go:218] Removing image: registry.k8s.io/kube-scheduler:v1.34.1
I1129 09:20:21.225854 222878 ssh_runner.go:195] Run: which crictl
I1129 09:20:21.225939 222878 cache_images.go:118] "registry.k8s.io/kube-apiserver:v1.34.1" needs transfer: "registry.k8s.io/kube-apiserver:v1.34.1" does not exist at hash "43911e833d64d4f30460862fc0c54bb61999d60bc7d063feca71e9fc610d5196" in container runtime
I1129 09:20:21.225991 222878 cri.go:218] Removing image: registry.k8s.io/kube-apiserver:v1.34.1
I1129 09:20:21.226032 222878 ssh_runner.go:195] Run: which crictl
I1129 09:20:21.226126 222878 cache_images.go:118] "registry.k8s.io/coredns/coredns:v1.12.1" needs transfer: "registry.k8s.io/coredns/coredns:v1.12.1" does not exist at hash "138784d87c9c50f8e59412544da4cf4928d61ccbaf93b9f5898a3ba406871bfc" in container runtime
I1129 09:20:21.226162 222878 cri.go:218] Removing image: registry.k8s.io/coredns/coredns:v1.12.1
I1129 09:20:21.226209 222878 ssh_runner.go:195] Run: which crictl
I1129 09:20:21.237496 222878 cache_images.go:118] "registry.k8s.io/etcd:3.6.4-0" needs transfer: "registry.k8s.io/etcd:3.6.4-0" does not exist at hash "a1894772a478e07c67a56e8bf32335fdbe1dd4ec96976a5987083164bd00bc0e" in container runtime
I1129 09:20:21.237581 222878 cri.go:218] Removing image: registry.k8s.io/etcd:3.6.4-0
I1129 09:20:21.237665 222878 ssh_runner.go:195] Run: which crictl
I1129 09:20:21.239070 222878 cache_images.go:118] "registry.k8s.io/kube-proxy:v1.34.1" needs transfer: "registry.k8s.io/kube-proxy:v1.34.1" does not exist at hash "05baa95f5142d87797a2bc1d3d11edfb0bf0a9236d436243d15061fae8b58cb9" in container runtime
I1129 09:20:21.239288 222878 cri.go:218] Removing image: registry.k8s.io/kube-proxy:v1.34.1
I1129 09:20:21.239346 222878 ssh_runner.go:195] Run: which crictl
I1129 09:20:21.239286 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.34.1
I1129 09:20:21.239244 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1129 09:20:21.240343 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.12.1
I1129 09:20:21.240430 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.34.1
I1129 09:20:21.240578 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.34.1
I1129 09:20:21.248302 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.6.4-0
I1129 09:20:21.337972 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.34.1
I1129 09:20:21.338141 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1129 09:20:21.338156 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.34.1
I1129 09:20:21.350334 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.34.1
I1129 09:20:21.350500 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.12.1
I1129 09:20:21.350586 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.34.1
I1129 09:20:21.350679 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.6.4-0
I1129 09:20:21.436779 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.34.1
I1129 09:20:21.436931 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.34.1
I1129 09:20:21.437008 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1129 09:20:21.482969 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.34.1
I1129 09:20:21.483085 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.34.1
I1129 09:20:21.483137 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.12.1
I1129 09:20:21.491181 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.6.4-0
I1129 09:20:21.551573 222878 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/pause_3.10.1
I1129 09:20:21.551783 222878 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/pause_3.10.1
I1129 09:20:21.551782 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.34.1
I1129 09:20:21.551677 222878 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-controller-manager_v1.34.1
I1129 09:20:21.551991 222878 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-controller-manager_v1.34.1
I1129 09:20:21.589991 222878 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/coredns/coredns_v1.12.1
I1129 09:20:21.590095 222878 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/coredns_v1.12.1
I1129 09:20:21.590176 222878 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-scheduler_v1.34.1
I1129 09:20:21.590233 222878 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-scheduler_v1.34.1
I1129 09:20:21.590311 222878 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-apiserver_v1.34.1
I1129 09:20:21.590381 222878 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-apiserver_v1.34.1
I1129 09:20:21.599084 222878 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/etcd_3.6.4-0
I1129 09:20:21.599203 222878 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/etcd_3.6.4-0
I1129 09:20:21.606906 222878 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-controller-manager_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-controller-manager_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-controller-manager_v1.34.1': No such file or directory
I1129 09:20:21.607120 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-controller-manager_v1.34.1 --> /var/lib/minikube/images/kube-controller-manager_v1.34.1 (20730880 bytes)
I1129 09:20:21.607120 222878 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-scheduler_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-scheduler_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-scheduler_v1.34.1': No such file or directory
I1129 09:20:21.607245 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-scheduler_v1.34.1 --> /var/lib/minikube/images/kube-scheduler_v1.34.1 (15790592 bytes)
I1129 09:20:21.607065 222878 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-proxy_v1.34.1
I1129 09:20:21.607080 222878 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-apiserver_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-apiserver_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-apiserver_v1.34.1': No such file or directory
I1129 09:20:21.607377 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-apiserver_v1.34.1 --> /var/lib/minikube/images/kube-apiserver_v1.34.1 (24581632 bytes)
I1129 09:20:21.607089 222878 ssh_runner.go:352] existence check for /var/lib/minikube/images/coredns_v1.12.1: stat -c "%s %y" /var/lib/minikube/images/coredns_v1.12.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/coredns_v1.12.1': No such file or directory
I1129 09:20:21.607470 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/coredns/coredns_v1.12.1 --> /var/lib/minikube/images/coredns_v1.12.1 (20402176 bytes)
I1129 09:20:21.607010 222878 ssh_runner.go:352] existence check for /var/lib/minikube/images/pause_3.10.1: stat -c "%s %y" /var/lib/minikube/images/pause_3.10.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/pause_3.10.1': No such file or directory
I1129 09:20:21.607558 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/pause_3.10.1 --> /var/lib/minikube/images/pause_3.10.1 (268288 bytes)
I1129 09:20:21.607693 222878 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-proxy_v1.34.1
I1129 09:20:21.611409 222878 ssh_runner.go:352] existence check for /var/lib/minikube/images/etcd_3.6.4-0: stat -c "%s %y" /var/lib/minikube/images/etcd_3.6.4-0: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/etcd_3.6.4-0': No such file or directory
I1129 09:20:21.611475 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/etcd_3.6.4-0 --> /var/lib/minikube/images/etcd_3.6.4-0 (98216960 bytes)
I1129 09:20:21.621627 222878 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-proxy_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-proxy_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-proxy_v1.34.1': No such file or directory
I1129 09:20:21.621809 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-proxy_v1.34.1 --> /var/lib/minikube/images/kube-proxy_v1.34.1 (22790144 bytes)
I1129 09:20:21.715246 222878 containerd.go:285] Loading image: /var/lib/minikube/images/pause_3.10.1
I1129 09:20:21.715371 222878 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/pause_3.10.1
I1129 09:20:22.049743 222878 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/pause_3.10.1 from cache
I1129 09:20:22.146786 222878 containerd.go:285] Loading image: /var/lib/minikube/images/kube-scheduler_v1.34.1
I1129 09:20:22.146909 222878 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-scheduler_v1.34.1
W1129 09:20:22.239238 222878 image.go:286] image gcr.io/k8s-minikube/storage-provisioner:v5 arch mismatch: want arm64 got amd64. fixing
I1129 09:20:22.239372 222878 containerd.go:267] Checking existence of image with name "gcr.io/k8s-minikube/storage-provisioner:v5" and sha "66749159455b3f08c8318fe0233122f54d0f5889f9c5fdfb73c3fd9d99895b51"
I1129 09:20:22.239461 222878 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==gcr.io/k8s-minikube/storage-provisioner:v5
W1129 09:20:21.201342 219229 node_ready.go:57] node "old-k8s-version-071895" has "Ready":"False" status (will retry)
W1129 09:20:23.202246 219229 node_ready.go:57] node "old-k8s-version-071895" has "Ready":"False" status (will retry)
I1129 09:20:23.813839 222878 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-scheduler_v1.34.1: (1.666881209s)
I1129 09:20:23.813866 222878 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-scheduler_v1.34.1 from cache
I1129 09:20:23.813884 222878 containerd.go:285] Loading image: /var/lib/minikube/images/coredns_v1.12.1
I1129 09:20:23.813934 222878 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/coredns_v1.12.1
I1129 09:20:23.813990 222878 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images ls name==gcr.io/k8s-minikube/storage-provisioner:v5: (1.574510089s)
I1129 09:20:23.814059 222878 cache_images.go:118] "gcr.io/k8s-minikube/storage-provisioner:v5" needs transfer: "gcr.io/k8s-minikube/storage-provisioner:v5" does not exist at hash "66749159455b3f08c8318fe0233122f54d0f5889f9c5fdfb73c3fd9d99895b51" in container runtime
I1129 09:20:23.814109 222878 cri.go:218] Removing image: gcr.io/k8s-minikube/storage-provisioner:v5
I1129 09:20:23.814162 222878 ssh_runner.go:195] Run: which crictl
I1129 09:20:25.262220 222878 ssh_runner.go:235] Completed: which crictl: (1.448029919s)
I1129 09:20:25.262315 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1129 09:20:25.262227 222878 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/coredns_v1.12.1: (1.44826357s)
I1129 09:20:25.262380 222878 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/coredns/coredns_v1.12.1 from cache
I1129 09:20:25.262400 222878 containerd.go:285] Loading image: /var/lib/minikube/images/kube-controller-manager_v1.34.1
I1129 09:20:25.262443 222878 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-controller-manager_v1.34.1
I1129 09:20:26.253409 222878 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-controller-manager_v1.34.1 from cache
I1129 09:20:26.253448 222878 containerd.go:285] Loading image: /var/lib/minikube/images/kube-apiserver_v1.34.1
I1129 09:20:26.253502 222878 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-apiserver_v1.34.1
I1129 09:20:26.253588 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1129 09:20:27.306910 222878 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-apiserver_v1.34.1: (1.053379529s)
I1129 09:20:27.306932 222878 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-apiserver_v1.34.1 from cache
I1129 09:20:27.306934 222878 ssh_runner.go:235] Completed: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5: (1.053324259s)
I1129 09:20:27.306948 222878 containerd.go:285] Loading image: /var/lib/minikube/images/kube-proxy_v1.34.1
I1129 09:20:27.306998 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1129 09:20:27.306998 222878 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-proxy_v1.34.1
I1129 09:20:27.339643 222878 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/gcr.io/k8s-minikube/storage-provisioner_v5
I1129 09:20:27.339756 222878 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v5
W1129 09:20:25.701399 219229 node_ready.go:57] node "old-k8s-version-071895" has "Ready":"False" status (will retry)
W1129 09:20:28.200255 219229 node_ready.go:57] node "old-k8s-version-071895" has "Ready":"False" status (will retry)
I1129 09:20:29.701513 219229 node_ready.go:49] node "old-k8s-version-071895" is "Ready"
I1129 09:20:29.701545 219229 node_ready.go:38] duration metric: took 12.504000526s for node "old-k8s-version-071895" to be "Ready" ...
I1129 09:20:29.701560 219229 api_server.go:52] waiting for apiserver process to appear ...
I1129 09:20:29.701622 219229 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1129 09:20:29.719485 219229 api_server.go:72] duration metric: took 14.188022937s to wait for apiserver process to appear ...
I1129 09:20:29.719511 219229 api_server.go:88] waiting for apiserver healthz status ...
I1129 09:20:29.719530 219229 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1129 09:20:29.736520 219229 api_server.go:279] https://192.168.76.2:8443/healthz returned 200:
ok
I1129 09:20:29.740376 219229 api_server.go:141] control plane version: v1.28.0
I1129 09:20:29.740411 219229 api_server.go:131] duration metric: took 20.892436ms to wait for apiserver health ...
I1129 09:20:29.740421 219229 system_pods.go:43] waiting for kube-system pods to appear ...
I1129 09:20:29.748136 219229 system_pods.go:59] 8 kube-system pods found
I1129 09:20:29.748178 219229 system_pods.go:61] "coredns-5dd5756b68-htmzr" [c6b5f2ee-df4f-40a3-be2e-6f16e858e497] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1129 09:20:29.748186 219229 system_pods.go:61] "etcd-old-k8s-version-071895" [79f6e3b1-4d0e-480f-ba81-e9c28edc83ac] Running
I1129 09:20:29.748192 219229 system_pods.go:61] "kindnet-58g5f" [d4743cee-0834-4a44-9cf7-d0228aa5b843] Running
I1129 09:20:29.748201 219229 system_pods.go:61] "kube-apiserver-old-k8s-version-071895" [81748b80-7ec0-4a82-b646-673534a05137] Running
I1129 09:20:29.748206 219229 system_pods.go:61] "kube-controller-manager-old-k8s-version-071895" [b6691622-dfbd-4b77-bedd-c7a97120a360] Running
I1129 09:20:29.748209 219229 system_pods.go:61] "kube-proxy-4jxrn" [3e4bdb82-85e5-468b-80dc-0481c990f117] Running
I1129 09:20:29.748213 219229 system_pods.go:61] "kube-scheduler-old-k8s-version-071895" [fe7f98e1-0743-41d8-869a-4807c081f621] Running
I1129 09:20:29.748219 219229 system_pods.go:61] "storage-provisioner" [784fe707-ae15-4eae-a70c-ec084ce3d812] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1129 09:20:29.748231 219229 system_pods.go:74] duration metric: took 7.804151ms to wait for pod list to return data ...
I1129 09:20:29.748241 219229 default_sa.go:34] waiting for default service account to be created ...
I1129 09:20:29.751107 219229 default_sa.go:45] found service account: "default"
I1129 09:20:29.751135 219229 default_sa.go:55] duration metric: took 2.887312ms for default service account to be created ...
I1129 09:20:29.751147 219229 system_pods.go:116] waiting for k8s-apps to be running ...
I1129 09:20:29.757754 219229 system_pods.go:86] 8 kube-system pods found
I1129 09:20:29.757797 219229 system_pods.go:89] "coredns-5dd5756b68-htmzr" [c6b5f2ee-df4f-40a3-be2e-6f16e858e497] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1129 09:20:29.757804 219229 system_pods.go:89] "etcd-old-k8s-version-071895" [79f6e3b1-4d0e-480f-ba81-e9c28edc83ac] Running
I1129 09:20:29.757810 219229 system_pods.go:89] "kindnet-58g5f" [d4743cee-0834-4a44-9cf7-d0228aa5b843] Running
I1129 09:20:29.757815 219229 system_pods.go:89] "kube-apiserver-old-k8s-version-071895" [81748b80-7ec0-4a82-b646-673534a05137] Running
I1129 09:20:29.757819 219229 system_pods.go:89] "kube-controller-manager-old-k8s-version-071895" [b6691622-dfbd-4b77-bedd-c7a97120a360] Running
I1129 09:20:29.757823 219229 system_pods.go:89] "kube-proxy-4jxrn" [3e4bdb82-85e5-468b-80dc-0481c990f117] Running
I1129 09:20:29.757827 219229 system_pods.go:89] "kube-scheduler-old-k8s-version-071895" [fe7f98e1-0743-41d8-869a-4807c081f621] Running
I1129 09:20:29.757833 219229 system_pods.go:89] "storage-provisioner" [784fe707-ae15-4eae-a70c-ec084ce3d812] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1129 09:20:29.757863 219229 retry.go:31] will retry after 212.604223ms: missing components: kube-dns
I1129 09:20:29.976302 219229 system_pods.go:86] 8 kube-system pods found
I1129 09:20:29.976339 219229 system_pods.go:89] "coredns-5dd5756b68-htmzr" [c6b5f2ee-df4f-40a3-be2e-6f16e858e497] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1129 09:20:29.976347 219229 system_pods.go:89] "etcd-old-k8s-version-071895" [79f6e3b1-4d0e-480f-ba81-e9c28edc83ac] Running
I1129 09:20:29.976353 219229 system_pods.go:89] "kindnet-58g5f" [d4743cee-0834-4a44-9cf7-d0228aa5b843] Running
I1129 09:20:29.976359 219229 system_pods.go:89] "kube-apiserver-old-k8s-version-071895" [81748b80-7ec0-4a82-b646-673534a05137] Running
I1129 09:20:29.976364 219229 system_pods.go:89] "kube-controller-manager-old-k8s-version-071895" [b6691622-dfbd-4b77-bedd-c7a97120a360] Running
I1129 09:20:29.976368 219229 system_pods.go:89] "kube-proxy-4jxrn" [3e4bdb82-85e5-468b-80dc-0481c990f117] Running
I1129 09:20:29.976373 219229 system_pods.go:89] "kube-scheduler-old-k8s-version-071895" [fe7f98e1-0743-41d8-869a-4807c081f621] Running
I1129 09:20:29.976379 219229 system_pods.go:89] "storage-provisioner" [784fe707-ae15-4eae-a70c-ec084ce3d812] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1129 09:20:29.976398 219229 retry.go:31] will retry after 279.278138ms: missing components: kube-dns
I1129 09:20:30.268579 219229 system_pods.go:86] 8 kube-system pods found
I1129 09:20:30.268774 219229 system_pods.go:89] "coredns-5dd5756b68-htmzr" [c6b5f2ee-df4f-40a3-be2e-6f16e858e497] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1129 09:20:30.268790 219229 system_pods.go:89] "etcd-old-k8s-version-071895" [79f6e3b1-4d0e-480f-ba81-e9c28edc83ac] Running
I1129 09:20:30.268797 219229 system_pods.go:89] "kindnet-58g5f" [d4743cee-0834-4a44-9cf7-d0228aa5b843] Running
I1129 09:20:30.268802 219229 system_pods.go:89] "kube-apiserver-old-k8s-version-071895" [81748b80-7ec0-4a82-b646-673534a05137] Running
I1129 09:20:30.268807 219229 system_pods.go:89] "kube-controller-manager-old-k8s-version-071895" [b6691622-dfbd-4b77-bedd-c7a97120a360] Running
I1129 09:20:30.268811 219229 system_pods.go:89] "kube-proxy-4jxrn" [3e4bdb82-85e5-468b-80dc-0481c990f117] Running
I1129 09:20:30.268816 219229 system_pods.go:89] "kube-scheduler-old-k8s-version-071895" [fe7f98e1-0743-41d8-869a-4807c081f621] Running
I1129 09:20:30.268826 219229 system_pods.go:89] "storage-provisioner" [784fe707-ae15-4eae-a70c-ec084ce3d812] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1129 09:20:30.268843 219229 retry.go:31] will retry after 368.451427ms: missing components: kube-dns
I1129 09:20:30.642681 219229 system_pods.go:86] 8 kube-system pods found
I1129 09:20:30.642718 219229 system_pods.go:89] "coredns-5dd5756b68-htmzr" [c6b5f2ee-df4f-40a3-be2e-6f16e858e497] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1129 09:20:30.642726 219229 system_pods.go:89] "etcd-old-k8s-version-071895" [79f6e3b1-4d0e-480f-ba81-e9c28edc83ac] Running
I1129 09:20:30.642733 219229 system_pods.go:89] "kindnet-58g5f" [d4743cee-0834-4a44-9cf7-d0228aa5b843] Running
I1129 09:20:30.642738 219229 system_pods.go:89] "kube-apiserver-old-k8s-version-071895" [81748b80-7ec0-4a82-b646-673534a05137] Running
I1129 09:20:30.642743 219229 system_pods.go:89] "kube-controller-manager-old-k8s-version-071895" [b6691622-dfbd-4b77-bedd-c7a97120a360] Running
I1129 09:20:30.642747 219229 system_pods.go:89] "kube-proxy-4jxrn" [3e4bdb82-85e5-468b-80dc-0481c990f117] Running
I1129 09:20:30.642752 219229 system_pods.go:89] "kube-scheduler-old-k8s-version-071895" [fe7f98e1-0743-41d8-869a-4807c081f621] Running
I1129 09:20:30.642761 219229 system_pods.go:89] "storage-provisioner" [784fe707-ae15-4eae-a70c-ec084ce3d812] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1129 09:20:30.642776 219229 retry.go:31] will retry after 521.296683ms: missing components: kube-dns
I1129 09:20:31.171413 219229 system_pods.go:86] 8 kube-system pods found
I1129 09:20:31.171442 219229 system_pods.go:89] "coredns-5dd5756b68-htmzr" [c6b5f2ee-df4f-40a3-be2e-6f16e858e497] Running
I1129 09:20:31.171449 219229 system_pods.go:89] "etcd-old-k8s-version-071895" [79f6e3b1-4d0e-480f-ba81-e9c28edc83ac] Running
I1129 09:20:31.171454 219229 system_pods.go:89] "kindnet-58g5f" [d4743cee-0834-4a44-9cf7-d0228aa5b843] Running
I1129 09:20:31.171472 219229 system_pods.go:89] "kube-apiserver-old-k8s-version-071895" [81748b80-7ec0-4a82-b646-673534a05137] Running
I1129 09:20:31.171482 219229 system_pods.go:89] "kube-controller-manager-old-k8s-version-071895" [b6691622-dfbd-4b77-bedd-c7a97120a360] Running
I1129 09:20:31.171487 219229 system_pods.go:89] "kube-proxy-4jxrn" [3e4bdb82-85e5-468b-80dc-0481c990f117] Running
I1129 09:20:31.171502 219229 system_pods.go:89] "kube-scheduler-old-k8s-version-071895" [fe7f98e1-0743-41d8-869a-4807c081f621] Running
I1129 09:20:31.171506 219229 system_pods.go:89] "storage-provisioner" [784fe707-ae15-4eae-a70c-ec084ce3d812] Running
I1129 09:20:31.171514 219229 system_pods.go:126] duration metric: took 1.420361927s to wait for k8s-apps to be running ...
I1129 09:20:31.171522 219229 system_svc.go:44] waiting for kubelet service to be running ....
I1129 09:20:31.171578 219229 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1129 09:20:31.191104 219229 system_svc.go:56] duration metric: took 19.570105ms WaitForService to wait for kubelet
I1129 09:20:31.191198 219229 kubeadm.go:587] duration metric: took 15.659726511s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1129 09:20:31.191233 219229 node_conditions.go:102] verifying NodePressure condition ...
I1129 09:20:31.194404 219229 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
I1129 09:20:31.194485 219229 node_conditions.go:123] node cpu capacity is 2
I1129 09:20:31.194514 219229 node_conditions.go:105] duration metric: took 3.245952ms to run NodePressure ...
I1129 09:20:31.194558 219229 start.go:242] waiting for startup goroutines ...
I1129 09:20:31.194583 219229 start.go:247] waiting for cluster config update ...
I1129 09:20:31.194611 219229 start.go:256] writing updated cluster config ...
I1129 09:20:31.195146 219229 ssh_runner.go:195] Run: rm -f paused
I1129 09:20:31.201208 219229 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1129 09:20:31.206616 219229 pod_ready.go:83] waiting for pod "coredns-5dd5756b68-htmzr" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:20:31.217168 219229 pod_ready.go:94] pod "coredns-5dd5756b68-htmzr" is "Ready"
I1129 09:20:31.217243 219229 pod_ready.go:86] duration metric: took 10.548708ms for pod "coredns-5dd5756b68-htmzr" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:20:31.223645 219229 pod_ready.go:83] waiting for pod "etcd-old-k8s-version-071895" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:20:31.234784 219229 pod_ready.go:94] pod "etcd-old-k8s-version-071895" is "Ready"
I1129 09:20:31.234859 219229 pod_ready.go:86] duration metric: took 11.131317ms for pod "etcd-old-k8s-version-071895" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:20:31.248582 219229 pod_ready.go:83] waiting for pod "kube-apiserver-old-k8s-version-071895" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:20:31.259407 219229 pod_ready.go:94] pod "kube-apiserver-old-k8s-version-071895" is "Ready"
I1129 09:20:31.259482 219229 pod_ready.go:86] duration metric: took 10.819537ms for pod "kube-apiserver-old-k8s-version-071895" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:20:31.263998 219229 pod_ready.go:83] waiting for pod "kube-controller-manager-old-k8s-version-071895" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:20:31.606531 219229 pod_ready.go:94] pod "kube-controller-manager-old-k8s-version-071895" is "Ready"
I1129 09:20:31.606610 219229 pod_ready.go:86] duration metric: took 342.539937ms for pod "kube-controller-manager-old-k8s-version-071895" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:20:31.808005 219229 pod_ready.go:83] waiting for pod "kube-proxy-4jxrn" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:20:32.206161 219229 pod_ready.go:94] pod "kube-proxy-4jxrn" is "Ready"
I1129 09:20:32.206190 219229 pod_ready.go:86] duration metric: took 398.137324ms for pod "kube-proxy-4jxrn" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:20:32.422404 219229 pod_ready.go:83] waiting for pod "kube-scheduler-old-k8s-version-071895" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:20:32.806577 219229 pod_ready.go:94] pod "kube-scheduler-old-k8s-version-071895" is "Ready"
I1129 09:20:32.806676 219229 pod_ready.go:86] duration metric: took 384.18875ms for pod "kube-scheduler-old-k8s-version-071895" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:20:32.806706 219229 pod_ready.go:40] duration metric: took 1.605412666s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1129 09:20:32.883122 219229 start.go:625] kubectl: 1.33.2, cluster: 1.28.0 (minor skew: 5)
I1129 09:20:32.886925 219229 out.go:203]
W1129 09:20:32.889873 219229 out.go:285] ! /usr/local/bin/kubectl is version 1.33.2, which may have incompatibilities with Kubernetes 1.28.0.
I1129 09:20:32.892945 219229 out.go:179] - Want kubectl v1.28.0? Try 'minikube kubectl -- get pods -A'
I1129 09:20:32.896883 219229 out.go:179] * Done! kubectl is now configured to use "old-k8s-version-071895" cluster and "default" namespace by default
I1129 09:20:28.381724 222878 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-proxy_v1.34.1: (1.074642707s)
I1129 09:20:28.381753 222878 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-proxy_v1.34.1 from cache
I1129 09:20:28.381780 222878 containerd.go:285] Loading image: /var/lib/minikube/images/etcd_3.6.4-0
I1129 09:20:28.381828 222878 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/etcd_3.6.4-0
I1129 09:20:28.381907 222878 ssh_runner.go:235] Completed: stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v5: (1.042136021s)
I1129 09:20:28.381924 222878 ssh_runner.go:352] existence check for /var/lib/minikube/images/storage-provisioner_v5: stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v5: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/storage-provisioner_v5': No such file or directory
I1129 09:20:28.381944 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/gcr.io/k8s-minikube/storage-provisioner_v5 --> /var/lib/minikube/images/storage-provisioner_v5 (8035840 bytes)
I1129 09:20:31.974151 222878 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/etcd_3.6.4-0: (3.592291332s)
I1129 09:20:31.974192 222878 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/etcd_3.6.4-0 from cache
I1129 09:20:31.974218 222878 containerd.go:285] Loading image: /var/lib/minikube/images/storage-provisioner_v5
I1129 09:20:31.974299 222878 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/storage-provisioner_v5
I1129 09:20:32.697903 222878 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/gcr.io/k8s-minikube/storage-provisioner_v5 from cache
I1129 09:20:32.697943 222878 cache_images.go:125] Successfully loaded all cached images
I1129 09:20:32.697949 222878 cache_images.go:94] duration metric: took 11.878914483s to LoadCachedImages
I1129 09:20:32.697961 222878 kubeadm.go:935] updating node { 192.168.85.2 8443 v1.34.1 containerd true true} ...
I1129 09:20:32.698052 222878 kubeadm.go:947] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=no-preload-230403 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.85.2
[Install]
config:
{KubernetesVersion:v1.34.1 ClusterName:no-preload-230403 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1129 09:20:32.698117 222878 ssh_runner.go:195] Run: sudo crictl info
I1129 09:20:32.724003 222878 cni.go:84] Creating CNI manager for ""
I1129 09:20:32.724023 222878 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1129 09:20:32.724042 222878 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1129 09:20:32.724064 222878 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.85.2 APIServerPort:8443 KubernetesVersion:v1.34.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:no-preload-230403 NodeName:no-preload-230403 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.85.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.85.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1129 09:20:32.724177 222878 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.85.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///run/containerd/containerd.sock
name: "no-preload-230403"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.85.2"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.85.2"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.1
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
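The three documents above are what later lands in /var/tmp/minikube/kubeadm.yaml. A generated config like this can be linted without touching the node; a sketch, assuming a kubeadm binary of the matching minor version (kubeadm config validate exists in current releases, and kubeadm init --dry-run renders manifests without starting anything):
  kubeadm config validate --config /var/tmp/minikube/kubeadm.yaml
  kubeadm init --config /var/tmp/minikube/kubeadm.yaml --dry-run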
I1129 09:20:32.724247 222878 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.1
I1129 09:20:32.734586 222878 binaries.go:54] Didn't find k8s binaries: sudo ls /var/lib/minikube/binaries/v1.34.1: Process exited with status 2
stdout:
stderr:
ls: cannot access '/var/lib/minikube/binaries/v1.34.1': No such file or directory
Initiating transfer...
I1129 09:20:32.734661 222878 ssh_runner.go:195] Run: sudo mkdir -p /var/lib/minikube/binaries/v1.34.1
I1129 09:20:32.744055 222878 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.34.1/bin/linux/arm64/kubectl?checksum=file:https://dl.k8s.io/release/v1.34.1/bin/linux/arm64/kubectl.sha256
I1129 09:20:32.744148 222878 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubectl
I1129 09:20:32.744244 222878 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.34.1/bin/linux/arm64/kubelet?checksum=file:https://dl.k8s.io/release/v1.34.1/bin/linux/arm64/kubelet.sha256
I1129 09:20:32.744287 222878 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1129 09:20:32.744372 222878 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.34.1/bin/linux/arm64/kubeadm?checksum=file:https://dl.k8s.io/release/v1.34.1/bin/linux/arm64/kubeadm.sha256
I1129 09:20:32.744422 222878 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubeadm
I1129 09:20:32.765160 222878 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.34.1/kubeadm: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubeadm: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.34.1/kubeadm': No such file or directory
I1129 09:20:32.765194 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/cache/linux/arm64/v1.34.1/kubeadm --> /var/lib/minikube/binaries/v1.34.1/kubeadm (71434424 bytes)
I1129 09:20:32.765213 222878 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.34.1/kubectl: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubectl: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.34.1/kubectl': No such file or directory
I1129 09:20:32.765239 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/cache/linux/arm64/v1.34.1/kubectl --> /var/lib/minikube/binaries/v1.34.1/kubectl (58130616 bytes)
I1129 09:20:32.765317 222878 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubelet
I1129 09:20:32.779265 222878 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.34.1/kubelet: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubelet: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.34.1/kubelet': No such file or directory
I1129 09:20:32.779306 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/cache/linux/arm64/v1.34.1/kubelet --> /var/lib/minikube/binaries/v1.34.1/kubelet (56426788 bytes)
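The three scp transfers above come from the host cache; per the binary.go lines, the downloads themselves are checksum-pinned against the published .sha256 files. An illustrative manual equivalent for one binary (kubelet shown; the same pattern applies to kubeadm and kubectl):
  curl -LO https://dl.k8s.io/release/v1.34.1/bin/linux/arm64/kubelet
  echo "$(curl -sL https://dl.k8s.io/release/v1.34.1/bin/linux/arm64/kubelet.sha256)  kubelet" | sha256sum --check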
I1129 09:20:33.994121 222878 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1129 09:20:34.006964 222878 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (321 bytes)
I1129 09:20:34.022992 222878 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1129 09:20:34.039936 222878 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2230 bytes)
I1129 09:20:34.054478 222878 ssh_runner.go:195] Run: grep 192.168.85.2 control-plane.minikube.internal$ /etc/hosts
I1129 09:20:34.059158 222878 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.85.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
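The one-liner above is an idempotent /etc/hosts update: drop any stale control-plane entry, append the current mapping, stage under a PID-unique temp name, then copy back. An annotated restatement (illustrative; the cp rather than mv at the end likely matters because /etc/hosts is a bind mount inside the container, so it must be rewritten in place rather than renamed over):
  { grep -v $'\tcontrol-plane.minikube.internal$' /etc/hosts;    # strip any stale entry
    echo "192.168.85.2 control-plane.minikube.internal";         # append the fresh mapping
  } > /tmp/h.$$                                                  # $$ = shell PID, a unique temp name
  sudo cp /tmp/h.$$ /etc/hosts                                   # overwrite in place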
I1129 09:20:34.071443 222878 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1129 09:20:34.198077 222878 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1129 09:20:34.225128 222878 certs.go:69] Setting up /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403 for IP: 192.168.85.2
I1129 09:20:34.225153 222878 certs.go:195] generating shared ca certs ...
I1129 09:20:34.225176 222878 certs.go:227] acquiring lock for ca certs: {Name:mke655c14945a8520f2f9de36531df923afb2bda Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:20:34.225330 222878 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/22000-2317/.minikube/ca.key
I1129 09:20:34.225385 222878 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/22000-2317/.minikube/proxy-client-ca.key
I1129 09:20:34.225397 222878 certs.go:257] generating profile certs ...
I1129 09:20:34.225460 222878 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/client.key
I1129 09:20:34.225477 222878 crypto.go:68] Generating cert /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/client.crt with IP's: []
I1129 09:20:34.561780 222878 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/client.crt ...
I1129 09:20:34.561812 222878 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/client.crt: {Name:mk0506510be8624c61cf78aca5533a42dbe12049 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:20:34.562018 222878 crypto.go:164] Writing key to /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/client.key ...
I1129 09:20:34.562032 222878 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/client.key: {Name:mk7728838f62624078d9f102edcc2e7e92fca24a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:20:34.562134 222878 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/apiserver.key.9c37d96b
I1129 09:20:34.562155 222878 crypto.go:68] Generating cert /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/apiserver.crt.9c37d96b with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.85.2]
I1129 09:20:35.279064 222878 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/apiserver.crt.9c37d96b ...
I1129 09:20:35.279097 222878 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/apiserver.crt.9c37d96b: {Name:mkb8ab5f6d41eda35913c9ea362db6a34366a395 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:20:35.279295 222878 crypto.go:164] Writing key to /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/apiserver.key.9c37d96b ...
I1129 09:20:35.279312 222878 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/apiserver.key.9c37d96b: {Name:mk21caee54335560e86fdf60eec601c387bdb604 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:20:35.279403 222878 certs.go:382] copying /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/apiserver.crt.9c37d96b -> /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/apiserver.crt
I1129 09:20:35.279483 222878 certs.go:386] copying /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/apiserver.key.9c37d96b -> /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/apiserver.key
I1129 09:20:35.279555 222878 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/proxy-client.key
I1129 09:20:35.279573 222878 crypto.go:68] Generating cert /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/proxy-client.crt with IP's: []
I1129 09:20:35.662938 222878 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/proxy-client.crt ...
I1129 09:20:35.662968 222878 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/proxy-client.crt: {Name:mk84c114a546c4abdb7a044023d46a90cfce8d04 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:20:35.663145 222878 crypto.go:164] Writing key to /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/proxy-client.key ...
I1129 09:20:35.663161 222878 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/proxy-client.key: {Name:mk0fc11a967c87ab7d123db8f16798c3182082c0 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:20:35.663352 222878 certs.go:484] found cert: /home/jenkins/minikube-integration/22000-2317/.minikube/certs/4137.pem (1338 bytes)
W1129 09:20:35.663398 222878 certs.go:480] ignoring /home/jenkins/minikube-integration/22000-2317/.minikube/certs/4137_empty.pem, impossibly tiny 0 bytes
I1129 09:20:35.663418 222878 certs.go:484] found cert: /home/jenkins/minikube-integration/22000-2317/.minikube/certs/ca-key.pem (1675 bytes)
I1129 09:20:35.663446 222878 certs.go:484] found cert: /home/jenkins/minikube-integration/22000-2317/.minikube/certs/ca.pem (1082 bytes)
I1129 09:20:35.663474 222878 certs.go:484] found cert: /home/jenkins/minikube-integration/22000-2317/.minikube/certs/cert.pem (1123 bytes)
I1129 09:20:35.663499 222878 certs.go:484] found cert: /home/jenkins/minikube-integration/22000-2317/.minikube/certs/key.pem (1679 bytes)
I1129 09:20:35.663547 222878 certs.go:484] found cert: /home/jenkins/minikube-integration/22000-2317/.minikube/files/etc/ssl/certs/41372.pem (1708 bytes)
I1129 09:20:35.664157 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1129 09:20:35.691460 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I1129 09:20:35.717525 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1129 09:20:35.745851 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I1129 09:20:35.769815 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
I1129 09:20:35.790501 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I1129 09:20:35.812066 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1129 09:20:35.830915 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I1129 09:20:35.849395 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/certs/4137.pem --> /usr/share/ca-certificates/4137.pem (1338 bytes)
I1129 09:20:35.872584 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/files/etc/ssl/certs/41372.pem --> /usr/share/ca-certificates/41372.pem (1708 bytes)
I1129 09:20:35.893049 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1129 09:20:35.918494 222878 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1129 09:20:35.936255 222878 ssh_runner.go:195] Run: openssl version
I1129 09:20:35.943518 222878 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/41372.pem && ln -fs /usr/share/ca-certificates/41372.pem /etc/ssl/certs/41372.pem"
I1129 09:20:35.954406 222878 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/41372.pem
I1129 09:20:35.959997 222878 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 29 08:36 /usr/share/ca-certificates/41372.pem
I1129 09:20:35.960085 222878 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/41372.pem
I1129 09:20:36.006091 222878 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/41372.pem /etc/ssl/certs/3ec20f2e.0"
I1129 09:20:36.017475 222878 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1129 09:20:36.027314 222878 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1129 09:20:36.031927 222878 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 29 08:29 /usr/share/ca-certificates/minikubeCA.pem
I1129 09:20:36.031999 222878 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1129 09:20:36.075486 222878 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1129 09:20:36.084604 222878 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/4137.pem && ln -fs /usr/share/ca-certificates/4137.pem /etc/ssl/certs/4137.pem"
I1129 09:20:36.094214 222878 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/4137.pem
I1129 09:20:36.098768 222878 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 29 08:36 /usr/share/ca-certificates/4137.pem
I1129 09:20:36.098840 222878 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/4137.pem
I1129 09:20:36.143207 222878 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/4137.pem /etc/ssl/certs/51391683.0"
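The ls/openssl/ln sequence above builds the standard OpenSSL hashed-directory layout: each CA under /usr/share/ca-certificates gets a symlink in /etc/ssl/certs named <subject-hash>.0, which is where the names 3ec20f2e.0, b5213941.0, and 51391683.0 come from. A sketch of the derivation for one of them:
  h=$(openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem)   # prints b5213941
  sudo ln -fs /etc/ssl/certs/minikubeCA.pem "/etc/ssl/certs/${h}.0"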
I1129 09:20:36.152425 222878 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1129 09:20:36.156708 222878 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1129 09:20:36.156761  222878 kubeadm.go:401] StartCluster: {Name:no-preload-230403 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-230403 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1129 09:20:36.156839 222878 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I1129 09:20:36.156905 222878 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I1129 09:20:36.184470 222878 cri.go:89] found id: ""
I1129 09:20:36.184537 222878 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1129 09:20:36.193057 222878 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1129 09:20:36.201441 222878 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I1129 09:20:36.201527 222878 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1129 09:20:36.210060 222878 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1129 09:20:36.210079 222878 kubeadm.go:158] found existing configuration files:
I1129 09:20:36.210164 222878 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1129 09:20:36.218503 222878 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1129 09:20:36.218590 222878 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1129 09:20:36.226704 222878 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1129 09:20:36.235392 222878 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1129 09:20:36.235519 222878 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1129 09:20:36.243976 222878 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1129 09:20:36.252727 222878 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1129 09:20:36.252802 222878 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1129 09:20:36.261462 222878 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1129 09:20:36.270714 222878 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1129 09:20:36.270782 222878 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1129 09:20:36.278924 222878 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1129 09:20:36.329064 222878 kubeadm.go:319] [init] Using Kubernetes version: v1.34.1
I1129 09:20:36.329252 222878 kubeadm.go:319] [preflight] Running pre-flight checks
I1129 09:20:36.365187 222878 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I1129 09:20:36.365275  222878 kubeadm.go:319] KERNEL_VERSION: 5.15.0-1084-aws
I1129 09:20:36.365324  222878 kubeadm.go:319] OS: Linux
I1129 09:20:36.365388  222878 kubeadm.go:319] CGROUPS_CPU: enabled
I1129 09:20:36.365445  222878 kubeadm.go:319] CGROUPS_CPUACCT: enabled
I1129 09:20:36.365513  222878 kubeadm.go:319] CGROUPS_CPUSET: enabled
I1129 09:20:36.365576  222878 kubeadm.go:319] CGROUPS_DEVICES: enabled
I1129 09:20:36.365638  222878 kubeadm.go:319] CGROUPS_FREEZER: enabled
I1129 09:20:36.365702  222878 kubeadm.go:319] CGROUPS_MEMORY: enabled
I1129 09:20:36.365769  222878 kubeadm.go:319] CGROUPS_PIDS: enabled
I1129 09:20:36.365832  222878 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I1129 09:20:36.365884  222878 kubeadm.go:319] CGROUPS_BLKIO: enabled
I1129 09:20:36.435193 222878 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1129 09:20:36.435380 222878 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1129 09:20:36.435539 222878 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I1129 09:20:36.441349 222878 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1129 09:20:36.446636 222878 out.go:252] - Generating certificates and keys ...
I1129 09:20:36.446799 222878 kubeadm.go:319] [certs] Using existing ca certificate authority
I1129 09:20:36.446906 222878 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1129 09:20:37.362846 222878 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I1129 09:20:37.721165 222878 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I1129 09:20:37.949639 222878 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1129 09:20:38.413017 222878 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1129 09:20:38.775660 222878 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1129 09:20:38.776186 222878 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [localhost no-preload-230403] and IPs [192.168.85.2 127.0.0.1 ::1]
I1129 09:20:39.104705 222878 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1129 09:20:39.105064 222878 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [localhost no-preload-230403] and IPs [192.168.85.2 127.0.0.1 ::1]
I1129 09:20:39.359331 222878 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1129 09:20:39.818423 222878 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1129 09:20:39.880381 222878 kubeadm.go:319] [certs] Generating "sa" key and public key
I1129 09:20:39.880638 222878 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1129 09:20:41.216161 222878 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1129 09:20:42.199207 222878 kubeadm.go:319] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I1129 09:20:42.918813 222878 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1129 09:20:43.410581 222878 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1129 09:20:43.826978 222878 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1129 09:20:43.827675 222878 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1129 09:20:43.830453 222878 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD NAMESPACE
b9e829b9abde5 1611cd07b61d5 8 seconds ago Running busybox 0 ddd79130415cc busybox default
f8f1e6dc2605a 97e04611ad434 16 seconds ago Running coredns 0 0f3ce8e6c4105 coredns-5dd5756b68-htmzr kube-system
359d9432ef497 ba04bb24b9575 16 seconds ago Running storage-provisioner 0 66250dabca2c6 storage-provisioner kube-system
db1d77c6c85ea b1a8c6f707935 27 seconds ago Running kindnet-cni 0 78bf9329ff249 kindnet-58g5f kube-system
000a8de26034d 940f54a5bcae9 30 seconds ago Running kube-proxy 0 ec6c1087a251f kube-proxy-4jxrn kube-system
c6e9c9ab04ae1 46cc66ccc7c19 51 seconds ago Running kube-controller-manager 0 16b3e81e696c9 kube-controller-manager-old-k8s-version-071895 kube-system
41dff26eb8e67 9cdd6470f48c8 51 seconds ago Running etcd 0 468f2a4d8c24a etcd-old-k8s-version-071895 kube-system
d34a4ced6121d 00543d2fe5d71 52 seconds ago Running kube-apiserver 0 9630ead47757e kube-apiserver-old-k8s-version-071895 kube-system
7c5e9c05d20b8 762dce4090c5f 52 seconds ago Running kube-scheduler 0 676bacb96168a kube-scheduler-old-k8s-version-071895 kube-system
==> containerd <==
Nov 29 09:20:29 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:29.985394384Z" level=info msg="connecting to shim 359d9432ef4979d387512d5a2a5a3cd9fb7a0987f4a3540a23407b70f7faf163" address="unix:///run/containerd/s/34373f541c51fce0619cd6b7f9bbe560b47e8c8788713a29595219a5d22d901b" protocol=ttrpc version=3
Nov 29 09:20:29 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:29.992937298Z" level=info msg="CreateContainer within sandbox \"0f3ce8e6c41050910070bab1a2edce113b2eb3bd98f3bca1d8006c18bcd1714f\" for container &ContainerMetadata{Name:coredns,Attempt:0,}"
Nov 29 09:20:30 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:30.071685071Z" level=info msg="Container f8f1e6dc2605a052d9e0af268d75e52d11eef09c6da328c174daa4346e21359d: CDI devices from CRI Config.CDIDevices: []"
Nov 29 09:20:30 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:30.095345483Z" level=info msg="CreateContainer within sandbox \"0f3ce8e6c41050910070bab1a2edce113b2eb3bd98f3bca1d8006c18bcd1714f\" for &ContainerMetadata{Name:coredns,Attempt:0,} returns container id \"f8f1e6dc2605a052d9e0af268d75e52d11eef09c6da328c174daa4346e21359d\""
Nov 29 09:20:30 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:30.097739089Z" level=info msg="StartContainer for \"f8f1e6dc2605a052d9e0af268d75e52d11eef09c6da328c174daa4346e21359d\""
Nov 29 09:20:30 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:30.099238569Z" level=info msg="connecting to shim f8f1e6dc2605a052d9e0af268d75e52d11eef09c6da328c174daa4346e21359d" address="unix:///run/containerd/s/3150843ad07ed5a21377bb0ba6fe93d3c73033d9ccfa3b4a9e0ed16a5e8438c5" protocol=ttrpc version=3
Nov 29 09:20:30 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:30.189374834Z" level=info msg="StartContainer for \"359d9432ef4979d387512d5a2a5a3cd9fb7a0987f4a3540a23407b70f7faf163\" returns successfully"
Nov 29 09:20:30 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:30.269744369Z" level=info msg="StartContainer for \"f8f1e6dc2605a052d9e0af268d75e52d11eef09c6da328c174daa4346e21359d\" returns successfully"
Nov 29 09:20:35 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:35.534277133Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:3abcbd08-d7c4-4a13-b94c-6f6424975411,Namespace:default,Attempt:0,}"
Nov 29 09:20:35 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:35.597130515Z" level=info msg="connecting to shim ddd79130415cc8649c69caccfc081affa5f1da8a2517127cdbcf8d824a791490" address="unix:///run/containerd/s/6d78da511a42142891dae64b3eb6a171701a2aacf243055415398ac4ec21cd7a" namespace=k8s.io protocol=ttrpc version=3
Nov 29 09:20:35 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:35.703469012Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:3abcbd08-d7c4-4a13-b94c-6f6424975411,Namespace:default,Attempt:0,} returns sandbox id \"ddd79130415cc8649c69caccfc081affa5f1da8a2517127cdbcf8d824a791490\""
Nov 29 09:20:35 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:35.712136437Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\""
Nov 29 09:20:37 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:37.805646978Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 29 09:20:37 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:37.808907002Z" level=info msg="stop pulling image gcr.io/k8s-minikube/busybox:1.28.4-glibc: active requests=0, bytes read=1937188"
Nov 29 09:20:37 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:37.812726259Z" level=info msg="ImageCreate event name:\"sha256:1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 29 09:20:37 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:37.815034818Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 29 09:20:37 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:37.816002472Z" level=info msg="Pulled image \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" with image id \"sha256:1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c\", repo tag \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\", repo digest \"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\", size \"1935750\" in 2.103636897s"
Nov 29 09:20:37 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:37.816153291Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" returns image reference \"sha256:1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c\""
Nov 29 09:20:37 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:37.820479635Z" level=info msg="CreateContainer within sandbox \"ddd79130415cc8649c69caccfc081affa5f1da8a2517127cdbcf8d824a791490\" for container &ContainerMetadata{Name:busybox,Attempt:0,}"
Nov 29 09:20:37 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:37.837353396Z" level=info msg="Container b9e829b9abde5402e2cbe089579fccb3fcaa2d4225461d6d9fe9bceddbff0c20: CDI devices from CRI Config.CDIDevices: []"
Nov 29 09:20:37 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:37.849004626Z" level=info msg="CreateContainer within sandbox \"ddd79130415cc8649c69caccfc081affa5f1da8a2517127cdbcf8d824a791490\" for &ContainerMetadata{Name:busybox,Attempt:0,} returns container id \"b9e829b9abde5402e2cbe089579fccb3fcaa2d4225461d6d9fe9bceddbff0c20\""
Nov 29 09:20:37 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:37.852339424Z" level=info msg="StartContainer for \"b9e829b9abde5402e2cbe089579fccb3fcaa2d4225461d6d9fe9bceddbff0c20\""
Nov 29 09:20:37 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:37.855127486Z" level=info msg="connecting to shim b9e829b9abde5402e2cbe089579fccb3fcaa2d4225461d6d9fe9bceddbff0c20" address="unix:///run/containerd/s/6d78da511a42142891dae64b3eb6a171701a2aacf243055415398ac4ec21cd7a" protocol=ttrpc version=3
Nov 29 09:20:37 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:37.962061310Z" level=info msg="StartContainer for \"b9e829b9abde5402e2cbe089579fccb3fcaa2d4225461d6d9fe9bceddbff0c20\" returns successfully"
Nov 29 09:20:44 old-k8s-version-071895 containerd[758]: E1129 09:20:44.932672 758 websocket.go:100] "Unhandled Error" err="unable to upgrade websocket connection: websocket server finished before becoming ready" logger="UnhandledError"
==> coredns [f8f1e6dc2605a052d9e0af268d75e52d11eef09c6da328c174daa4346e21359d] <==
.:53
[INFO] plugin/reload: Running configuration SHA512 = b7aacdf6a6aa730aafe4d018cac9b7b5ecfb346cba84a99f64521f87aef8b4958639c1cf97967716465791d05bd38f372615327b7cb1d93c850bae532744d54d
CoreDNS-1.10.1
linux/arm64, go1.20, 055b2c3
[INFO] 127.0.0.1:51515 - 3634 "HINFO IN 3397046818821823914.8081764445601178770. udp 57 false 512" NXDOMAIN qr,rd,ra 57 0.005882235s
==> describe nodes <==
Name: old-k8s-version-071895
Roles: control-plane
Labels: beta.kubernetes.io/arch=arm64
beta.kubernetes.io/os=linux
kubernetes.io/arch=arm64
kubernetes.io/hostname=old-k8s-version-071895
kubernetes.io/os=linux
minikube.k8s.io/commit=d0eb20ec824c82ab3f24099c8b785e0a2a5789af
minikube.k8s.io/name=old-k8s-version-071895
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_11_29T09_20_04_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Sat, 29 Nov 2025 09:19:58 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: old-k8s-version-071895
AcquireTime: <unset>
RenewTime: Sat, 29 Nov 2025 09:20:43 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Sat, 29 Nov 2025 09:20:33 +0000 Sat, 29 Nov 2025 09:19:55 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Sat, 29 Nov 2025 09:20:33 +0000 Sat, 29 Nov 2025 09:19:55 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Sat, 29 Nov 2025 09:20:33 +0000 Sat, 29 Nov 2025 09:19:55 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Sat, 29 Nov 2025 09:20:33 +0000 Sat, 29 Nov 2025 09:20:29 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.76.2
Hostname: old-k8s-version-071895
Capacity:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022296Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022296Ki
pods: 110
System Info:
Machine ID: 7283ea1857f18f20a875c29069214c9d
System UUID: 453a3f46-be9b-4440-b54b-7bd5b2275c63
Boot ID: 6647f078-4edd-40c5-9d0e-49eb5ed00bd7
Kernel Version: 5.15.0-1084-aws
OS Image: Debian GNU/Linux 12 (bookworm)
Operating System: linux
Architecture: arm64
Container Runtime Version: containerd://2.1.5
Kubelet Version: v1.28.0
Kube-Proxy Version: v1.28.0
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (9 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
default busybox 0 (0%) 0 (0%) 0 (0%) 0 (0%) 13s
kube-system coredns-5dd5756b68-htmzr 100m (5%) 0 (0%) 70Mi (0%) 170Mi (2%) 31s
kube-system etcd-old-k8s-version-071895 100m (5%) 0 (0%) 100Mi (1%) 0 (0%) 43s
kube-system kindnet-58g5f 100m (5%) 100m (5%) 50Mi (0%) 50Mi (0%) 31s
kube-system kube-apiserver-old-k8s-version-071895 250m (12%) 0 (0%) 0 (0%) 0 (0%) 47s
kube-system kube-controller-manager-old-k8s-version-071895 200m (10%) 0 (0%) 0 (0%) 0 (0%) 43s
kube-system kube-proxy-4jxrn 0 (0%) 0 (0%) 0 (0%) 0 (0%) 31s
kube-system kube-scheduler-old-k8s-version-071895 100m (5%) 0 (0%) 0 (0%) 0 (0%) 43s
kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 29s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 850m (42%) 100m (5%)
memory 220Mi (2%) 220Mi (2%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
hugepages-32Mi 0 (0%) 0 (0%)
hugepages-64Ki 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 30s kube-proxy
Normal Starting 44s kubelet Starting kubelet.
Normal NodeHasSufficientMemory 44s kubelet Node old-k8s-version-071895 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 44s kubelet Node old-k8s-version-071895 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 44s kubelet Node old-k8s-version-071895 status is now: NodeHasSufficientPID
Normal NodeAllocatableEnforced 43s kubelet Updated Node Allocatable limit across pods
Normal RegisteredNode 32s node-controller Node old-k8s-version-071895 event: Registered Node old-k8s-version-071895 in Controller
Normal NodeReady 17s kubelet Node old-k8s-version-071895 status is now: NodeReady
==> dmesg <==
[Nov29 08:17] ACPI: SRAT not present
[ +0.000000] ACPI: SRAT not present
[ +0.000000] SPI driver altr_a10sr has no spi_device_id for altr,a10sr
[ +0.014634] device-mapper: core: CONFIG_IMA_DISABLE_HTABLE is disabled. Duplicate IMA measurements will not be recorded in the IMA log.
[ +0.570975] systemd[1]: Configuration file /run/systemd/system/netplan-ovs-cleanup.service is marked world-inaccessible. This has no effect as configuration data is accessible via APIs without restrictions. Proceeding anyway.
[ +0.032231] systemd[1]: /lib/systemd/system/snapd.service:23: Unknown key name 'RestartMode' in section 'Service', ignoring.
[ +0.767655] ena 0000:00:05.0: LLQ is not supported Fallback to host mode policy.
[ +6.282538] kauditd_printk_skb: 36 callbacks suppressed
[Nov29 08:39] FS-Cache: Duplicate cookie detected
[ +0.000727] FS-Cache: O-cookie c=00000013 [p=00000002 fl=222 nc=0 na=1]
[ +0.001077] FS-Cache: O-cookie d=00000000b08097f7{9P.session} n=00000000a17ba85f
[ +0.001074] FS-Cache: O-key=[10] '34323935323231393134'
[ +0.000776] FS-Cache: N-cookie c=00000014 [p=00000002 fl=2 nc=0 na=1]
[ +0.000954] FS-Cache: N-cookie d=00000000b08097f7{9P.session} n=00000000534469ad
[ +0.001092] FS-Cache: N-key=[10] '34323935323231393134'
[Nov29 09:19] hrtimer: interrupt took 12545193 ns
==> etcd [41dff26eb8e679cc29a87f83f59d117073bdaeb9ac41cb8ac8ee1cb32c92511a] <==
{"level":"info","ts":"2025-11-29T09:19:54.897611Z","caller":"embed/etcd.go:569","msg":"cmux::serve","address":"192.168.76.2:2380"}
{"level":"info","ts":"2025-11-29T09:19:54.901566Z","caller":"embed/etcd.go:278","msg":"now serving peer/client/metrics","local-member-id":"ea7e25599daad906","initial-advertise-peer-urls":["https://192.168.76.2:2380"],"listen-peer-urls":["https://192.168.76.2:2380"],"advertise-client-urls":["https://192.168.76.2:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.76.2:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
{"level":"info","ts":"2025-11-29T09:19:54.901625Z","caller":"embed/etcd.go:855","msg":"serving metrics","address":"http://127.0.0.1:2381"}
{"level":"info","ts":"2025-11-29T09:19:55.060661Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 is starting a new election at term 1"}
{"level":"info","ts":"2025-11-29T09:19:55.060785Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 became pre-candidate at term 1"}
{"level":"info","ts":"2025-11-29T09:19:55.060882Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 received MsgPreVoteResp from ea7e25599daad906 at term 1"}
{"level":"info","ts":"2025-11-29T09:19:55.060949Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 became candidate at term 2"}
{"level":"info","ts":"2025-11-29T09:19:55.060985Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 received MsgVoteResp from ea7e25599daad906 at term 2"}
{"level":"info","ts":"2025-11-29T09:19:55.061056Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 became leader at term 2"}
{"level":"info","ts":"2025-11-29T09:19:55.06113Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: ea7e25599daad906 elected leader ea7e25599daad906 at term 2"}
{"level":"info","ts":"2025-11-29T09:19:55.062447Z","caller":"etcdserver/server.go:2062","msg":"published local member to cluster through raft","local-member-id":"ea7e25599daad906","local-member-attributes":"{Name:old-k8s-version-071895 ClientURLs:[https://192.168.76.2:2379]}","request-path":"/0/members/ea7e25599daad906/attributes","cluster-id":"6f20f2c4b2fb5f8a","publish-timeout":"7s"}
{"level":"info","ts":"2025-11-29T09:19:55.062536Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-29T09:19:55.063797Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2025-11-29T09:19:55.063991Z","caller":"etcdserver/server.go:2571","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-29T09:19:55.065852Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-29T09:19:55.066951Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.76.2:2379"}
{"level":"info","ts":"2025-11-29T09:19:55.067534Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"6f20f2c4b2fb5f8a","local-member-id":"ea7e25599daad906","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-29T09:19:55.067717Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-29T09:19:55.071793Z","caller":"etcdserver/server.go:2595","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-29T09:19:55.071959Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2025-11-29T09:19:55.072006Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2025-11-29T09:20:15.052803Z","caller":"traceutil/trace.go:171","msg":"trace[25407896] transaction","detail":"{read_only:false; response_revision:297; number_of_response:1; }","duration":"106.818617ms","start":"2025-11-29T09:20:14.945956Z","end":"2025-11-29T09:20:15.052774Z","steps":["trace[25407896] 'process raft request' (duration: 106.616925ms)"],"step_count":1}
{"level":"info","ts":"2025-11-29T09:20:15.064957Z","caller":"traceutil/trace.go:171","msg":"trace[1542162002] transaction","detail":"{read_only:false; response_revision:300; number_of_response:1; }","duration":"106.599802ms","start":"2025-11-29T09:20:14.95834Z","end":"2025-11-29T09:20:15.064939Z","steps":["trace[1542162002] 'process raft request' (duration: 106.563165ms)"],"step_count":1}
{"level":"info","ts":"2025-11-29T09:20:15.065342Z","caller":"traceutil/trace.go:171","msg":"trace[758518492] transaction","detail":"{read_only:false; response_revision:298; number_of_response:1; }","duration":"119.137568ms","start":"2025-11-29T09:20:14.946194Z","end":"2025-11-29T09:20:15.065332Z","steps":["trace[758518492] 'process raft request' (duration: 118.584375ms)"],"step_count":1}
{"level":"info","ts":"2025-11-29T09:20:15.065438Z","caller":"traceutil/trace.go:171","msg":"trace[2009828336] transaction","detail":"{read_only:false; response_revision:299; number_of_response:1; }","duration":"112.325548ms","start":"2025-11-29T09:20:14.953105Z","end":"2025-11-29T09:20:15.065431Z","steps":["trace[2009828336] 'process raft request' (duration: 111.76593ms)"],"step_count":1}
==> kernel <==
09:20:46 up 1:03, 0 user, load average: 2.65, 2.59, 2.59
Linux old-k8s-version-071895 5.15.0-1084-aws #91~20.04.1-Ubuntu SMP Fri May 2 07:00:04 UTC 2025 aarch64 GNU/Linux
PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"
==> kindnet [db1d77c6c85eaf5ebd7dc839fb54d40271ee80c34795b249a47534f35c064f1c] <==
I1129 09:20:19.083145 1 main.go:109] connected to apiserver: https://10.96.0.1:443
I1129 09:20:19.083520 1 main.go:139] hostIP = 192.168.76.2
podIP = 192.168.76.2
I1129 09:20:19.083647 1 main.go:148] setting mtu 1500 for CNI
I1129 09:20:19.083659 1 main.go:178] kindnetd IP family: "ipv4"
I1129 09:20:19.083671 1 main.go:182] noMask IPv4 subnets: [10.244.0.0/16]
time="2025-11-29T09:20:19Z" level=info msg="Created plugin 10-kube-network-policies (kindnetd, handles RunPodSandbox,RemovePodSandbox)"
I1129 09:20:19.286160 1 controller.go:377] "Starting controller" name="kube-network-policies"
I1129 09:20:19.286239 1 controller.go:381] "Waiting for informer caches to sync"
I1129 09:20:19.286373 1 shared_informer.go:350] "Waiting for caches to sync" controller="kube-network-policies"
I1129 09:20:19.287882 1 controller.go:390] nri plugin exited: failed to connect to NRI service: dial unix /var/run/nri/nri.sock: connect: no such file or directory
I1129 09:20:19.580767 1 shared_informer.go:357] "Caches are synced" controller="kube-network-policies"
I1129 09:20:19.580802 1 metrics.go:72] Registering metrics
I1129 09:20:19.580865 1 controller.go:711] "Syncing nftables rules"
I1129 09:20:29.287220 1 main.go:297] Handling node with IPs: map[192.168.76.2:{}]
I1129 09:20:29.287264 1 main.go:301] handling current node
I1129 09:20:39.286004 1 main.go:297] Handling node with IPs: map[192.168.76.2:{}]
I1129 09:20:39.286281 1 main.go:301] handling current node
==> kube-apiserver [d34a4ced6121deea5f0e58655a9a45e86fccdde412c9acf3d1e35ab330cd1b4b] <==
I1129 09:19:58.687723 1 shared_informer.go:318] Caches are synced for crd-autoregister
I1129 09:19:58.689876 1 apf_controller.go:377] Running API Priority and Fairness config worker
I1129 09:19:58.689902 1 apf_controller.go:380] Running API Priority and Fairness periodic rebalancing process
I1129 09:19:58.690079 1 cache.go:39] Caches are synced for AvailableConditionController controller
I1129 09:19:58.691126 1 aggregator.go:166] initial CRD sync complete...
I1129 09:19:58.691143 1 autoregister_controller.go:141] Starting autoregister controller
I1129 09:19:58.691150 1 cache.go:32] Waiting for caches to sync for autoregister controller
I1129 09:19:58.691158 1 cache.go:39] Caches are synced for autoregister controller
E1129 09:19:58.752402 1 controller.go:145] while syncing ConfigMap "kube-system/kube-apiserver-legacy-service-account-token-tracking", err: namespaces "kube-system" not found
I1129 09:19:58.885509 1 controller.go:624] quota admission added evaluator for: leases.coordination.k8s.io
I1129 09:19:59.184340 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I1129 09:19:59.194717 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I1129 09:19:59.195065 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I1129 09:20:00.545658 1 controller.go:624] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1129 09:20:00.693098 1 controller.go:624] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1129 09:20:00.863619 1 alloc.go:330] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W1129 09:20:00.877801 1 lease.go:263] Resetting endpoints for master service "kubernetes" to [192.168.76.2]
I1129 09:20:00.879300 1 controller.go:624] quota admission added evaluator for: endpoints
I1129 09:20:00.885677 1 controller.go:624] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1129 09:20:01.758115 1 controller.go:624] quota admission added evaluator for: serviceaccounts
I1129 09:20:02.382429 1 controller.go:624] quota admission added evaluator for: deployments.apps
I1129 09:20:02.396930 1 alloc.go:330] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I1129 09:20:02.411199 1 controller.go:624] quota admission added evaluator for: daemonsets.apps
I1129 09:20:15.297358 1 controller.go:624] quota admission added evaluator for: replicasets.apps
I1129 09:20:15.463834 1 controller.go:624] quota admission added evaluator for: controllerrevisions.apps
==> kube-controller-manager [c6e9c9ab04ae16e634fbb9b4e1d16587356b43ecc4799412da2e56e79409870b] <==
I1129 09:20:15.111764 1 event.go:307] "Event occurred" object="kube-system/kube-scheduler-old-k8s-version-071895" fieldPath="" kind="Pod" apiVersion="v1" type="Warning" reason="NodeNotReady" message="Node is not ready"
I1129 09:20:15.174792 1 event.go:307] "Event occurred" object="kube-system/kube-apiserver-old-k8s-version-071895" fieldPath="" kind="Pod" apiVersion="v1" type="Warning" reason="NodeNotReady" message="Node is not ready"
I1129 09:20:15.320980 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set coredns-5dd5756b68 to 2"
I1129 09:20:15.351255 1 shared_informer.go:318] Caches are synced for garbage collector
I1129 09:20:15.351286 1 garbagecollector.go:166] "All resource monitors have synced. Proceeding to collect garbage"
I1129 09:20:15.384164 1 shared_informer.go:318] Caches are synced for garbage collector
I1129 09:20:15.486462 1 event.go:307] "Event occurred" object="kube-system/kube-proxy" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-4jxrn"
I1129 09:20:15.486489 1 event.go:307] "Event occurred" object="kube-system/kindnet" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kindnet-58g5f"
I1129 09:20:15.643761 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-rk2xx"
I1129 09:20:15.661237 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-htmzr"
I1129 09:20:15.701868 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="389.914526ms"
I1129 09:20:15.744722 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="42.452889ms"
I1129 09:20:15.746651 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="83.963µs"
I1129 09:20:17.246540 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set coredns-5dd5756b68 to 1 from 2"
I1129 09:20:17.300225 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: coredns-5dd5756b68-rk2xx"
I1129 09:20:17.312673 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="66.947307ms"
I1129 09:20:17.322261 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="9.52965ms"
I1129 09:20:17.323333 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="806.512µs"
I1129 09:20:29.431259 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="83.143µs"
I1129 09:20:29.490111 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="119.681µs"
I1129 09:20:30.130582 1 event.go:307] "Event occurred" object="kube-system/storage-provisioner" fieldPath="" kind="Pod" apiVersion="" type="Normal" reason="TaintManagerEviction" message="Cancelling deletion of Pod kube-system/storage-provisioner"
I1129 09:20:30.130619 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68-htmzr" fieldPath="" kind="Pod" apiVersion="" type="Normal" reason="TaintManagerEviction" message="Cancelling deletion of Pod kube-system/coredns-5dd5756b68-htmzr"
I1129 09:20:30.131138 1 node_lifecycle_controller.go:1048] "Controller detected that some Nodes are Ready. Exiting master disruption mode"
I1129 09:20:31.018335 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="24.226889ms"
I1129 09:20:31.018459 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="77.794µs"
==> kube-proxy [000a8de26034dcdc6da38237d77f79fa914b3088e593f0bbd13e14b39b42bf00] <==
I1129 09:20:16.555876 1 server_others.go:69] "Using iptables proxy"
I1129 09:20:16.579548 1 node.go:141] Successfully retrieved node IP: 192.168.76.2
I1129 09:20:16.643168 1 server.go:632] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I1129 09:20:16.645058 1 server_others.go:152] "Using iptables Proxier"
I1129 09:20:16.645109 1 server_others.go:421] "Detect-local-mode set to ClusterCIDR, but no cluster CIDR for family" ipFamily="IPv6"
I1129 09:20:16.645128 1 server_others.go:438] "Defaulting to no-op detect-local"
I1129 09:20:16.645164 1 proxier.go:251] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
I1129 09:20:16.645384 1 server.go:846] "Version info" version="v1.28.0"
I1129 09:20:16.645401 1 server.go:848] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1129 09:20:16.657042 1 config.go:188] "Starting service config controller"
I1129 09:20:16.657067 1 shared_informer.go:311] Waiting for caches to sync for service config
I1129 09:20:16.657128 1 config.go:97] "Starting endpoint slice config controller"
I1129 09:20:16.657132 1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
I1129 09:20:16.657163 1 config.go:315] "Starting node config controller"
I1129 09:20:16.657166 1 shared_informer.go:311] Waiting for caches to sync for node config
I1129 09:20:16.757328 1 shared_informer.go:318] Caches are synced for node config
I1129 09:20:16.757472 1 shared_informer.go:318] Caches are synced for endpoint slice config
I1129 09:20:16.757514 1 shared_informer.go:318] Caches are synced for service config
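Note: kube-proxy's route_localnet message above names its own two tunables. As a sketch only, with illustrative values that are not taken from this run, they would be passed as:
  kube-proxy --iptables-localhost-nodeports=false
  kube-proxy --nodeport-addresses=127.0.0.0/8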
==> kube-scheduler [7c5e9c05d20b870a1e96cdb0bbf1479f013609a2bbcde73ff5f9b106d4a35049] <==
I1129 09:19:58.666858 1 serving.go:348] Generated self-signed cert in-memory
W1129 09:20:00.321955 1 requestheader_controller.go:193] Unable to get configmap/extension-apiserver-authentication in kube-system. Usually fixed by 'kubectl create rolebinding -n kube-system ROLEBINDING_NAME --role=extension-apiserver-authentication-reader --serviceaccount=YOUR_NS:YOUR_SA'
W1129 09:20:00.322235 1 authentication.go:368] Error looking up in-cluster authentication configuration: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot get resource "configmaps" in API group "" in the namespace "kube-system"
W1129 09:20:00.322326 1 authentication.go:369] Continuing without authentication configuration. This may treat all requests as anonymous.
W1129 09:20:00.322411 1 authentication.go:370] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false
I1129 09:20:00.396927 1 server.go:154] "Starting Kubernetes Scheduler" version="v1.28.0"
I1129 09:20:00.399854 1 server.go:156] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1129 09:20:00.419574 1 secure_serving.go:210] Serving securely on 127.0.0.1:10259
I1129 09:20:00.431997 1 configmap_cafile_content.go:202] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I1129 09:20:00.432131 1 shared_informer.go:311] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
I1129 09:20:00.432227 1 tlsconfig.go:240] "Starting DynamicServingCertificateController"
W1129 09:20:00.482293 1 reflector.go:535] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E1129 09:20:00.482341 1 reflector.go:147] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
I1129 09:20:01.932942 1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
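Note: the scheduler warnings above carry their own suggested remediation; written out as a runnable command, with ROLEBINDING_NAME and YOUR_NS:YOUR_SA kept as the placeholders the log itself uses, it is:
  kubectl create rolebinding ROLEBINDING_NAME -n kube-system --role=extension-apiserver-authentication-reader --serviceaccount=YOUR_NS:YOUR_SA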
==> kubelet <==
Nov 29 09:20:15 old-k8s-version-071895 kubelet[1545]: I1129 09:20:15.542826 1545 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/3e4bdb82-85e5-468b-80dc-0481c990f117-kube-proxy\") pod \"kube-proxy-4jxrn\" (UID: \"3e4bdb82-85e5-468b-80dc-0481c990f117\") " pod="kube-system/kube-proxy-4jxrn"
Nov 29 09:20:15 old-k8s-version-071895 kubelet[1545]: I1129 09:20:15.542946 1545 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-cfg\" (UniqueName: \"kubernetes.io/host-path/d4743cee-0834-4a44-9cf7-d0228aa5b843-cni-cfg\") pod \"kindnet-58g5f\" (UID: \"d4743cee-0834-4a44-9cf7-d0228aa5b843\") " pod="kube-system/kindnet-58g5f"
Nov 29 09:20:15 old-k8s-version-071895 kubelet[1545]: I1129 09:20:15.543093 1545 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/d4743cee-0834-4a44-9cf7-d0228aa5b843-xtables-lock\") pod \"kindnet-58g5f\" (UID: \"d4743cee-0834-4a44-9cf7-d0228aa5b843\") " pod="kube-system/kindnet-58g5f"
Nov 29 09:20:15 old-k8s-version-071895 kubelet[1545]: I1129 09:20:15.543236 1545 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zcrqh\" (UniqueName: \"kubernetes.io/projected/3e4bdb82-85e5-468b-80dc-0481c990f117-kube-api-access-zcrqh\") pod \"kube-proxy-4jxrn\" (UID: \"3e4bdb82-85e5-468b-80dc-0481c990f117\") " pod="kube-system/kube-proxy-4jxrn"
Nov 29 09:20:15 old-k8s-version-071895 kubelet[1545]: I1129 09:20:15.543388 1545 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/d4743cee-0834-4a44-9cf7-d0228aa5b843-lib-modules\") pod \"kindnet-58g5f\" (UID: \"d4743cee-0834-4a44-9cf7-d0228aa5b843\") " pod="kube-system/kindnet-58g5f"
Nov 29 09:20:15 old-k8s-version-071895 kubelet[1545]: I1129 09:20:15.543527 1545 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hfjbl\" (UniqueName: \"kubernetes.io/projected/d4743cee-0834-4a44-9cf7-d0228aa5b843-kube-api-access-hfjbl\") pod \"kindnet-58g5f\" (UID: \"d4743cee-0834-4a44-9cf7-d0228aa5b843\") " pod="kube-system/kindnet-58g5f"
Nov 29 09:20:16 old-k8s-version-071895 kubelet[1545]: I1129 09:20:16.904236 1545 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kube-proxy-4jxrn" podStartSLOduration=1.904182809 podCreationTimestamp="2025-11-29 09:20:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 09:20:16.903944744 +0000 UTC m=+14.574033893" watchObservedRunningTime="2025-11-29 09:20:16.904182809 +0000 UTC m=+14.574271949"
Nov 29 09:20:22 old-k8s-version-071895 kubelet[1545]: I1129 09:20:22.690149 1545 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kindnet-58g5f" podStartSLOduration=5.068996977 podCreationTimestamp="2025-11-29 09:20:15 +0000 UTC" firstStartedPulling="2025-11-29 09:20:16.131889821 +0000 UTC m=+13.801978953" lastFinishedPulling="2025-11-29 09:20:18.75299704 +0000 UTC m=+16.423086171" observedRunningTime="2025-11-29 09:20:19.919717563 +0000 UTC m=+17.589806703" watchObservedRunningTime="2025-11-29 09:20:22.690104195 +0000 UTC m=+20.360193335"
Nov 29 09:20:29 old-k8s-version-071895 kubelet[1545]: I1129 09:20:29.372571 1545 kubelet_node_status.go:493] "Fast updating node status as it just became ready"
Nov 29 09:20:29 old-k8s-version-071895 kubelet[1545]: I1129 09:20:29.424392 1545 topology_manager.go:215] "Topology Admit Handler" podUID="784fe707-ae15-4eae-a70c-ec084ce3d812" podNamespace="kube-system" podName="storage-provisioner"
Nov 29 09:20:29 old-k8s-version-071895 kubelet[1545]: I1129 09:20:29.431465 1545 topology_manager.go:215] "Topology Admit Handler" podUID="c6b5f2ee-df4f-40a3-be2e-6f16e858e497" podNamespace="kube-system" podName="coredns-5dd5756b68-htmzr"
Nov 29 09:20:29 old-k8s-version-071895 kubelet[1545]: I1129 09:20:29.459512 1545 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/784fe707-ae15-4eae-a70c-ec084ce3d812-tmp\") pod \"storage-provisioner\" (UID: \"784fe707-ae15-4eae-a70c-ec084ce3d812\") " pod="kube-system/storage-provisioner"
Nov 29 09:20:29 old-k8s-version-071895 kubelet[1545]: I1129 09:20:29.459744 1545 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hzcr9\" (UniqueName: \"kubernetes.io/projected/784fe707-ae15-4eae-a70c-ec084ce3d812-kube-api-access-hzcr9\") pod \"storage-provisioner\" (UID: \"784fe707-ae15-4eae-a70c-ec084ce3d812\") " pod="kube-system/storage-provisioner"
Nov 29 09:20:29 old-k8s-version-071895 kubelet[1545]: I1129 09:20:29.459885 1545 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ch9tz\" (UniqueName: \"kubernetes.io/projected/c6b5f2ee-df4f-40a3-be2e-6f16e858e497-kube-api-access-ch9tz\") pod \"coredns-5dd5756b68-htmzr\" (UID: \"c6b5f2ee-df4f-40a3-be2e-6f16e858e497\") " pod="kube-system/coredns-5dd5756b68-htmzr"
Nov 29 09:20:29 old-k8s-version-071895 kubelet[1545]: I1129 09:20:29.460022 1545 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c6b5f2ee-df4f-40a3-be2e-6f16e858e497-config-volume\") pod \"coredns-5dd5756b68-htmzr\" (UID: \"c6b5f2ee-df4f-40a3-be2e-6f16e858e497\") " pod="kube-system/coredns-5dd5756b68-htmzr"
Nov 29 09:20:30 old-k8s-version-071895 kubelet[1545]: I1129 09:20:30.997910 1545 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=13.997856203 podCreationTimestamp="2025-11-29 09:20:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 09:20:30.970917027 +0000 UTC m=+28.641006167" watchObservedRunningTime="2025-11-29 09:20:30.997856203 +0000 UTC m=+28.667945343"
Nov 29 09:20:33 old-k8s-version-071895 kubelet[1545]: I1129 09:20:33.708750 1545 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/coredns-5dd5756b68-htmzr" podStartSLOduration=18.708653504 podCreationTimestamp="2025-11-29 09:20:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 09:20:30.99830195 +0000 UTC m=+28.668391090" watchObservedRunningTime="2025-11-29 09:20:33.708653504 +0000 UTC m=+31.378742653"
Nov 29 09:20:33 old-k8s-version-071895 kubelet[1545]: I1129 09:20:33.709581 1545 topology_manager.go:215] "Topology Admit Handler" podUID="3abcbd08-d7c4-4a13-b94c-6f6424975411" podNamespace="default" podName="busybox"
Nov 29 09:20:33 old-k8s-version-071895 kubelet[1545]: W1129 09:20:33.759772 1545 reflector.go:535] object-"default"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:old-k8s-version-071895" cannot list resource "configmaps" in API group "" in the namespace "default": no relationship found between node 'old-k8s-version-071895' and this object
Nov 29 09:20:33 old-k8s-version-071895 kubelet[1545]: E1129 09:20:33.759821 1545 reflector.go:147] object-"default"/"kube-root-ca.crt": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:old-k8s-version-071895" cannot list resource "configmaps" in API group "" in the namespace "default": no relationship found between node 'old-k8s-version-071895' and this object
Nov 29 09:20:33 old-k8s-version-071895 kubelet[1545]: I1129 09:20:33.794129 1545 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7w6jg\" (UniqueName: \"kubernetes.io/projected/3abcbd08-d7c4-4a13-b94c-6f6424975411-kube-api-access-7w6jg\") pod \"busybox\" (UID: \"3abcbd08-d7c4-4a13-b94c-6f6424975411\") " pod="default/busybox"
Nov 29 09:20:34 old-k8s-version-071895 kubelet[1545]: E1129 09:20:34.906850 1545 projected.go:292] Couldn't get configMap default/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition
Nov 29 09:20:34 old-k8s-version-071895 kubelet[1545]: E1129 09:20:34.908357 1545 projected.go:198] Error preparing data for projected volume kube-api-access-7w6jg for pod default/busybox: failed to sync configmap cache: timed out waiting for the condition
Nov 29 09:20:34 old-k8s-version-071895 kubelet[1545]: E1129 09:20:34.908523 1545 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3abcbd08-d7c4-4a13-b94c-6f6424975411-kube-api-access-7w6jg podName:3abcbd08-d7c4-4a13-b94c-6f6424975411 nodeName:}" failed. No retries permitted until 2025-11-29 09:20:35.408496185 +0000 UTC m=+33.078585316 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-7w6jg" (UniqueName: "kubernetes.io/projected/3abcbd08-d7c4-4a13-b94c-6f6424975411-kube-api-access-7w6jg") pod "busybox" (UID: "3abcbd08-d7c4-4a13-b94c-6f6424975411") : failed to sync configmap cache: timed out waiting for the condition
Nov 29 09:20:37 old-k8s-version-071895 kubelet[1545]: I1129 09:20:37.992486 1545 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="default/busybox" podStartSLOduration=2.8817945099999998 podCreationTimestamp="2025-11-29 09:20:33 +0000 UTC" firstStartedPulling="2025-11-29 09:20:35.706009491 +0000 UTC m=+33.376098623" lastFinishedPulling="2025-11-29 09:20:37.816649729 +0000 UTC m=+35.486738869" observedRunningTime="2025-11-29 09:20:37.991952135 +0000 UTC m=+35.662041292" watchObservedRunningTime="2025-11-29 09:20:37.992434756 +0000 UTC m=+35.662523896"
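Note: the projected-volume errors above are transient: the kubelet had not yet synced the kube-root-ca.crt configmap into its cache, and the mount succeeded on the 500ms retry (busybox is observed running at 09:20:37). If a similar failure did not self-heal, checks along these lines would be a reasonable starting point (context and object names taken from this run):
  kubectl --context old-k8s-version-071895 -n default get configmap kube-root-ca.crt
  kubectl --context old-k8s-version-071895 -n default describe pod busybox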
==> storage-provisioner [359d9432ef4979d387512d5a2a5a3cd9fb7a0987f4a3540a23407b70f7faf163] <==
I1129 09:20:30.214942 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1129 09:20:30.235967 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1129 09:20:30.236210 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I1129 09:20:30.252227 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I1129 09:20:30.255628 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_old-k8s-version-071895_105725d4-e591-4aa3-af10-2659a9fed2c2!
I1129 09:20:30.273258 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"d8dbb900-fced-4c3d-a6ea-15b88c536670", APIVersion:"v1", ResourceVersion:"415", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' old-k8s-version-071895_105725d4-e591-4aa3-af10-2659a9fed2c2 became leader
I1129 09:20:30.355956 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_old-k8s-version-071895_105725d4-e591-4aa3-af10-2659a9fed2c2!
-- /stdout --
helpers_test.go:262: (dbg) Run: out/minikube-linux-arm64 status --format={{.APIServer}} -p old-k8s-version-071895 -n old-k8s-version-071895
helpers_test.go:269: (dbg) Run: kubectl --context old-k8s-version-071895 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:293: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: end of post-mortem logs <<<
helpers_test.go:294: ---------------------/post-mortem---------------------------------
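Note: one detail worth flagging from the storage-provisioner log above: it takes its leader lock on an Endpoints object, not a coordination.k8s.io Lease. On a live cluster it could be inspected with (names from this run):
  kubectl --context old-k8s-version-071895 -n kube-system get endpoints k8s.io-minikube-hostpath -o yaml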
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: docker inspect <======
helpers_test.go:239: (dbg) Run: docker inspect old-k8s-version-071895
helpers_test.go:243: (dbg) docker inspect old-k8s-version-071895:
-- stdout --
[docker inspect output omitted here: verbatim duplicate of the docker inspect block in the first post-mortem above]
-- /stdout --
helpers_test.go:247: (dbg) Run: out/minikube-linux-arm64 status --format={{.Host}} -p old-k8s-version-071895 -n old-k8s-version-071895
helpers_test.go:252: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-arm64 -p old-k8s-version-071895 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-arm64 -p old-k8s-version-071895 logs -n 25: (1.675309086s)
helpers_test.go:260: TestStartStop/group/old-k8s-version/serial/DeployApp logs:
-- stdout --
==> Audit <==
┌─────────┬────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬──────────────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼──────────────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ ssh │ -p cilium-420729 sudo cat /usr/lib/systemd/system/cri-docker.service │ cilium-420729 │ jenkins │ v1.37.0 │ 29 Nov 25 09:15 UTC │ │
│ ssh │ -p cilium-420729 sudo cri-dockerd --version │ cilium-420729 │ jenkins │ v1.37.0 │ 29 Nov 25 09:15 UTC │ │
│ ssh │ -p cilium-420729 sudo systemctl status containerd --all --full --no-pager │ cilium-420729 │ jenkins │ v1.37.0 │ 29 Nov 25 09:15 UTC │ │
│ ssh │ -p cilium-420729 sudo systemctl cat containerd --no-pager │ cilium-420729 │ jenkins │ v1.37.0 │ 29 Nov 25 09:15 UTC │ │
│ ssh │ -p cilium-420729 sudo cat /lib/systemd/system/containerd.service │ cilium-420729 │ jenkins │ v1.37.0 │ 29 Nov 25 09:15 UTC │ │
│ ssh │ -p cilium-420729 sudo cat /etc/containerd/config.toml │ cilium-420729 │ jenkins │ v1.37.0 │ 29 Nov 25 09:15 UTC │ │
│ ssh │ -p cilium-420729 sudo containerd config dump │ cilium-420729 │ jenkins │ v1.37.0 │ 29 Nov 25 09:15 UTC │ │
│ ssh │ -p cilium-420729 sudo systemctl status crio --all --full --no-pager │ cilium-420729 │ jenkins │ v1.37.0 │ 29 Nov 25 09:15 UTC │ │
│ ssh │ -p cilium-420729 sudo systemctl cat crio --no-pager │ cilium-420729 │ jenkins │ v1.37.0 │ 29 Nov 25 09:15 UTC │ │
│ ssh │ -p cilium-420729 sudo find /etc/crio -type f -exec sh -c 'echo {}; cat {}' \; │ cilium-420729 │ jenkins │ v1.37.0 │ 29 Nov 25 09:15 UTC │ │
│ ssh │ -p cilium-420729 sudo crio config │ cilium-420729 │ jenkins │ v1.37.0 │ 29 Nov 25 09:15 UTC │ │
│ delete │ -p cilium-420729 │ cilium-420729 │ jenkins │ v1.37.0 │ 29 Nov 25 09:15 UTC │ 29 Nov 25 09:15 UTC │
│ start │ -p force-systemd-env-559836 --memory=3072 --alsologtostderr -v=5 --driver=docker --container-runtime=containerd │ force-systemd-env-559836 │ jenkins │ v1.37.0 │ 29 Nov 25 09:15 UTC │ 29 Nov 25 09:16 UTC │
│ ssh │ force-systemd-env-559836 ssh cat /etc/containerd/config.toml │ force-systemd-env-559836 │ jenkins │ v1.37.0 │ 29 Nov 25 09:16 UTC │ 29 Nov 25 09:16 UTC │
│ delete │ -p force-systemd-env-559836 │ force-systemd-env-559836 │ jenkins │ v1.37.0 │ 29 Nov 25 09:16 UTC │ 29 Nov 25 09:16 UTC │
│ start │ -p cert-expiration-592440 --memory=3072 --cert-expiration=3m --driver=docker --container-runtime=containerd │ cert-expiration-592440 │ jenkins │ v1.37.0 │ 29 Nov 25 09:16 UTC │ 29 Nov 25 09:17 UTC │
│ delete │ -p running-upgrade-115889 │ running-upgrade-115889 │ jenkins │ v1.37.0 │ 29 Nov 25 09:18 UTC │ 29 Nov 25 09:18 UTC │
│ start │ -p cert-options-515442 --memory=3072 --apiserver-ips=127.0.0.1 --apiserver-ips=192.168.15.15 --apiserver-names=localhost --apiserver-names=www.google.com --apiserver-port=8555 --driver=docker --container-runtime=containerd │ cert-options-515442 │ jenkins │ v1.37.0 │ 29 Nov 25 09:18 UTC │ 29 Nov 25 09:19 UTC │
│ ssh │ cert-options-515442 ssh openssl x509 -text -noout -in /var/lib/minikube/certs/apiserver.crt │ cert-options-515442 │ jenkins │ v1.37.0 │ 29 Nov 25 09:19 UTC │ 29 Nov 25 09:19 UTC │
│ ssh │ -p cert-options-515442 -- sudo cat /etc/kubernetes/admin.conf │ cert-options-515442 │ jenkins │ v1.37.0 │ 29 Nov 25 09:19 UTC │ 29 Nov 25 09:19 UTC │
│ delete │ -p cert-options-515442 │ cert-options-515442 │ jenkins │ v1.37.0 │ 29 Nov 25 09:19 UTC │ 29 Nov 25 09:19 UTC │
│ start │ -p old-k8s-version-071895 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.28.0 │ old-k8s-version-071895 │ jenkins │ v1.37.0 │ 29 Nov 25 09:19 UTC │ 29 Nov 25 09:20 UTC │
│ start │ -p cert-expiration-592440 --memory=3072 --cert-expiration=8760h --driver=docker --container-runtime=containerd │ cert-expiration-592440 │ jenkins │ v1.37.0 │ 29 Nov 25 09:20 UTC │ 29 Nov 25 09:20 UTC │
│ delete │ -p cert-expiration-592440 │ cert-expiration-592440 │ jenkins │ v1.37.0 │ 29 Nov 25 09:20 UTC │ 29 Nov 25 09:20 UTC │
│ start │ -p no-preload-230403 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.34.1 │ no-preload-230403 │ jenkins │ v1.37.0 │ 29 Nov 25 09:20 UTC │ │
└─────────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/11/29 09:20:12
Running on machine: ip-172-31-24-2
Binary: Built with gc go1.25.3 for linux/arm64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1129 09:20:12.939624 222878 out.go:360] Setting OutFile to fd 1 ...
I1129 09:20:12.939853 222878 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1129 09:20:12.939881 222878 out.go:374] Setting ErrFile to fd 2...
I1129 09:20:12.939901 222878 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1129 09:20:12.940241 222878 root.go:338] Updating PATH: /home/jenkins/minikube-integration/22000-2317/.minikube/bin
I1129 09:20:12.940820 222878 out.go:368] Setting JSON to false
I1129 09:20:12.941892 222878 start.go:133] hostinfo: {"hostname":"ip-172-31-24-2","uptime":3764,"bootTime":1764404249,"procs":190,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1084-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"6d436adf-771e-4269-b9a3-c25fd4fca4f5"}
I1129 09:20:12.941996 222878 start.go:143] virtualization:
I1129 09:20:12.947843 222878 out.go:179] * [no-preload-230403] minikube v1.37.0 on Ubuntu 20.04 (arm64)
I1129 09:20:12.951543 222878 out.go:179] - MINIKUBE_LOCATION=22000
I1129 09:20:12.951778 222878 notify.go:221] Checking for updates...
I1129 09:20:12.959740 222878 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1129 09:20:12.963748 222878 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/22000-2317/kubeconfig
I1129 09:20:12.967028 222878 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/22000-2317/.minikube
I1129 09:20:12.970194 222878 out.go:179] - MINIKUBE_BIN=out/minikube-linux-arm64
I1129 09:20:12.973266 222878 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1129 09:20:12.976789 222878 config.go:182] Loaded profile config "old-k8s-version-071895": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1129 09:20:12.976879 222878 driver.go:422] Setting default libvirt URI to qemu:///system
I1129 09:20:13.015916 222878 docker.go:124] docker version: linux-28.1.1:Docker Engine - Community
I1129 09:20:13.016116 222878 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1129 09:20:13.089040 222878 info.go:266] docker info: {ID:J4M5:W6MX:GOX4:4LAQ:VI7E:VJNF:J3OP:OPBH:GF7G:PPY4:WQWD:7N4L Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:36 OomKillDisable:true NGoroutines:52 SystemTime:2025-11-29 09:20:13.078615429 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214831104 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-24-2 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I1129 09:20:13.089149 222878 docker.go:319] overlay module found
I1129 09:20:13.094585 222878 out.go:179] * Using the docker driver based on user configuration
I1129 09:20:13.101060 222878 start.go:309] selected driver: docker
I1129 09:20:13.101087 222878 start.go:927] validating driver "docker" against <nil>
I1129 09:20:13.101110 222878 start.go:938] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1129 09:20:13.101860 222878 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1129 09:20:13.162298 222878 info.go:266] docker info: {ID:J4M5:W6MX:GOX4:4LAQ:VI7E:VJNF:J3OP:OPBH:GF7G:PPY4:WQWD:7N4L Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:36 OomKillDisable:true NGoroutines:52 SystemTime:2025-11-29 09:20:13.152737541 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214831104 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-24-2 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I1129 09:20:13.162462 222878 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I1129 09:20:13.162689 222878 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1129 09:20:13.165689 222878 out.go:179] * Using Docker driver with root privileges
I1129 09:20:13.168555 222878 cni.go:84] Creating CNI manager for ""
I1129 09:20:13.168702 222878 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1129 09:20:13.168717 222878 start_flags.go:336] Found "CNI" CNI - setting NetworkPlugin=cni
I1129 09:20:13.168799 222878 start.go:353] cluster config:
{Name:no-preload-230403 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-230403 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1129 09:20:13.171944 222878 out.go:179] * Starting "no-preload-230403" primary control-plane node in "no-preload-230403" cluster
I1129 09:20:13.174795 222878 cache.go:134] Beginning downloading kic base image for docker with containerd
I1129 09:20:13.177867 222878 out.go:179] * Pulling base image v0.0.48-1763789673-21948 ...
I1129 09:20:13.180600 222878 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
I1129 09:20:13.180815 222878 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f in local docker daemon
I1129 09:20:13.180863 222878 cache.go:107] acquiring lock: {Name:mkc9ca05df03f187ae0239342774baa6ad8c9aea Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1129 09:20:13.180958 222878 cache.go:107] acquiring lock: {Name:mk1a5c919477c9b6035d1da624b0b2445dbe0e73 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1129 09:20:13.181026 222878 cache.go:115] /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-apiserver_v1.34.1 exists
I1129 09:20:13.181043 222878 cache.go:96] cache image "registry.k8s.io/kube-apiserver:v1.34.1" -> "/home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-apiserver_v1.34.1" took 86.212µs
I1129 09:20:13.181062 222878 cache.go:80] save to tar file registry.k8s.io/kube-apiserver:v1.34.1 -> /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-apiserver_v1.34.1 succeeded
I1129 09:20:13.181080 222878 cache.go:107] acquiring lock: {Name:mk74fc1ce0ee5a4f599a03d41c7dab91b2a2e933 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1129 09:20:13.181115 222878 cache.go:115] /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-controller-manager_v1.34.1 exists
I1129 09:20:13.181125 222878 cache.go:96] cache image "registry.k8s.io/kube-controller-manager:v1.34.1" -> "/home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-controller-manager_v1.34.1" took 46.598µs
I1129 09:20:13.181131 222878 cache.go:80] save to tar file registry.k8s.io/kube-controller-manager:v1.34.1 -> /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-controller-manager_v1.34.1 succeeded
I1129 09:20:13.181141 222878 cache.go:107] acquiring lock: {Name:mk8695629c5903582c523a837d766d417499d914 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1129 09:20:13.181179 222878 cache.go:115] /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-scheduler_v1.34.1 exists
I1129 09:20:13.181189 222878 cache.go:96] cache image "registry.k8s.io/kube-scheduler:v1.34.1" -> "/home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-scheduler_v1.34.1" took 49.445µs
I1129 09:20:13.181196 222878 cache.go:80] save to tar file registry.k8s.io/kube-scheduler:v1.34.1 -> /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-scheduler_v1.34.1 succeeded
I1129 09:20:13.181205 222878 cache.go:107] acquiring lock: {Name:mk6962b4fc4c58f41448580e388a757daf8f6018 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1129 09:20:13.181239 222878 cache.go:115] /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-proxy_v1.34.1 exists
I1129 09:20:13.181249 222878 cache.go:96] cache image "registry.k8s.io/kube-proxy:v1.34.1" -> "/home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-proxy_v1.34.1" took 44.94µs
I1129 09:20:13.181255 222878 cache.go:80] save to tar file registry.k8s.io/kube-proxy:v1.34.1 -> /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-proxy_v1.34.1 succeeded
I1129 09:20:13.181269 222878 cache.go:107] acquiring lock: {Name:mk75f52747e0531666c302459e925614b33b76b2 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1129 09:20:13.181314 222878 cache.go:115] /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/pause_3.10.1 exists
I1129 09:20:13.181323 222878 cache.go:96] cache image "registry.k8s.io/pause:3.10.1" -> "/home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/pause_3.10.1" took 55.639µs
I1129 09:20:13.181332 222878 cache.go:80] save to tar file registry.k8s.io/pause:3.10.1 -> /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/pause_3.10.1 succeeded
I1129 09:20:13.181345 222878 cache.go:107] acquiring lock: {Name:mke59d5887f27460b7717e6fa1d7c7be222b2ad7 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1129 09:20:13.181380 222878 cache.go:115] /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/etcd_3.6.4-0 exists
I1129 09:20:13.181391 222878 cache.go:96] cache image "registry.k8s.io/etcd:3.6.4-0" -> "/home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/etcd_3.6.4-0" took 46.433µs
I1129 09:20:13.181396 222878 cache.go:80] save to tar file registry.k8s.io/etcd:3.6.4-0 -> /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/etcd_3.6.4-0 succeeded
I1129 09:20:13.181409 222878 cache.go:107] acquiring lock: {Name:mkece740ade6508db73b1e245e73f976785e2996 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1129 09:20:13.181442 222878 cache.go:115] /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/coredns/coredns_v1.12.1 exists
I1129 09:20:13.181450 222878 cache.go:96] cache image "registry.k8s.io/coredns/coredns:v1.12.1" -> "/home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/coredns/coredns_v1.12.1" took 45.654µs
I1129 09:20:13.181455 222878 cache.go:80] save to tar file registry.k8s.io/coredns/coredns:v1.12.1 -> /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/coredns/coredns_v1.12.1 succeeded
I1129 09:20:13.181552 222878 profile.go:143] Saving config to /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/config.json ...
I1129 09:20:13.181573 222878 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/config.json: {Name:mkedfced3d2b7fa7d1f9faae9aecd4cdc6897bf4 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:20:13.181779 222878 cache.go:115] /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/gcr.io/k8s-minikube/storage-provisioner_v5 exists
I1129 09:20:13.181796 222878 cache.go:96] cache image "gcr.io/k8s-minikube/storage-provisioner:v5" -> "/home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/gcr.io/k8s-minikube/storage-provisioner_v5" took 942.365µs
I1129 09:20:13.181804 222878 cache.go:80] save to tar file gcr.io/k8s-minikube/storage-provisioner:v5 -> /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/gcr.io/k8s-minikube/storage-provisioner_v5 succeeded
I1129 09:20:13.181857 222878 cache.go:87] Successfully saved all images to host disk.
I1129 09:20:13.201388 222878 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f in local docker daemon, skipping pull
I1129 09:20:13.201410 222878 cache.go:158] gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f exists in daemon, skipping load
I1129 09:20:13.201431 222878 cache.go:243] Successfully downloaded all kic artifacts
I1129 09:20:13.201462 222878 start.go:360] acquireMachinesLock for no-preload-230403: {Name:mk2a91c20925489376678f93ce44b3d1de57601f Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1129 09:20:13.201622 222878 start.go:364] duration metric: took 139.242µs to acquireMachinesLock for "no-preload-230403"
I1129 09:20:13.201663 222878 start.go:93] Provisioning new machine with config: &{Name:no-preload-230403 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-230403 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1129 09:20:13.201746 222878 start.go:125] createHost starting for "" (driver="docker")
I1129 09:20:09.378511 219229 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:20:09.878391 219229 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:20:10.379008 219229 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:20:10.879016 219229 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:20:11.378477 219229 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:20:11.879067 219229 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:20:12.378498 219229 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:20:12.878370 219229 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:20:13.378426 219229 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:20:13.879213 219229 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:20:14.378760 219229 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:20:14.880612 219229 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:20:15.379061 219229 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1129 09:20:15.530412 219229 kubeadm.go:1114] duration metric: took 11.369681639s to wait for elevateKubeSystemPrivileges
I1129 09:20:15.530446 219229 kubeadm.go:403] duration metric: took 31.525981112s to StartCluster
I1129 09:20:15.530463 219229 settings.go:142] acquiring lock: {Name:mk44917d1324740eeda65bf3aa312ad1561d3ed4 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:20:15.530529 219229 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/22000-2317/kubeconfig
I1129 09:20:15.531211 219229 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-2317/kubeconfig: {Name:mk3c09eb9158ba85342a695b6ac4b1a5f69e1b04 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:20:15.531425 219229 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1129 09:20:15.531520 219229 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1129 09:20:15.531760 219229 config.go:182] Loaded profile config "old-k8s-version-071895": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1129 09:20:15.531752 219229 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1129 09:20:15.531869 219229 addons.go:70] Setting storage-provisioner=true in profile "old-k8s-version-071895"
I1129 09:20:15.531886 219229 addons.go:239] Setting addon storage-provisioner=true in "old-k8s-version-071895"
I1129 09:20:15.531914 219229 host.go:66] Checking if "old-k8s-version-071895" exists ...
I1129 09:20:15.532442 219229 cli_runner.go:164] Run: docker container inspect old-k8s-version-071895 --format={{.State.Status}}
I1129 09:20:15.532702 219229 addons.go:70] Setting default-storageclass=true in profile "old-k8s-version-071895"
I1129 09:20:15.532736 219229 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "old-k8s-version-071895"
I1129 09:20:15.533094 219229 cli_runner.go:164] Run: docker container inspect old-k8s-version-071895 --format={{.State.Status}}
I1129 09:20:15.536113 219229 out.go:179] * Verifying Kubernetes components...
I1129 09:20:15.539443 219229 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1129 09:20:15.574128 219229 addons.go:239] Setting addon default-storageclass=true in "old-k8s-version-071895"
I1129 09:20:15.574169 219229 host.go:66] Checking if "old-k8s-version-071895" exists ...
I1129 09:20:15.574614 219229 cli_runner.go:164] Run: docker container inspect old-k8s-version-071895 --format={{.State.Status}}
I1129 09:20:15.575661 219229 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1129 09:20:15.578616 219229 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1129 09:20:15.578636 219229 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1129 09:20:15.578703 219229 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-071895
I1129 09:20:15.596399 219229 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1129 09:20:15.596427 219229 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1129 09:20:15.596503 219229 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-071895
I1129 09:20:15.630157 219229 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33053 SSHKeyPath:/home/jenkins/minikube-integration/22000-2317/.minikube/machines/old-k8s-version-071895/id_rsa Username:docker}
I1129 09:20:15.639128 219229 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33053 SSHKeyPath:/home/jenkins/minikube-integration/22000-2317/.minikube/machines/old-k8s-version-071895/id_rsa Username:docker}
I1129 09:20:15.896152 219229 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1129 09:20:15.896336 219229 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.76.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I1129 09:20:16.015161 219229 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1129 09:20:16.026843 219229 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1129 09:20:17.194520 219229 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.76.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -": (1.298139458s)
I1129 09:20:17.194560 219229 start.go:977] {"host.minikube.internal": 192.168.76.1} host record injected into CoreDNS's ConfigMap
I1129 09:20:17.195641 219229 ssh_runner.go:235] Completed: sudo systemctl start kubelet: (1.299459942s)
I1129 09:20:17.196336 219229 node_ready.go:35] waiting up to 6m0s for node "old-k8s-version-071895" to be "Ready" ...
I1129 09:20:17.598641 219229 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (1.583439516s)
I1129 09:20:17.598752 219229 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: (1.571873758s)
I1129 09:20:17.633446 219229 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
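The CoreDNS rewrite completed above is an in-place edit of the coredns ConfigMap: the Corefile gains a hosts{} block mapping host.minikube.internal to the network gateway before being pushed back with kubectl replace. A minimal standalone sketch of the same pipeline, assuming kubectl already points at this cluster and the gateway is 192.168.76.1 as in this run:

# Splice a hosts{} block for host.minikube.internal into the Corefile and
# replace the ConfigMap in place (same sed pipeline the log shows above).
kubectl -n kube-system get configmap coredns -o yaml \
  | sed -e '/forward . \/etc\/resolv.conf/i\        hosts {\n           192.168.76.1 host.minikube.internal\n           fallthrough\n        }' \
  | kubectl replace -f -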
I1129 09:20:13.207006 222878 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I1129 09:20:13.207293 222878 start.go:159] libmachine.API.Create for "no-preload-230403" (driver="docker")
I1129 09:20:13.207340 222878 client.go:173] LocalClient.Create starting
I1129 09:20:13.207488 222878 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/22000-2317/.minikube/certs/ca.pem
I1129 09:20:13.207529 222878 main.go:143] libmachine: Decoding PEM data...
I1129 09:20:13.207573 222878 main.go:143] libmachine: Parsing certificate...
I1129 09:20:13.207655 222878 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/22000-2317/.minikube/certs/cert.pem
I1129 09:20:13.207690 222878 main.go:143] libmachine: Decoding PEM data...
I1129 09:20:13.207710 222878 main.go:143] libmachine: Parsing certificate...
I1129 09:20:13.208128 222878 cli_runner.go:164] Run: docker network inspect no-preload-230403 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1129 09:20:13.227770 222878 cli_runner.go:211] docker network inspect no-preload-230403 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1129 09:20:13.227856 222878 network_create.go:284] running [docker network inspect no-preload-230403] to gather additional debugging logs...
I1129 09:20:13.227880 222878 cli_runner.go:164] Run: docker network inspect no-preload-230403
W1129 09:20:13.250504 222878 cli_runner.go:211] docker network inspect no-preload-230403 returned with exit code 1
I1129 09:20:13.250537 222878 network_create.go:287] error running [docker network inspect no-preload-230403]: docker network inspect no-preload-230403: exit status 1
stdout:
[]
stderr:
Error response from daemon: network no-preload-230403 not found
I1129 09:20:13.250551 222878 network_create.go:289] output of [docker network inspect no-preload-230403]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network no-preload-230403 not found
** /stderr **
I1129 09:20:13.250655 222878 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1129 09:20:13.269213 222878 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-8664e809540f IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:c2:5a:a5:48:89:fb} reservation:<nil>}
I1129 09:20:13.269665 222878 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-fe5a1fed3d29 IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:8e:0c:ca:69:14:77} reservation:<nil>}
I1129 09:20:13.270007 222878 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-c3b36bc67c6b IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:02:2d:06:dd:2d:03} reservation:<nil>}
I1129 09:20:13.270333 222878 network.go:211] skipping subnet 192.168.76.0/24 that is taken: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName:br-46e34ec2f3d7 IfaceIPv4:192.168.76.1 IfaceMTU:1500 IfaceMAC:7a:63:b9:c9:b8:a0} reservation:<nil>}
I1129 09:20:13.270853 222878 network.go:206] using free private subnet 192.168.85.0/24: &{IP:192.168.85.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.85.0/24 Gateway:192.168.85.1 ClientMin:192.168.85.2 ClientMax:192.168.85.254 Broadcast:192.168.85.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0x4001a000e0}
I1129 09:20:13.270885 222878 network_create.go:124] attempt to create docker network no-preload-230403 192.168.85.0/24 with gateway 192.168.85.1 and MTU of 1500 ...
I1129 09:20:13.270944 222878 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.85.0/24 --gateway=192.168.85.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=no-preload-230403 no-preload-230403
I1129 09:20:13.339116 222878 network_create.go:108] docker network no-preload-230403 192.168.85.0/24 created
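The four "skipping subnet" lines above show the free-subnet scan: minikube walks candidate 192.168.x.0/24 blocks (49, 58, 67, 76, 85, ...) and takes the first one no existing bridge network owns. A rough shell equivalent of that scan, assuming the same candidate list and a placeholder network name demo-net:

# Collect subnets already claimed by docker networks, then create a bridge
# on the first candidate /24 that is still free.
taken=$(docker network ls -q | xargs docker network inspect \
          --format '{{range .IPAM.Config}}{{.Subnet}}{{end}}')
for third in 49 58 67 76 85 94; do
  subnet="192.168.${third}.0/24"
  case "$taken" in *"$subnet"*) continue ;; esac
  docker network create --driver=bridge --subnet="$subnet" \
    --gateway="192.168.${third}.1" -o com.docker.network.driver.mtu=1500 demo-net
  break
done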
I1129 09:20:13.339148 222878 kic.go:121] calculated static IP "192.168.85.2" for the "no-preload-230403" container
I1129 09:20:13.339222 222878 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1129 09:20:13.358931 222878 cli_runner.go:164] Run: docker volume create no-preload-230403 --label name.minikube.sigs.k8s.io=no-preload-230403 --label created_by.minikube.sigs.k8s.io=true
I1129 09:20:13.376848 222878 oci.go:103] Successfully created a docker volume no-preload-230403
I1129 09:20:13.376977 222878 cli_runner.go:164] Run: docker run --rm --name no-preload-230403-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=no-preload-230403 --entrypoint /usr/bin/test -v no-preload-230403:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -d /var/lib
I1129 09:20:13.960824 222878 oci.go:107] Successfully prepared a docker volume no-preload-230403
I1129 09:20:13.960886 222878 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
W1129 09:20:13.961020 222878 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I1129 09:20:13.961137 222878 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1129 09:20:14.052602 222878 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname no-preload-230403 --name no-preload-230403 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=no-preload-230403 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=no-preload-230403 --network no-preload-230403 --ip 192.168.85.2 --volume no-preload-230403:/var --security-opt apparmor=unconfined --memory=3072mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f
I1129 09:20:14.434508 222878 cli_runner.go:164] Run: docker container inspect no-preload-230403 --format={{.State.Running}}
I1129 09:20:14.469095 222878 cli_runner.go:164] Run: docker container inspect no-preload-230403 --format={{.State.Status}}
I1129 09:20:14.505837 222878 cli_runner.go:164] Run: docker exec no-preload-230403 stat /var/lib/dpkg/alternatives/iptables
I1129 09:20:14.574820 222878 oci.go:144] the created container "no-preload-230403" has a running status.
I1129 09:20:14.574847 222878 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/22000-2317/.minikube/machines/no-preload-230403/id_rsa...
I1129 09:20:14.765899 222878 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/22000-2317/.minikube/machines/no-preload-230403/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1129 09:20:14.803197 222878 cli_runner.go:164] Run: docker container inspect no-preload-230403 --format={{.State.Status}}
I1129 09:20:14.838341 222878 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1129 09:20:14.838366 222878 kic_runner.go:114] Args: [docker exec --privileged no-preload-230403 chown docker:docker /home/docker/.ssh/authorized_keys]
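The id_rsa steps above boil down to minting a key pair on the host and authorizing its public half for the docker user inside the container. A hand-rolled sketch of the same flow (container name taken from this run; the local key path is illustrative):

# Create a key pair, install the public key as authorized_keys inside the
# container, and fix its ownership, as kic_runner does above.
ssh-keygen -t rsa -f ./id_rsa -N ''
docker exec -i no-preload-230403 /bin/sh -c \
  'mkdir -p /home/docker/.ssh && cat > /home/docker/.ssh/authorized_keys' < ./id_rsa.pub
docker exec --privileged no-preload-230403 chown -R docker:docker /home/docker/.ssh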
I1129 09:20:14.971747 222878 cli_runner.go:164] Run: docker container inspect no-preload-230403 --format={{.State.Status}}
I1129 09:20:14.997195 222878 machine.go:94] provisionDockerMachine start ...
I1129 09:20:14.997331 222878 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-230403
I1129 09:20:15.036227 222878 main.go:143] libmachine: Using SSH client type: native
I1129 09:20:15.036638 222878 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3dad70] 0x3dd270 <nil> [] 0s} 127.0.0.1 33058 <nil> <nil>}
I1129 09:20:15.036651 222878 main.go:143] libmachine: About to run SSH command:
hostname
I1129 09:20:15.042876 222878 main.go:143] libmachine: Error dialing TCP: ssh: handshake failed: EOF
I1129 09:20:17.636479 219229 addons.go:530] duration metric: took 2.104720222s for enable addons: enabled=[storage-provisioner default-storageclass]
I1129 09:20:17.699584 219229 kapi.go:214] "coredns" deployment in "kube-system" namespace and "old-k8s-version-071895" context rescaled to 1 replicas
W1129 09:20:19.201224 219229 node_ready.go:57] node "old-k8s-version-071895" has "Ready":"False" status (will retry)
I1129 09:20:18.208511 222878 main.go:143] libmachine: SSH cmd err, output: <nil>: no-preload-230403
I1129 09:20:18.208576 222878 ubuntu.go:182] provisioning hostname "no-preload-230403"
I1129 09:20:18.208750 222878 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-230403
I1129 09:20:18.231955 222878 main.go:143] libmachine: Using SSH client type: native
I1129 09:20:18.232303 222878 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3dad70] 0x3dd270 <nil> [] 0s} 127.0.0.1 33058 <nil> <nil>}
I1129 09:20:18.232314 222878 main.go:143] libmachine: About to run SSH command:
sudo hostname no-preload-230403 && echo "no-preload-230403" | sudo tee /etc/hostname
I1129 09:20:18.417308 222878 main.go:143] libmachine: SSH cmd err, output: <nil>: no-preload-230403
I1129 09:20:18.417502 222878 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-230403
I1129 09:20:18.446833 222878 main.go:143] libmachine: Using SSH client type: native
I1129 09:20:18.447196 222878 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3dad70] 0x3dd270 <nil> [] 0s} 127.0.0.1 33058 <nil> <nil>}
I1129 09:20:18.447217 222878 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sno-preload-230403' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 no-preload-230403/g' /etc/hosts;
else
echo '127.0.1.1 no-preload-230403' | sudo tee -a /etc/hosts;
fi
fi
I1129 09:20:18.609294 222878 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1129 09:20:18.609323 222878 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/22000-2317/.minikube CaCertPath:/home/jenkins/minikube-integration/22000-2317/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/22000-2317/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/22000-2317/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/22000-2317/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/22000-2317/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/22000-2317/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/22000-2317/.minikube}
I1129 09:20:18.609357 222878 ubuntu.go:190] setting up certificates
I1129 09:20:18.609367 222878 provision.go:84] configureAuth start
I1129 09:20:18.609424 222878 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" no-preload-230403
I1129 09:20:18.633658 222878 provision.go:143] copyHostCerts
I1129 09:20:18.633724 222878 exec_runner.go:144] found /home/jenkins/minikube-integration/22000-2317/.minikube/ca.pem, removing ...
I1129 09:20:18.633733 222878 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22000-2317/.minikube/ca.pem
I1129 09:20:18.633804 222878 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22000-2317/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/22000-2317/.minikube/ca.pem (1082 bytes)
I1129 09:20:18.633884 222878 exec_runner.go:144] found /home/jenkins/minikube-integration/22000-2317/.minikube/cert.pem, removing ...
I1129 09:20:18.633890 222878 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22000-2317/.minikube/cert.pem
I1129 09:20:18.633917 222878 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22000-2317/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/22000-2317/.minikube/cert.pem (1123 bytes)
I1129 09:20:18.633975 222878 exec_runner.go:144] found /home/jenkins/minikube-integration/22000-2317/.minikube/key.pem, removing ...
I1129 09:20:18.633979 222878 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22000-2317/.minikube/key.pem
I1129 09:20:18.634022 222878 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22000-2317/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/22000-2317/.minikube/key.pem (1679 bytes)
I1129 09:20:18.634072 222878 provision.go:117] generating server cert: /home/jenkins/minikube-integration/22000-2317/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/22000-2317/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/22000-2317/.minikube/certs/ca-key.pem org=jenkins.no-preload-230403 san=[127.0.0.1 192.168.85.2 localhost minikube no-preload-230403]
I1129 09:20:18.830643 222878 provision.go:177] copyRemoteCerts
I1129 09:20:18.830732 222878 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1129 09:20:18.830804 222878 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-230403
I1129 09:20:18.849046 222878 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33058 SSHKeyPath:/home/jenkins/minikube-integration/22000-2317/.minikube/machines/no-preload-230403/id_rsa Username:docker}
I1129 09:20:18.957503 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1129 09:20:18.982683 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/machines/server.pem --> /etc/docker/server.pem (1220 bytes)
I1129 09:20:19.017142 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I1129 09:20:19.036354 222878 provision.go:87] duration metric: took 426.964935ms to configureAuth
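configureAuth above issues a server certificate from minikube's own CA with SANs for 127.0.0.1, 192.168.85.2, localhost, minikube, and the node name. Roughly the same certificate can be minted with openssl; a sketch assuming the ca.pem/ca-key.pem pair from the certs directory and bash for the process substitution:

# Create a key and CSR, then sign it with the local CA, attaching the same SANs.
openssl req -new -newkey rsa:2048 -nodes -keyout server-key.pem \
  -subj "/O=jenkins.no-preload-230403" -out server.csr
openssl x509 -req -in server.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial \
  -days 365 -out server.pem -extfile <(printf \
  'subjectAltName=IP:127.0.0.1,IP:192.168.85.2,DNS:localhost,DNS:minikube,DNS:no-preload-230403')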
I1129 09:20:19.036391 222878 ubuntu.go:206] setting minikube options for container-runtime
I1129 09:20:19.036594 222878 config.go:182] Loaded profile config "no-preload-230403": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1129 09:20:19.036608 222878 machine.go:97] duration metric: took 4.039383275s to provisionDockerMachine
I1129 09:20:19.036705 222878 client.go:176] duration metric: took 5.829342348s to LocalClient.Create
I1129 09:20:19.036723 222878 start.go:167] duration metric: took 5.829433418s to libmachine.API.Create "no-preload-230403"
I1129 09:20:19.036733 222878 start.go:293] postStartSetup for "no-preload-230403" (driver="docker")
I1129 09:20:19.036744 222878 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1129 09:20:19.036810 222878 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1129 09:20:19.036863 222878 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-230403
I1129 09:20:19.054558 222878 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33058 SSHKeyPath:/home/jenkins/minikube-integration/22000-2317/.minikube/machines/no-preload-230403/id_rsa Username:docker}
I1129 09:20:19.161154 222878 ssh_runner.go:195] Run: cat /etc/os-release
I1129 09:20:19.165056 222878 main.go:143] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1129 09:20:19.165086 222878 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1129 09:20:19.165116 222878 filesync.go:126] Scanning /home/jenkins/minikube-integration/22000-2317/.minikube/addons for local assets ...
I1129 09:20:19.165196 222878 filesync.go:126] Scanning /home/jenkins/minikube-integration/22000-2317/.minikube/files for local assets ...
I1129 09:20:19.165294 222878 filesync.go:149] local asset: /home/jenkins/minikube-integration/22000-2317/.minikube/files/etc/ssl/certs/41372.pem -> 41372.pem in /etc/ssl/certs
I1129 09:20:19.165459 222878 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1129 09:20:19.175008 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/files/etc/ssl/certs/41372.pem --> /etc/ssl/certs/41372.pem (1708 bytes)
I1129 09:20:19.202166 222878 start.go:296] duration metric: took 165.419871ms for postStartSetup
I1129 09:20:19.202535 222878 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" no-preload-230403
I1129 09:20:19.222107 222878 profile.go:143] Saving config to /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/config.json ...
I1129 09:20:19.222396 222878 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1129 09:20:19.222436 222878 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-230403
I1129 09:20:19.240201 222878 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33058 SSHKeyPath:/home/jenkins/minikube-integration/22000-2317/.minikube/machines/no-preload-230403/id_rsa Username:docker}
I1129 09:20:19.346358 222878 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1129 09:20:19.351907 222878 start.go:128] duration metric: took 6.150146246s to createHost
I1129 09:20:19.351975 222878 start.go:83] releasing machines lock for "no-preload-230403", held for 6.150337057s
I1129 09:20:19.352082 222878 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" no-preload-230403
I1129 09:20:19.369647 222878 ssh_runner.go:195] Run: cat /version.json
I1129 09:20:19.369701 222878 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-230403
I1129 09:20:19.369794 222878 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1129 09:20:19.369854 222878 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-230403
I1129 09:20:19.412764 222878 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33058 SSHKeyPath:/home/jenkins/minikube-integration/22000-2317/.minikube/machines/no-preload-230403/id_rsa Username:docker}
I1129 09:20:19.422423 222878 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33058 SSHKeyPath:/home/jenkins/minikube-integration/22000-2317/.minikube/machines/no-preload-230403/id_rsa Username:docker}
I1129 09:20:19.618519 222878 ssh_runner.go:195] Run: systemctl --version
I1129 09:20:19.626187 222878 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1129 09:20:19.630590 222878 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1129 09:20:19.630681 222878 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1129 09:20:19.659536 222878 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/10-crio-bridge.conflist.disabled] bridge cni config(s)
I1129 09:20:19.659559 222878 start.go:496] detecting cgroup driver to use...
I1129 09:20:19.659594 222878 detect.go:187] detected "cgroupfs" cgroup driver on host os
I1129 09:20:19.659644 222878 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1129 09:20:19.675641 222878 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1129 09:20:19.690722 222878 docker.go:218] disabling cri-docker service (if available) ...
I1129 09:20:19.690795 222878 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1129 09:20:19.710602 222878 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1129 09:20:19.735104 222878 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1129 09:20:19.862098 222878 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1129 09:20:20.020548 222878 docker.go:234] disabling docker service ...
I1129 09:20:20.020764 222878 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1129 09:20:20.049579 222878 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1129 09:20:20.066560 222878 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1129 09:20:20.195869 222878 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1129 09:20:20.317681 222878 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1129 09:20:20.332092 222878 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1129 09:20:20.348128 222878 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I1129 09:20:20.359261 222878 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1129 09:20:20.369657 222878 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I1129 09:20:20.369726 222878 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I1129 09:20:20.379235 222878 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1129 09:20:20.388089 222878 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1129 09:20:20.397442 222878 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1129 09:20:20.406391 222878 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1129 09:20:20.414674 222878 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1129 09:20:20.423896 222878 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1129 09:20:20.432684 222878 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1129 09:20:20.441584 222878 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1129 09:20:20.449626 222878 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1129 09:20:20.458580 222878 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1129 09:20:20.578649 222878 ssh_runner.go:195] Run: sudo systemctl restart containerd
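The run of sed edits above is all containerd configuration for the "cgroupfs" driver detected at start.go:496: point crictl at the containerd socket, pin the pause image, force SystemdCgroup off, then reload and restart. Condensed to its essentials (commands lifted from the log; run on the node):

# Point crictl at containerd, force the cgroupfs driver, and restart.
printf 'runtime-endpoint: unix:///run/containerd/containerd.sock\n' | sudo tee /etc/crictl.yaml
sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml
sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml
sudo systemctl daemon-reload
sudo systemctl restart containerd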
I1129 09:20:20.669910 222878 start.go:543] Will wait 60s for socket path /run/containerd/containerd.sock
I1129 09:20:20.670001 222878 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I1129 09:20:20.674049 222878 start.go:564] Will wait 60s for crictl version
I1129 09:20:20.674121 222878 ssh_runner.go:195] Run: which crictl
I1129 09:20:20.677882 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1129 09:20:20.711552 222878 start.go:580] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: v2.1.5
RuntimeApiVersion: v1
I1129 09:20:20.711620 222878 ssh_runner.go:195] Run: containerd --version
I1129 09:20:20.734338 222878 ssh_runner.go:195] Run: containerd --version
I1129 09:20:20.760452 222878 out.go:179] * Preparing Kubernetes v1.34.1 on containerd 2.1.5 ...
I1129 09:20:20.763394 222878 cli_runner.go:164] Run: docker network inspect no-preload-230403 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1129 09:20:20.779886 222878 ssh_runner.go:195] Run: grep 192.168.85.1 host.minikube.internal$ /etc/hosts
I1129 09:20:20.783617 222878 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.85.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1129 09:20:20.793588  222878 kubeadm.go:884] updating cluster {Name:no-preload-230403 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-230403 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1129 09:20:20.793740 222878 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
I1129 09:20:20.793820 222878 ssh_runner.go:195] Run: sudo crictl images --output json
I1129 09:20:20.818996 222878 containerd.go:623] couldn't find preloaded image for "registry.k8s.io/kube-apiserver:v1.34.1". assuming images are not preloaded.
I1129 09:20:20.819021 222878 cache_images.go:90] LoadCachedImages start: [registry.k8s.io/kube-apiserver:v1.34.1 registry.k8s.io/kube-controller-manager:v1.34.1 registry.k8s.io/kube-scheduler:v1.34.1 registry.k8s.io/kube-proxy:v1.34.1 registry.k8s.io/pause:3.10.1 registry.k8s.io/etcd:3.6.4-0 registry.k8s.io/coredns/coredns:v1.12.1 gcr.io/k8s-minikube/storage-provisioner:v5]
I1129 09:20:20.819075 222878 image.go:138] retrieving image: gcr.io/k8s-minikube/storage-provisioner:v5
I1129 09:20:20.819290 222878 image.go:138] retrieving image: registry.k8s.io/kube-apiserver:v1.34.1
I1129 09:20:20.819377 222878 image.go:138] retrieving image: registry.k8s.io/kube-controller-manager:v1.34.1
I1129 09:20:20.819472 222878 image.go:138] retrieving image: registry.k8s.io/kube-scheduler:v1.34.1
I1129 09:20:20.819580 222878 image.go:138] retrieving image: registry.k8s.io/kube-proxy:v1.34.1
I1129 09:20:20.819670 222878 image.go:138] retrieving image: registry.k8s.io/pause:3.10.1
I1129 09:20:20.819757 222878 image.go:138] retrieving image: registry.k8s.io/etcd:3.6.4-0
I1129 09:20:20.819836 222878 image.go:138] retrieving image: registry.k8s.io/coredns/coredns:v1.12.1
I1129 09:20:20.820993 222878 image.go:181] daemon lookup for registry.k8s.io/kube-scheduler:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-scheduler:v1.34.1
I1129 09:20:20.821570 222878 image.go:181] daemon lookup for registry.k8s.io/kube-apiserver:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-apiserver:v1.34.1
I1129 09:20:20.821829 222878 image.go:181] daemon lookup for registry.k8s.io/kube-controller-manager:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-controller-manager:v1.34.1
I1129 09:20:20.821983 222878 image.go:181] daemon lookup for gcr.io/k8s-minikube/storage-provisioner:v5: Error response from daemon: No such image: gcr.io/k8s-minikube/storage-provisioner:v5
I1129 09:20:20.822235 222878 image.go:181] daemon lookup for registry.k8s.io/etcd:3.6.4-0: Error response from daemon: No such image: registry.k8s.io/etcd:3.6.4-0
I1129 09:20:20.822385 222878 image.go:181] daemon lookup for registry.k8s.io/coredns/coredns:v1.12.1: Error response from daemon: No such image: registry.k8s.io/coredns/coredns:v1.12.1
I1129 09:20:20.822667 222878 image.go:181] daemon lookup for registry.k8s.io/pause:3.10.1: Error response from daemon: No such image: registry.k8s.io/pause:3.10.1
I1129 09:20:20.823079 222878 image.go:181] daemon lookup for registry.k8s.io/kube-proxy:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-proxy:v1.34.1
I1129 09:20:21.122603 222878 containerd.go:267] Checking existence of image with name "registry.k8s.io/pause:3.10.1" and sha "d7b100cd9a77ba782c5e428c8dd5a1df4a1e79d4cb6294acd7d01290ab3babbd"
I1129 09:20:21.122681 222878 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/pause:3.10.1
I1129 09:20:21.142272 222878 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-controller-manager:v1.34.1" and sha "7eb2c6ff0c5a768fd309321bc2ade0e4e11afcf4f2017ef1d0ff00d91fdf992a"
I1129 09:20:21.142372 222878 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-controller-manager:v1.34.1
I1129 09:20:21.156765 222878 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-scheduler:v1.34.1" and sha "b5f57ec6b98676d815366685a0422bd164ecf0732540b79ac51b1186cef97ff0"
I1129 09:20:21.156842 222878 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-scheduler:v1.34.1
I1129 09:20:21.158253 222878 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-apiserver:v1.34.1" and sha "43911e833d64d4f30460862fc0c54bb61999d60bc7d063feca71e9fc610d5196"
I1129 09:20:21.158318 222878 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-apiserver:v1.34.1
I1129 09:20:21.159304 222878 containerd.go:267] Checking existence of image with name "registry.k8s.io/coredns/coredns:v1.12.1" and sha "138784d87c9c50f8e59412544da4cf4928d61ccbaf93b9f5898a3ba406871bfc"
I1129 09:20:21.159366 222878 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/coredns/coredns:v1.12.1
I1129 09:20:21.163083 222878 containerd.go:267] Checking existence of image with name "registry.k8s.io/etcd:3.6.4-0" and sha "a1894772a478e07c67a56e8bf32335fdbe1dd4ec96976a5987083164bd00bc0e"
I1129 09:20:21.163151 222878 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/etcd:3.6.4-0
I1129 09:20:21.163275 222878 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-proxy:v1.34.1" and sha "05baa95f5142d87797a2bc1d3d11edfb0bf0a9236d436243d15061fae8b58cb9"
I1129 09:20:21.163342 222878 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-proxy:v1.34.1
I1129 09:20:21.165618 222878 cache_images.go:118] "registry.k8s.io/pause:3.10.1" needs transfer: "registry.k8s.io/pause:3.10.1" does not exist at hash "d7b100cd9a77ba782c5e428c8dd5a1df4a1e79d4cb6294acd7d01290ab3babbd" in container runtime
I1129 09:20:21.165704 222878 cri.go:218] Removing image: registry.k8s.io/pause:3.10.1
I1129 09:20:21.165791 222878 ssh_runner.go:195] Run: which crictl
I1129 09:20:21.179345 222878 cache_images.go:118] "registry.k8s.io/kube-controller-manager:v1.34.1" needs transfer: "registry.k8s.io/kube-controller-manager:v1.34.1" does not exist at hash "7eb2c6ff0c5a768fd309321bc2ade0e4e11afcf4f2017ef1d0ff00d91fdf992a" in container runtime
I1129 09:20:21.179432 222878 cri.go:218] Removing image: registry.k8s.io/kube-controller-manager:v1.34.1
I1129 09:20:21.179520 222878 ssh_runner.go:195] Run: which crictl
I1129 09:20:21.225665 222878 cache_images.go:118] "registry.k8s.io/kube-scheduler:v1.34.1" needs transfer: "registry.k8s.io/kube-scheduler:v1.34.1" does not exist at hash "b5f57ec6b98676d815366685a0422bd164ecf0732540b79ac51b1186cef97ff0" in container runtime
I1129 09:20:21.225755 222878 cri.go:218] Removing image: registry.k8s.io/kube-scheduler:v1.34.1
I1129 09:20:21.225854 222878 ssh_runner.go:195] Run: which crictl
I1129 09:20:21.225939 222878 cache_images.go:118] "registry.k8s.io/kube-apiserver:v1.34.1" needs transfer: "registry.k8s.io/kube-apiserver:v1.34.1" does not exist at hash "43911e833d64d4f30460862fc0c54bb61999d60bc7d063feca71e9fc610d5196" in container runtime
I1129 09:20:21.225991 222878 cri.go:218] Removing image: registry.k8s.io/kube-apiserver:v1.34.1
I1129 09:20:21.226032 222878 ssh_runner.go:195] Run: which crictl
I1129 09:20:21.226126 222878 cache_images.go:118] "registry.k8s.io/coredns/coredns:v1.12.1" needs transfer: "registry.k8s.io/coredns/coredns:v1.12.1" does not exist at hash "138784d87c9c50f8e59412544da4cf4928d61ccbaf93b9f5898a3ba406871bfc" in container runtime
I1129 09:20:21.226162 222878 cri.go:218] Removing image: registry.k8s.io/coredns/coredns:v1.12.1
I1129 09:20:21.226209 222878 ssh_runner.go:195] Run: which crictl
I1129 09:20:21.237496 222878 cache_images.go:118] "registry.k8s.io/etcd:3.6.4-0" needs transfer: "registry.k8s.io/etcd:3.6.4-0" does not exist at hash "a1894772a478e07c67a56e8bf32335fdbe1dd4ec96976a5987083164bd00bc0e" in container runtime
I1129 09:20:21.237581 222878 cri.go:218] Removing image: registry.k8s.io/etcd:3.6.4-0
I1129 09:20:21.237665 222878 ssh_runner.go:195] Run: which crictl
I1129 09:20:21.239070 222878 cache_images.go:118] "registry.k8s.io/kube-proxy:v1.34.1" needs transfer: "registry.k8s.io/kube-proxy:v1.34.1" does not exist at hash "05baa95f5142d87797a2bc1d3d11edfb0bf0a9236d436243d15061fae8b58cb9" in container runtime
I1129 09:20:21.239288 222878 cri.go:218] Removing image: registry.k8s.io/kube-proxy:v1.34.1
I1129 09:20:21.239346 222878 ssh_runner.go:195] Run: which crictl
I1129 09:20:21.239286 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.34.1
I1129 09:20:21.239244 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1129 09:20:21.240343 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.12.1
I1129 09:20:21.240430 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.34.1
I1129 09:20:21.240578 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.34.1
I1129 09:20:21.248302 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.6.4-0
I1129 09:20:21.337972 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.34.1
I1129 09:20:21.338141 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1129 09:20:21.338156 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.34.1
I1129 09:20:21.350334 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.34.1
I1129 09:20:21.350500 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.12.1
I1129 09:20:21.350586 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.34.1
I1129 09:20:21.350679 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.6.4-0
I1129 09:20:21.436779 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.34.1
I1129 09:20:21.436931 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.34.1
I1129 09:20:21.437008 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1129 09:20:21.482969 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.34.1
I1129 09:20:21.483085 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.34.1
I1129 09:20:21.483137 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.12.1
I1129 09:20:21.491181 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.6.4-0
I1129 09:20:21.551573 222878 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/pause_3.10.1
I1129 09:20:21.551783 222878 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/pause_3.10.1
I1129 09:20:21.551782 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.34.1
I1129 09:20:21.551677 222878 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-controller-manager_v1.34.1
I1129 09:20:21.551991 222878 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-controller-manager_v1.34.1
I1129 09:20:21.589991 222878 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/coredns/coredns_v1.12.1
I1129 09:20:21.590095 222878 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/coredns_v1.12.1
I1129 09:20:21.590176 222878 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-scheduler_v1.34.1
I1129 09:20:21.590233 222878 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-scheduler_v1.34.1
I1129 09:20:21.590311 222878 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-apiserver_v1.34.1
I1129 09:20:21.590381 222878 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-apiserver_v1.34.1
I1129 09:20:21.599084 222878 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/etcd_3.6.4-0
I1129 09:20:21.599203 222878 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/etcd_3.6.4-0
I1129 09:20:21.606906 222878 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-controller-manager_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-controller-manager_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-controller-manager_v1.34.1': No such file or directory
I1129 09:20:21.607120 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-controller-manager_v1.34.1 --> /var/lib/minikube/images/kube-controller-manager_v1.34.1 (20730880 bytes)
I1129 09:20:21.607120 222878 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-scheduler_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-scheduler_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-scheduler_v1.34.1': No such file or directory
I1129 09:20:21.607245 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-scheduler_v1.34.1 --> /var/lib/minikube/images/kube-scheduler_v1.34.1 (15790592 bytes)
I1129 09:20:21.607065 222878 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-proxy_v1.34.1
I1129 09:20:21.607080 222878 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-apiserver_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-apiserver_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-apiserver_v1.34.1': No such file or directory
I1129 09:20:21.607377 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-apiserver_v1.34.1 --> /var/lib/minikube/images/kube-apiserver_v1.34.1 (24581632 bytes)
I1129 09:20:21.607089 222878 ssh_runner.go:352] existence check for /var/lib/minikube/images/coredns_v1.12.1: stat -c "%s %y" /var/lib/minikube/images/coredns_v1.12.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/coredns_v1.12.1': No such file or directory
I1129 09:20:21.607470 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/coredns/coredns_v1.12.1 --> /var/lib/minikube/images/coredns_v1.12.1 (20402176 bytes)
I1129 09:20:21.607010 222878 ssh_runner.go:352] existence check for /var/lib/minikube/images/pause_3.10.1: stat -c "%s %y" /var/lib/minikube/images/pause_3.10.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/pause_3.10.1': No such file or directory
I1129 09:20:21.607558 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/pause_3.10.1 --> /var/lib/minikube/images/pause_3.10.1 (268288 bytes)
I1129 09:20:21.607693 222878 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-proxy_v1.34.1
I1129 09:20:21.611409 222878 ssh_runner.go:352] existence check for /var/lib/minikube/images/etcd_3.6.4-0: stat -c "%s %y" /var/lib/minikube/images/etcd_3.6.4-0: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/etcd_3.6.4-0': No such file or directory
I1129 09:20:21.611475 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/etcd_3.6.4-0 --> /var/lib/minikube/images/etcd_3.6.4-0 (98216960 bytes)
I1129 09:20:21.621627 222878 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-proxy_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-proxy_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-proxy_v1.34.1': No such file or directory
I1129 09:20:21.621809 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-proxy_v1.34.1 --> /var/lib/minikube/images/kube-proxy_v1.34.1 (22790144 bytes)
I1129 09:20:21.715246 222878 containerd.go:285] Loading image: /var/lib/minikube/images/pause_3.10.1
I1129 09:20:21.715371 222878 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/pause_3.10.1
I1129 09:20:22.049743 222878 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/pause_3.10.1 from cache
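Because no preload tarball exists for v1.34.1 on containerd, every image goes through the same three-step cycle visible above: stat the tarball on the node, copy it from the local cache when the stat fails, then ctr-import it into the k8s.io namespace. One iteration written out by hand, assuming the node path is writable via minikube cp (minikube's own ssh_runner handles the root-owned directory with sudo):

# Copy a cached image tarball onto the node if it is missing, then import
# it into containerd's k8s.io namespace (profile name from this run).
img=/var/lib/minikube/images/pause_3.10.1
src=$HOME/.minikube/cache/images/arm64/registry.k8s.io/pause_3.10.1
minikube -p no-preload-230403 ssh -- "stat -c '%s %y' $img" \
  || minikube -p no-preload-230403 cp "$src" "$img"
minikube -p no-preload-230403 ssh -- "sudo ctr -n=k8s.io images import $img"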
I1129 09:20:22.146786 222878 containerd.go:285] Loading image: /var/lib/minikube/images/kube-scheduler_v1.34.1
I1129 09:20:22.146909 222878 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-scheduler_v1.34.1
W1129 09:20:22.239238 222878 image.go:286] image gcr.io/k8s-minikube/storage-provisioner:v5 arch mismatch: want arm64 got amd64. fixing
I1129 09:20:22.239372 222878 containerd.go:267] Checking existence of image with name "gcr.io/k8s-minikube/storage-provisioner:v5" and sha "66749159455b3f08c8318fe0233122f54d0f5889f9c5fdfb73c3fd9d99895b51"
I1129 09:20:22.239461 222878 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==gcr.io/k8s-minikube/storage-provisioner:v5
W1129 09:20:21.201342 219229 node_ready.go:57] node "old-k8s-version-071895" has "Ready":"False" status (will retry)
W1129 09:20:23.202246 219229 node_ready.go:57] node "old-k8s-version-071895" has "Ready":"False" status (will retry)
I1129 09:20:23.813839 222878 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-scheduler_v1.34.1: (1.666881209s)
I1129 09:20:23.813866 222878 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-scheduler_v1.34.1 from cache
I1129 09:20:23.813884 222878 containerd.go:285] Loading image: /var/lib/minikube/images/coredns_v1.12.1
I1129 09:20:23.813934 222878 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/coredns_v1.12.1
I1129 09:20:23.813990 222878 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images ls name==gcr.io/k8s-minikube/storage-provisioner:v5: (1.574510089s)
I1129 09:20:23.814059 222878 cache_images.go:118] "gcr.io/k8s-minikube/storage-provisioner:v5" needs transfer: "gcr.io/k8s-minikube/storage-provisioner:v5" does not exist at hash "66749159455b3f08c8318fe0233122f54d0f5889f9c5fdfb73c3fd9d99895b51" in container runtime
I1129 09:20:23.814109 222878 cri.go:218] Removing image: gcr.io/k8s-minikube/storage-provisioner:v5
I1129 09:20:23.814162 222878 ssh_runner.go:195] Run: which crictl
I1129 09:20:25.262220 222878 ssh_runner.go:235] Completed: which crictl: (1.448029919s)
I1129 09:20:25.262315 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1129 09:20:25.262227 222878 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/coredns_v1.12.1: (1.44826357s)
I1129 09:20:25.262380 222878 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/coredns/coredns_v1.12.1 from cache
I1129 09:20:25.262400 222878 containerd.go:285] Loading image: /var/lib/minikube/images/kube-controller-manager_v1.34.1
I1129 09:20:25.262443 222878 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-controller-manager_v1.34.1
I1129 09:20:26.253409 222878 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-controller-manager_v1.34.1 from cache
I1129 09:20:26.253448 222878 containerd.go:285] Loading image: /var/lib/minikube/images/kube-apiserver_v1.34.1
I1129 09:20:26.253502 222878 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-apiserver_v1.34.1
I1129 09:20:26.253588 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1129 09:20:27.306910 222878 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-apiserver_v1.34.1: (1.053379529s)
I1129 09:20:27.306932 222878 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-apiserver_v1.34.1 from cache
I1129 09:20:27.306934 222878 ssh_runner.go:235] Completed: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5: (1.053324259s)
I1129 09:20:27.306948 222878 containerd.go:285] Loading image: /var/lib/minikube/images/kube-proxy_v1.34.1
I1129 09:20:27.306998 222878 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1129 09:20:27.306998 222878 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-proxy_v1.34.1
I1129 09:20:27.339643 222878 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/gcr.io/k8s-minikube/storage-provisioner_v5
I1129 09:20:27.339756 222878 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v5
W1129 09:20:25.701399 219229 node_ready.go:57] node "old-k8s-version-071895" has "Ready":"False" status (will retry)
W1129 09:20:28.200255 219229 node_ready.go:57] node "old-k8s-version-071895" has "Ready":"False" status (will retry)
I1129 09:20:29.701513 219229 node_ready.go:49] node "old-k8s-version-071895" is "Ready"
I1129 09:20:29.701545 219229 node_ready.go:38] duration metric: took 12.504000526s for node "old-k8s-version-071895" to be "Ready" ...
I1129 09:20:29.701560 219229 api_server.go:52] waiting for apiserver process to appear ...
I1129 09:20:29.701622 219229 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1129 09:20:29.719485 219229 api_server.go:72] duration metric: took 14.188022937s to wait for apiserver process to appear ...
I1129 09:20:29.719511 219229 api_server.go:88] waiting for apiserver healthz status ...
I1129 09:20:29.719530 219229 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1129 09:20:29.736520 219229 api_server.go:279] https://192.168.76.2:8443/healthz returned 200:
ok
I1129 09:20:29.740376 219229 api_server.go:141] control plane version: v1.28.0
I1129 09:20:29.740411 219229 api_server.go:131] duration metric: took 20.892436ms to wait for apiserver health ...
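The healthz wait above is a plain HTTPS poll: hit /healthz on the apiserver endpoint until it answers 200 with body "ok". An equivalent loop against this run's endpoint (-k because the minikube CA is not in the host trust store):

# Poll apiserver health until it reports ok.
until curl -sk https://192.168.76.2:8443/healthz | grep -qx ok; do
  sleep 2
done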
I1129 09:20:29.740421 219229 system_pods.go:43] waiting for kube-system pods to appear ...
I1129 09:20:29.748136 219229 system_pods.go:59] 8 kube-system pods found
I1129 09:20:29.748178 219229 system_pods.go:61] "coredns-5dd5756b68-htmzr" [c6b5f2ee-df4f-40a3-be2e-6f16e858e497] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1129 09:20:29.748186 219229 system_pods.go:61] "etcd-old-k8s-version-071895" [79f6e3b1-4d0e-480f-ba81-e9c28edc83ac] Running
I1129 09:20:29.748192 219229 system_pods.go:61] "kindnet-58g5f" [d4743cee-0834-4a44-9cf7-d0228aa5b843] Running
I1129 09:20:29.748201 219229 system_pods.go:61] "kube-apiserver-old-k8s-version-071895" [81748b80-7ec0-4a82-b646-673534a05137] Running
I1129 09:20:29.748206 219229 system_pods.go:61] "kube-controller-manager-old-k8s-version-071895" [b6691622-dfbd-4b77-bedd-c7a97120a360] Running
I1129 09:20:29.748209 219229 system_pods.go:61] "kube-proxy-4jxrn" [3e4bdb82-85e5-468b-80dc-0481c990f117] Running
I1129 09:20:29.748213 219229 system_pods.go:61] "kube-scheduler-old-k8s-version-071895" [fe7f98e1-0743-41d8-869a-4807c081f621] Running
I1129 09:20:29.748219 219229 system_pods.go:61] "storage-provisioner" [784fe707-ae15-4eae-a70c-ec084ce3d812] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1129 09:20:29.748231 219229 system_pods.go:74] duration metric: took 7.804151ms to wait for pod list to return data ...
I1129 09:20:29.748241 219229 default_sa.go:34] waiting for default service account to be created ...
I1129 09:20:29.751107 219229 default_sa.go:45] found service account: "default"
I1129 09:20:29.751135 219229 default_sa.go:55] duration metric: took 2.887312ms for default service account to be created ...
I1129 09:20:29.751147 219229 system_pods.go:116] waiting for k8s-apps to be running ...
I1129 09:20:29.757754 219229 system_pods.go:86] 8 kube-system pods found
I1129 09:20:29.757797 219229 system_pods.go:89] "coredns-5dd5756b68-htmzr" [c6b5f2ee-df4f-40a3-be2e-6f16e858e497] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1129 09:20:29.757804 219229 system_pods.go:89] "etcd-old-k8s-version-071895" [79f6e3b1-4d0e-480f-ba81-e9c28edc83ac] Running
I1129 09:20:29.757810 219229 system_pods.go:89] "kindnet-58g5f" [d4743cee-0834-4a44-9cf7-d0228aa5b843] Running
I1129 09:20:29.757815 219229 system_pods.go:89] "kube-apiserver-old-k8s-version-071895" [81748b80-7ec0-4a82-b646-673534a05137] Running
I1129 09:20:29.757819 219229 system_pods.go:89] "kube-controller-manager-old-k8s-version-071895" [b6691622-dfbd-4b77-bedd-c7a97120a360] Running
I1129 09:20:29.757823 219229 system_pods.go:89] "kube-proxy-4jxrn" [3e4bdb82-85e5-468b-80dc-0481c990f117] Running
I1129 09:20:29.757827 219229 system_pods.go:89] "kube-scheduler-old-k8s-version-071895" [fe7f98e1-0743-41d8-869a-4807c081f621] Running
I1129 09:20:29.757833 219229 system_pods.go:89] "storage-provisioner" [784fe707-ae15-4eae-a70c-ec084ce3d812] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1129 09:20:29.757863 219229 retry.go:31] will retry after 212.604223ms: missing components: kube-dns
I1129 09:20:29.976302 219229 system_pods.go:86] 8 kube-system pods found
I1129 09:20:29.976339 219229 system_pods.go:89] "coredns-5dd5756b68-htmzr" [c6b5f2ee-df4f-40a3-be2e-6f16e858e497] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1129 09:20:29.976347 219229 system_pods.go:89] "etcd-old-k8s-version-071895" [79f6e3b1-4d0e-480f-ba81-e9c28edc83ac] Running
I1129 09:20:29.976353 219229 system_pods.go:89] "kindnet-58g5f" [d4743cee-0834-4a44-9cf7-d0228aa5b843] Running
I1129 09:20:29.976359 219229 system_pods.go:89] "kube-apiserver-old-k8s-version-071895" [81748b80-7ec0-4a82-b646-673534a05137] Running
I1129 09:20:29.976364 219229 system_pods.go:89] "kube-controller-manager-old-k8s-version-071895" [b6691622-dfbd-4b77-bedd-c7a97120a360] Running
I1129 09:20:29.976368 219229 system_pods.go:89] "kube-proxy-4jxrn" [3e4bdb82-85e5-468b-80dc-0481c990f117] Running
I1129 09:20:29.976373 219229 system_pods.go:89] "kube-scheduler-old-k8s-version-071895" [fe7f98e1-0743-41d8-869a-4807c081f621] Running
I1129 09:20:29.976379 219229 system_pods.go:89] "storage-provisioner" [784fe707-ae15-4eae-a70c-ec084ce3d812] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1129 09:20:29.976398 219229 retry.go:31] will retry after 279.278138ms: missing components: kube-dns
I1129 09:20:30.268579 219229 system_pods.go:86] 8 kube-system pods found
I1129 09:20:30.268774 219229 system_pods.go:89] "coredns-5dd5756b68-htmzr" [c6b5f2ee-df4f-40a3-be2e-6f16e858e497] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1129 09:20:30.268790 219229 system_pods.go:89] "etcd-old-k8s-version-071895" [79f6e3b1-4d0e-480f-ba81-e9c28edc83ac] Running
I1129 09:20:30.268797 219229 system_pods.go:89] "kindnet-58g5f" [d4743cee-0834-4a44-9cf7-d0228aa5b843] Running
I1129 09:20:30.268802 219229 system_pods.go:89] "kube-apiserver-old-k8s-version-071895" [81748b80-7ec0-4a82-b646-673534a05137] Running
I1129 09:20:30.268807 219229 system_pods.go:89] "kube-controller-manager-old-k8s-version-071895" [b6691622-dfbd-4b77-bedd-c7a97120a360] Running
I1129 09:20:30.268811 219229 system_pods.go:89] "kube-proxy-4jxrn" [3e4bdb82-85e5-468b-80dc-0481c990f117] Running
I1129 09:20:30.268816 219229 system_pods.go:89] "kube-scheduler-old-k8s-version-071895" [fe7f98e1-0743-41d8-869a-4807c081f621] Running
I1129 09:20:30.268826 219229 system_pods.go:89] "storage-provisioner" [784fe707-ae15-4eae-a70c-ec084ce3d812] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1129 09:20:30.268843 219229 retry.go:31] will retry after 368.451427ms: missing components: kube-dns
I1129 09:20:30.642681 219229 system_pods.go:86] 8 kube-system pods found
I1129 09:20:30.642718 219229 system_pods.go:89] "coredns-5dd5756b68-htmzr" [c6b5f2ee-df4f-40a3-be2e-6f16e858e497] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1129 09:20:30.642726 219229 system_pods.go:89] "etcd-old-k8s-version-071895" [79f6e3b1-4d0e-480f-ba81-e9c28edc83ac] Running
I1129 09:20:30.642733 219229 system_pods.go:89] "kindnet-58g5f" [d4743cee-0834-4a44-9cf7-d0228aa5b843] Running
I1129 09:20:30.642738 219229 system_pods.go:89] "kube-apiserver-old-k8s-version-071895" [81748b80-7ec0-4a82-b646-673534a05137] Running
I1129 09:20:30.642743 219229 system_pods.go:89] "kube-controller-manager-old-k8s-version-071895" [b6691622-dfbd-4b77-bedd-c7a97120a360] Running
I1129 09:20:30.642747 219229 system_pods.go:89] "kube-proxy-4jxrn" [3e4bdb82-85e5-468b-80dc-0481c990f117] Running
I1129 09:20:30.642752 219229 system_pods.go:89] "kube-scheduler-old-k8s-version-071895" [fe7f98e1-0743-41d8-869a-4807c081f621] Running
I1129 09:20:30.642761 219229 system_pods.go:89] "storage-provisioner" [784fe707-ae15-4eae-a70c-ec084ce3d812] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1129 09:20:30.642776 219229 retry.go:31] will retry after 521.296683ms: missing components: kube-dns
I1129 09:20:31.171413 219229 system_pods.go:86] 8 kube-system pods found
I1129 09:20:31.171442 219229 system_pods.go:89] "coredns-5dd5756b68-htmzr" [c6b5f2ee-df4f-40a3-be2e-6f16e858e497] Running
I1129 09:20:31.171449 219229 system_pods.go:89] "etcd-old-k8s-version-071895" [79f6e3b1-4d0e-480f-ba81-e9c28edc83ac] Running
I1129 09:20:31.171454 219229 system_pods.go:89] "kindnet-58g5f" [d4743cee-0834-4a44-9cf7-d0228aa5b843] Running
I1129 09:20:31.171472 219229 system_pods.go:89] "kube-apiserver-old-k8s-version-071895" [81748b80-7ec0-4a82-b646-673534a05137] Running
I1129 09:20:31.171482 219229 system_pods.go:89] "kube-controller-manager-old-k8s-version-071895" [b6691622-dfbd-4b77-bedd-c7a97120a360] Running
I1129 09:20:31.171487 219229 system_pods.go:89] "kube-proxy-4jxrn" [3e4bdb82-85e5-468b-80dc-0481c990f117] Running
I1129 09:20:31.171502 219229 system_pods.go:89] "kube-scheduler-old-k8s-version-071895" [fe7f98e1-0743-41d8-869a-4807c081f621] Running
I1129 09:20:31.171506 219229 system_pods.go:89] "storage-provisioner" [784fe707-ae15-4eae-a70c-ec084ce3d812] Running
I1129 09:20:31.171514 219229 system_pods.go:126] duration metric: took 1.420361927s to wait for k8s-apps to be running ...
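For reference, the polling above (system_pods.go retrying until kube-dns leaves Pending) can be reproduced by hand. A minimal shell sketch, assuming kubectl is pointed at this cluster; the context name is taken from this run:

  # Show the CoreDNS pod the retry loop was waiting on
  kubectl --context old-k8s-version-071895 -n kube-system get pods -l k8s-app=kube-dns
  # Block until it reports Ready instead of polling with backoff
  kubectl --context old-k8s-version-071895 -n kube-system wait pod \
    -l k8s-app=kube-dns --for=condition=Ready --timeout=120s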
I1129 09:20:31.171522 219229 system_svc.go:44] waiting for kubelet service to be running ....
I1129 09:20:31.171578 219229 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1129 09:20:31.191104 219229 system_svc.go:56] duration metric: took 19.570105ms WaitForService to wait for kubelet
I1129 09:20:31.191198 219229 kubeadm.go:587] duration metric: took 15.659726511s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1129 09:20:31.191233 219229 node_conditions.go:102] verifying NodePressure condition ...
I1129 09:20:31.194404 219229 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
I1129 09:20:31.194485 219229 node_conditions.go:123] node cpu capacity is 2
I1129 09:20:31.194514 219229 node_conditions.go:105] duration metric: took 3.245952ms to run NodePressure ...
I1129 09:20:31.194558 219229 start.go:242] waiting for startup goroutines ...
I1129 09:20:31.194583 219229 start.go:247] waiting for cluster config update ...
I1129 09:20:31.194611 219229 start.go:256] writing updated cluster config ...
I1129 09:20:31.195146 219229 ssh_runner.go:195] Run: rm -f paused
I1129 09:20:31.201208 219229 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1129 09:20:31.206616 219229 pod_ready.go:83] waiting for pod "coredns-5dd5756b68-htmzr" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:20:31.217168 219229 pod_ready.go:94] pod "coredns-5dd5756b68-htmzr" is "Ready"
I1129 09:20:31.217243 219229 pod_ready.go:86] duration metric: took 10.548708ms for pod "coredns-5dd5756b68-htmzr" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:20:31.223645 219229 pod_ready.go:83] waiting for pod "etcd-old-k8s-version-071895" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:20:31.234784 219229 pod_ready.go:94] pod "etcd-old-k8s-version-071895" is "Ready"
I1129 09:20:31.234859 219229 pod_ready.go:86] duration metric: took 11.131317ms for pod "etcd-old-k8s-version-071895" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:20:31.248582 219229 pod_ready.go:83] waiting for pod "kube-apiserver-old-k8s-version-071895" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:20:31.259407 219229 pod_ready.go:94] pod "kube-apiserver-old-k8s-version-071895" is "Ready"
I1129 09:20:31.259482 219229 pod_ready.go:86] duration metric: took 10.819537ms for pod "kube-apiserver-old-k8s-version-071895" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:20:31.263998 219229 pod_ready.go:83] waiting for pod "kube-controller-manager-old-k8s-version-071895" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:20:31.606531 219229 pod_ready.go:94] pod "kube-controller-manager-old-k8s-version-071895" is "Ready"
I1129 09:20:31.606610 219229 pod_ready.go:86] duration metric: took 342.539937ms for pod "kube-controller-manager-old-k8s-version-071895" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:20:31.808005 219229 pod_ready.go:83] waiting for pod "kube-proxy-4jxrn" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:20:32.206161 219229 pod_ready.go:94] pod "kube-proxy-4jxrn" is "Ready"
I1129 09:20:32.206190 219229 pod_ready.go:86] duration metric: took 398.137324ms for pod "kube-proxy-4jxrn" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:20:32.422404 219229 pod_ready.go:83] waiting for pod "kube-scheduler-old-k8s-version-071895" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:20:32.806577 219229 pod_ready.go:94] pod "kube-scheduler-old-k8s-version-071895" is "Ready"
I1129 09:20:32.806676 219229 pod_ready.go:86] duration metric: took 384.18875ms for pod "kube-scheduler-old-k8s-version-071895" in "kube-system" namespace to be "Ready" or be gone ...
I1129 09:20:32.806706 219229 pod_ready.go:40] duration metric: took 1.605412666s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
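The "extra waiting" step above checks one label selector per control-plane component. A rough shell equivalent using the same label list logged above (a sketch, not minikube's actual implementation):

  for sel in k8s-app=kube-dns component=etcd component=kube-apiserver \
             component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler; do
    kubectl -n kube-system wait pod -l "$sel" --for=condition=Ready --timeout=4m
  done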
I1129 09:20:32.883122 219229 start.go:625] kubectl: 1.33.2, cluster: 1.28.0 (minor skew: 5)
I1129 09:20:32.886925 219229 out.go:203]
W1129 09:20:32.889873 219229 out.go:285] ! /usr/local/bin/kubectl is version 1.33.2, which may have incompatibilities with Kubernetes 1.28.0.
I1129 09:20:32.892945 219229 out.go:179] - Want kubectl v1.28.0? Try 'minikube kubectl -- get pods -A'
I1129 09:20:32.896883 219229 out.go:179] * Done! kubectl is now configured to use "old-k8s-version-071895" cluster and "default" namespace by default
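As the hint two lines up suggests, the skew warning can be sidestepped by running the kubectl that minikube downloads to match the cluster version (v1.28.0 here):

  minikube kubectl -- get pods -A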
I1129 09:20:28.381724 222878 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-proxy_v1.34.1: (1.074642707s)
I1129 09:20:28.381753 222878 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/kube-proxy_v1.34.1 from cache
I1129 09:20:28.381780 222878 containerd.go:285] Loading image: /var/lib/minikube/images/etcd_3.6.4-0
I1129 09:20:28.381828 222878 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/etcd_3.6.4-0
I1129 09:20:28.381907 222878 ssh_runner.go:235] Completed: stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v5: (1.042136021s)
I1129 09:20:28.381924 222878 ssh_runner.go:352] existence check for /var/lib/minikube/images/storage-provisioner_v5: stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v5: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/storage-provisioner_v5': No such file or directory
I1129 09:20:28.381944 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/gcr.io/k8s-minikube/storage-provisioner_v5 --> /var/lib/minikube/images/storage-provisioner_v5 (8035840 bytes)
I1129 09:20:31.974151 222878 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/etcd_3.6.4-0: (3.592291332s)
I1129 09:20:31.974192 222878 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/registry.k8s.io/etcd_3.6.4-0 from cache
I1129 09:20:31.974218 222878 containerd.go:285] Loading image: /var/lib/minikube/images/storage-provisioner_v5
I1129 09:20:31.974299 222878 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/storage-provisioner_v5
I1129 09:20:32.697903 222878 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/22000-2317/.minikube/cache/images/arm64/gcr.io/k8s-minikube/storage-provisioner_v5 from cache
I1129 09:20:32.697943 222878 cache_images.go:125] Successfully loaded all cached images
I1129 09:20:32.697949 222878 cache_images.go:94] duration metric: took 11.878914483s to LoadCachedImages
I1129 09:20:32.697961 222878 kubeadm.go:935] updating node { 192.168.85.2 8443 v1.34.1 containerd true true} ...
I1129 09:20:32.698052 222878 kubeadm.go:947] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=no-preload-230403 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.85.2
[Install]
config:
{KubernetesVersion:v1.34.1 ClusterName:no-preload-230403 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
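A note on the block above: minikube renders the kubelet systemd unit plus an ExecStart override drop-in, then ships both to the node (see the scp lines later in this log). The effective unit can be inspected afterwards; a sketch, assuming the docker-driver container is named after the profile, as elsewhere in this log:

  # Print the unit together with the 10-kubeadm.conf drop-in carrying the override
  docker exec no-preload-230403 systemctl cat kubelet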
I1129 09:20:32.698117 222878 ssh_runner.go:195] Run: sudo crictl info
I1129 09:20:32.724003 222878 cni.go:84] Creating CNI manager for ""
I1129 09:20:32.724023 222878 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1129 09:20:32.724042 222878 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1129 09:20:32.724064 222878 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.85.2 APIServerPort:8443 KubernetesVersion:v1.34.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:no-preload-230403 NodeName:no-preload-230403 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.85.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.85.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1129 09:20:32.724177 222878 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.85.2
  bindPort: 8443
bootstrapTokens:
  - groups:
      - system:bootstrappers:kubeadm:default-node-token
    ttl: 24h0m0s
    usages:
      - signing
      - authentication
nodeRegistration:
  criSocket: unix:///run/containerd/containerd.sock
  name: "no-preload-230403"
  kubeletExtraArgs:
    - name: "node-ip"
      value: "192.168.85.2"
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
  certSANs: ["127.0.0.1", "localhost", "192.168.85.2"]
  extraArgs:
    - name: "enable-admission-plugins"
      value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
  extraArgs:
    - name: "allocate-node-cidrs"
      value: "true"
    - name: "leader-elect"
      value: "false"
scheduler:
  extraArgs:
    - name: "leader-elect"
      value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.1
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  x509:
    clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
  maxPerCore: 0
  # Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
  tcpEstablishedTimeout: 0s
  # Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
  tcpCloseWaitTimeout: 0s
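The rendered config above is later copied to /var/tmp/minikube/kubeadm.yaml (see the cp further down). It can be sanity-checked before init; a sketch, assuming a kubeadm recent enough to carry the validate subcommand (the cluster here is v1.34.1):

  sudo kubeadm config validate --config /var/tmp/minikube/kubeadm.yaml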
I1129 09:20:32.724247 222878 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.1
I1129 09:20:32.734586 222878 binaries.go:54] Didn't find k8s binaries: sudo ls /var/lib/minikube/binaries/v1.34.1: Process exited with status 2
stdout:
stderr:
ls: cannot access '/var/lib/minikube/binaries/v1.34.1': No such file or directory
Initiating transfer...
I1129 09:20:32.734661 222878 ssh_runner.go:195] Run: sudo mkdir -p /var/lib/minikube/binaries/v1.34.1
I1129 09:20:32.744055 222878 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.34.1/bin/linux/arm64/kubectl?checksum=file:https://dl.k8s.io/release/v1.34.1/bin/linux/arm64/kubectl.sha256
I1129 09:20:32.744148 222878 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubectl
I1129 09:20:32.744244 222878 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.34.1/bin/linux/arm64/kubelet?checksum=file:https://dl.k8s.io/release/v1.34.1/bin/linux/arm64/kubelet.sha256
I1129 09:20:32.744287 222878 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1129 09:20:32.744372 222878 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.34.1/bin/linux/arm64/kubeadm?checksum=file:https://dl.k8s.io/release/v1.34.1/bin/linux/arm64/kubeadm.sha256
I1129 09:20:32.744422 222878 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubeadm
I1129 09:20:32.765160 222878 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.34.1/kubeadm: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubeadm: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.34.1/kubeadm': No such file or directory
I1129 09:20:32.765194 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/cache/linux/arm64/v1.34.1/kubeadm --> /var/lib/minikube/binaries/v1.34.1/kubeadm (71434424 bytes)
I1129 09:20:32.765213 222878 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.34.1/kubectl: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubectl: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.34.1/kubectl': No such file or directory
I1129 09:20:32.765239 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/cache/linux/arm64/v1.34.1/kubectl --> /var/lib/minikube/binaries/v1.34.1/kubectl (58130616 bytes)
I1129 09:20:32.765317 222878 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubelet
I1129 09:20:32.779265 222878 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.34.1/kubelet: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubelet: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.34.1/kubelet': No such file or directory
I1129 09:20:32.779306 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/cache/linux/arm64/v1.34.1/kubelet --> /var/lib/minikube/binaries/v1.34.1/kubelet (56426788 bytes)
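The transfers above fall back to the dl.k8s.io release URLs, which publish a .sha256 file next to each binary. The download-plus-verify pattern, sketched with the kubelet URL logged above:

  curl -fsSLO "https://dl.k8s.io/release/v1.34.1/bin/linux/arm64/kubelet"
  curl -fsSLO "https://dl.k8s.io/release/v1.34.1/bin/linux/arm64/kubelet.sha256"
  # The .sha256 file holds only the digest, hence the two-space filename suffix
  echo "$(cat kubelet.sha256)  kubelet" | sha256sum --check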
I1129 09:20:33.994121 222878 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1129 09:20:34.006964 222878 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (321 bytes)
I1129 09:20:34.022992 222878 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1129 09:20:34.039936 222878 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2230 bytes)
I1129 09:20:34.054478 222878 ssh_runner.go:195] Run: grep 192.168.85.2 control-plane.minikube.internal$ /etc/hosts
I1129 09:20:34.059158 222878 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.85.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
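Spelled out, the one-liner above is an idempotent host-entry replace: drop any line already tab-anchored to control-plane.minikube.internal, append the current mapping, and copy the result back. A readable sketch of the same pattern:

  { grep -v $'\tcontrol-plane.minikube.internal$' /etc/hosts
    printf '192.168.85.2\tcontrol-plane.minikube.internal\n'
  } > /tmp/hosts.new && sudo cp /tmp/hosts.new /etc/hosts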
I1129 09:20:34.071443 222878 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1129 09:20:34.198077 222878 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1129 09:20:34.225128 222878 certs.go:69] Setting up /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403 for IP: 192.168.85.2
I1129 09:20:34.225153 222878 certs.go:195] generating shared ca certs ...
I1129 09:20:34.225176 222878 certs.go:227] acquiring lock for ca certs: {Name:mke655c14945a8520f2f9de36531df923afb2bda Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:20:34.225330 222878 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/22000-2317/.minikube/ca.key
I1129 09:20:34.225385 222878 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/22000-2317/.minikube/proxy-client-ca.key
I1129 09:20:34.225397 222878 certs.go:257] generating profile certs ...
I1129 09:20:34.225460 222878 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/client.key
I1129 09:20:34.225477 222878 crypto.go:68] Generating cert /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/client.crt with IP's: []
I1129 09:20:34.561780 222878 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/client.crt ...
I1129 09:20:34.561812 222878 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/client.crt: {Name:mk0506510be8624c61cf78aca5533a42dbe12049 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:20:34.562018 222878 crypto.go:164] Writing key to /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/client.key ...
I1129 09:20:34.562032 222878 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/client.key: {Name:mk7728838f62624078d9f102edcc2e7e92fca24a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:20:34.562134 222878 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/apiserver.key.9c37d96b
I1129 09:20:34.562155 222878 crypto.go:68] Generating cert /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/apiserver.crt.9c37d96b with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.85.2]
I1129 09:20:35.279064 222878 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/apiserver.crt.9c37d96b ...
I1129 09:20:35.279097 222878 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/apiserver.crt.9c37d96b: {Name:mkb8ab5f6d41eda35913c9ea362db6a34366a395 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:20:35.279295 222878 crypto.go:164] Writing key to /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/apiserver.key.9c37d96b ...
I1129 09:20:35.279312 222878 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/apiserver.key.9c37d96b: {Name:mk21caee54335560e86fdf60eec601c387bdb604 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:20:35.279403 222878 certs.go:382] copying /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/apiserver.crt.9c37d96b -> /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/apiserver.crt
I1129 09:20:35.279483 222878 certs.go:386] copying /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/apiserver.key.9c37d96b -> /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/apiserver.key
I1129 09:20:35.279555 222878 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/proxy-client.key
I1129 09:20:35.279573 222878 crypto.go:68] Generating cert /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/proxy-client.crt with IP's: []
I1129 09:20:35.662938 222878 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/proxy-client.crt ...
I1129 09:20:35.662968 222878 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/proxy-client.crt: {Name:mk84c114a546c4abdb7a044023d46a90cfce8d04 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:20:35.663145 222878 crypto.go:164] Writing key to /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/proxy-client.key ...
I1129 09:20:35.663161 222878 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/proxy-client.key: {Name:mk0fc11a967c87ab7d123db8f16798c3182082c0 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1129 09:20:35.663352 222878 certs.go:484] found cert: /home/jenkins/minikube-integration/22000-2317/.minikube/certs/4137.pem (1338 bytes)
W1129 09:20:35.663398 222878 certs.go:480] ignoring /home/jenkins/minikube-integration/22000-2317/.minikube/certs/4137_empty.pem, impossibly tiny 0 bytes
I1129 09:20:35.663418 222878 certs.go:484] found cert: /home/jenkins/minikube-integration/22000-2317/.minikube/certs/ca-key.pem (1675 bytes)
I1129 09:20:35.663446 222878 certs.go:484] found cert: /home/jenkins/minikube-integration/22000-2317/.minikube/certs/ca.pem (1082 bytes)
I1129 09:20:35.663474 222878 certs.go:484] found cert: /home/jenkins/minikube-integration/22000-2317/.minikube/certs/cert.pem (1123 bytes)
I1129 09:20:35.663499 222878 certs.go:484] found cert: /home/jenkins/minikube-integration/22000-2317/.minikube/certs/key.pem (1679 bytes)
I1129 09:20:35.663547 222878 certs.go:484] found cert: /home/jenkins/minikube-integration/22000-2317/.minikube/files/etc/ssl/certs/41372.pem (1708 bytes)
I1129 09:20:35.664157 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1129 09:20:35.691460 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I1129 09:20:35.717525 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1129 09:20:35.745851 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I1129 09:20:35.769815 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
I1129 09:20:35.790501 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I1129 09:20:35.812066 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1129 09:20:35.830915 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/profiles/no-preload-230403/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I1129 09:20:35.849395 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/certs/4137.pem --> /usr/share/ca-certificates/4137.pem (1338 bytes)
I1129 09:20:35.872584 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/files/etc/ssl/certs/41372.pem --> /usr/share/ca-certificates/41372.pem (1708 bytes)
I1129 09:20:35.893049 222878 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22000-2317/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1129 09:20:35.918494 222878 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1129 09:20:35.936255 222878 ssh_runner.go:195] Run: openssl version
I1129 09:20:35.943518 222878 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/41372.pem && ln -fs /usr/share/ca-certificates/41372.pem /etc/ssl/certs/41372.pem"
I1129 09:20:35.954406 222878 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/41372.pem
I1129 09:20:35.959997 222878 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 29 08:36 /usr/share/ca-certificates/41372.pem
I1129 09:20:35.960085 222878 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/41372.pem
I1129 09:20:36.006091 222878 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/41372.pem /etc/ssl/certs/3ec20f2e.0"
I1129 09:20:36.017475 222878 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1129 09:20:36.027314 222878 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1129 09:20:36.031927 222878 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 29 08:29 /usr/share/ca-certificates/minikubeCA.pem
I1129 09:20:36.031999 222878 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1129 09:20:36.075486 222878 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1129 09:20:36.084604 222878 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/4137.pem && ln -fs /usr/share/ca-certificates/4137.pem /etc/ssl/certs/4137.pem"
I1129 09:20:36.094214 222878 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/4137.pem
I1129 09:20:36.098768 222878 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 29 08:36 /usr/share/ca-certificates/4137.pem
I1129 09:20:36.098840 222878 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/4137.pem
I1129 09:20:36.143207 222878 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/4137.pem /etc/ssl/certs/51391683.0"
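The test -L / ln -fs pairs above implement OpenSSL's hashed-symlink lookup: a CA in /etc/ssl/certs is found via a symlink named after its subject-name hash. A sketch of how the b5213941.0 name above is derived:

  h=$(openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem)
  echo "$h"   # prints b5213941 for this CA
  sudo ln -fs /etc/ssl/certs/minikubeCA.pem "/etc/ssl/certs/${h}.0"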
I1129 09:20:36.152425 222878 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1129 09:20:36.156708 222878 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1129 09:20:36.156761 222878 kubeadm.go:401] StartCluster: {Name:no-preload-230403 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-230403 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1129 09:20:36.156839 222878 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I1129 09:20:36.156905 222878 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I1129 09:20:36.184470 222878 cri.go:89] found id: ""
I1129 09:20:36.184537 222878 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1129 09:20:36.193057 222878 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1129 09:20:36.201441 222878 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I1129 09:20:36.201527 222878 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1129 09:20:36.210060 222878 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1129 09:20:36.210079 222878 kubeadm.go:158] found existing configuration files:
I1129 09:20:36.210164 222878 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1129 09:20:36.218503 222878 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1129 09:20:36.218590 222878 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1129 09:20:36.226704 222878 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1129 09:20:36.235392 222878 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1129 09:20:36.235519 222878 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1129 09:20:36.243976 222878 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1129 09:20:36.252727 222878 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1129 09:20:36.252802 222878 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1129 09:20:36.261462 222878 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1129 09:20:36.270714 222878 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1129 09:20:36.270782 222878 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1129 09:20:36.278924 222878 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1129 09:20:36.329064 222878 kubeadm.go:319] [init] Using Kubernetes version: v1.34.1
I1129 09:20:36.329252 222878 kubeadm.go:319] [preflight] Running pre-flight checks
I1129 09:20:36.365187 222878 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I1129 09:20:36.365275 222878 kubeadm.go:319] KERNEL_VERSION: 5.15.0-1084-aws
I1129 09:20:36.365324 222878 kubeadm.go:319] OS: Linux
I1129 09:20:36.365388 222878 kubeadm.go:319] CGROUPS_CPU: enabled
I1129 09:20:36.365445 222878 kubeadm.go:319] CGROUPS_CPUACCT: enabled
I1129 09:20:36.365513 222878 kubeadm.go:319] CGROUPS_CPUSET: enabled
I1129 09:20:36.365576 222878 kubeadm.go:319] CGROUPS_DEVICES: enabled
I1129 09:20:36.365638 222878 kubeadm.go:319] CGROUPS_FREEZER: enabled
I1129 09:20:36.365702 222878 kubeadm.go:319] CGROUPS_MEMORY: enabled
I1129 09:20:36.365769 222878 kubeadm.go:319] CGROUPS_PIDS: enabled
I1129 09:20:36.365832 222878 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I1129 09:20:36.365884 222878 kubeadm.go:319] CGROUPS_BLKIO: enabled
I1129 09:20:36.435193 222878 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1129 09:20:36.435380 222878 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1129 09:20:36.435539 222878 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
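The preflight hint above can be acted on ahead of time to take image pulls off the init critical path; a sketch:

  sudo kubeadm config images pull --kubernetes-version v1.34.1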
I1129 09:20:36.441349 222878 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1129 09:20:36.446636 222878 out.go:252] - Generating certificates and keys ...
I1129 09:20:36.446799 222878 kubeadm.go:319] [certs] Using existing ca certificate authority
I1129 09:20:36.446906 222878 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1129 09:20:37.362846 222878 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I1129 09:20:37.721165 222878 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I1129 09:20:37.949639 222878 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1129 09:20:38.413017 222878 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1129 09:20:38.775660 222878 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1129 09:20:38.776186 222878 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [localhost no-preload-230403] and IPs [192.168.85.2 127.0.0.1 ::1]
I1129 09:20:39.104705 222878 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1129 09:20:39.105064 222878 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [localhost no-preload-230403] and IPs [192.168.85.2 127.0.0.1 ::1]
I1129 09:20:39.359331 222878 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1129 09:20:39.818423 222878 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1129 09:20:39.880381 222878 kubeadm.go:319] [certs] Generating "sa" key and public key
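The SANs kubeadm reports for the etcd serving certs above can be double-checked on the node once generation finishes. A sketch, assuming the certificateDir logged earlier, kubeadm's usual etcd/ subdirectory layout, and OpenSSL 1.1.1+ for the -ext flag:

  sudo openssl x509 -in /var/lib/minikube/certs/etcd/server.crt -noout -ext subjectAltName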
I1129 09:20:39.880638 222878 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1129 09:20:41.216161 222878 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1129 09:20:42.199207 222878 kubeadm.go:319] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I1129 09:20:42.918813 222878 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1129 09:20:43.410581 222878 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1129 09:20:43.826978 222878 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1129 09:20:43.827675 222878 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1129 09:20:43.830453 222878 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1129 09:20:43.834084 222878 out.go:252] - Booting up control plane ...
I1129 09:20:43.834197 222878 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1129 09:20:43.834283 222878 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1129 09:20:43.834359 222878 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1129 09:20:43.851485 222878 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1129 09:20:43.851654 222878 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I1129 09:20:43.861644 222878 kubeadm.go:319] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I1129 09:20:43.863805 222878 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1129 09:20:43.864136 222878 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1129 09:20:44.015245 222878 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I1129 09:20:44.015367 222878 kubeadm.go:319] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I1129 09:20:45.517833 222878 kubeadm.go:319] [kubelet-check] The kubelet is healthy after 1.502807558s
I1129 09:20:45.522544 222878 kubeadm.go:319] [control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
I1129 09:20:45.522646 222878 kubeadm.go:319] [control-plane-check] Checking kube-apiserver at https://192.168.85.2:8443/livez
I1129 09:20:45.522745 222878 kubeadm.go:319] [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
I1129 09:20:45.522825 222878 kubeadm.go:319] [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD NAMESPACE
b9e829b9abde5 1611cd07b61d5 12 seconds ago Running busybox 0 ddd79130415cc busybox default
f8f1e6dc2605a 97e04611ad434 19 seconds ago Running coredns 0 0f3ce8e6c4105 coredns-5dd5756b68-htmzr kube-system
359d9432ef497 ba04bb24b9575 20 seconds ago Running storage-provisioner 0 66250dabca2c6 storage-provisioner kube-system
db1d77c6c85ea b1a8c6f707935 31 seconds ago Running kindnet-cni 0 78bf9329ff249 kindnet-58g5f kube-system
000a8de26034d 940f54a5bcae9 33 seconds ago Running kube-proxy 0 ec6c1087a251f kube-proxy-4jxrn kube-system
c6e9c9ab04ae1 46cc66ccc7c19 55 seconds ago Running kube-controller-manager 0 16b3e81e696c9 kube-controller-manager-old-k8s-version-071895 kube-system
41dff26eb8e67 9cdd6470f48c8 55 seconds ago Running etcd 0 468f2a4d8c24a etcd-old-k8s-version-071895 kube-system
d34a4ced6121d 00543d2fe5d71 55 seconds ago Running kube-apiserver 0 9630ead47757e kube-apiserver-old-k8s-version-071895 kube-system
7c5e9c05d20b8 762dce4090c5f 55 seconds ago Running kube-scheduler 0 676bacb96168a kube-scheduler-old-k8s-version-071895 kube-system
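The table above is CRI-level state; it can be regenerated on the node with crictl, including the same namespace label filter this log used earlier:

  sudo crictl ps -a --label io.kubernetes.pod.namespace=kube-system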
==> containerd <==
Nov 29 09:20:29 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:29.985394384Z" level=info msg="connecting to shim 359d9432ef4979d387512d5a2a5a3cd9fb7a0987f4a3540a23407b70f7faf163" address="unix:///run/containerd/s/34373f541c51fce0619cd6b7f9bbe560b47e8c8788713a29595219a5d22d901b" protocol=ttrpc version=3
Nov 29 09:20:29 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:29.992937298Z" level=info msg="CreateContainer within sandbox \"0f3ce8e6c41050910070bab1a2edce113b2eb3bd98f3bca1d8006c18bcd1714f\" for container &ContainerMetadata{Name:coredns,Attempt:0,}"
Nov 29 09:20:30 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:30.071685071Z" level=info msg="Container f8f1e6dc2605a052d9e0af268d75e52d11eef09c6da328c174daa4346e21359d: CDI devices from CRI Config.CDIDevices: []"
Nov 29 09:20:30 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:30.095345483Z" level=info msg="CreateContainer within sandbox \"0f3ce8e6c41050910070bab1a2edce113b2eb3bd98f3bca1d8006c18bcd1714f\" for &ContainerMetadata{Name:coredns,Attempt:0,} returns container id \"f8f1e6dc2605a052d9e0af268d75e52d11eef09c6da328c174daa4346e21359d\""
Nov 29 09:20:30 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:30.097739089Z" level=info msg="StartContainer for \"f8f1e6dc2605a052d9e0af268d75e52d11eef09c6da328c174daa4346e21359d\""
Nov 29 09:20:30 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:30.099238569Z" level=info msg="connecting to shim f8f1e6dc2605a052d9e0af268d75e52d11eef09c6da328c174daa4346e21359d" address="unix:///run/containerd/s/3150843ad07ed5a21377bb0ba6fe93d3c73033d9ccfa3b4a9e0ed16a5e8438c5" protocol=ttrpc version=3
Nov 29 09:20:30 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:30.189374834Z" level=info msg="StartContainer for \"359d9432ef4979d387512d5a2a5a3cd9fb7a0987f4a3540a23407b70f7faf163\" returns successfully"
Nov 29 09:20:30 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:30.269744369Z" level=info msg="StartContainer for \"f8f1e6dc2605a052d9e0af268d75e52d11eef09c6da328c174daa4346e21359d\" returns successfully"
Nov 29 09:20:35 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:35.534277133Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:3abcbd08-d7c4-4a13-b94c-6f6424975411,Namespace:default,Attempt:0,}"
Nov 29 09:20:35 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:35.597130515Z" level=info msg="connecting to shim ddd79130415cc8649c69caccfc081affa5f1da8a2517127cdbcf8d824a791490" address="unix:///run/containerd/s/6d78da511a42142891dae64b3eb6a171701a2aacf243055415398ac4ec21cd7a" namespace=k8s.io protocol=ttrpc version=3
Nov 29 09:20:35 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:35.703469012Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:3abcbd08-d7c4-4a13-b94c-6f6424975411,Namespace:default,Attempt:0,} returns sandbox id \"ddd79130415cc8649c69caccfc081affa5f1da8a2517127cdbcf8d824a791490\""
Nov 29 09:20:35 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:35.712136437Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\""
Nov 29 09:20:37 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:37.805646978Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 29 09:20:37 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:37.808907002Z" level=info msg="stop pulling image gcr.io/k8s-minikube/busybox:1.28.4-glibc: active requests=0, bytes read=1937188"
Nov 29 09:20:37 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:37.812726259Z" level=info msg="ImageCreate event name:\"sha256:1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 29 09:20:37 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:37.815034818Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 29 09:20:37 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:37.816002472Z" level=info msg="Pulled image \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" with image id \"sha256:1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c\", repo tag \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\", repo digest \"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\", size \"1935750\" in 2.103636897s"
Nov 29 09:20:37 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:37.816153291Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" returns image reference \"sha256:1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c\""
Nov 29 09:20:37 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:37.820479635Z" level=info msg="CreateContainer within sandbox \"ddd79130415cc8649c69caccfc081affa5f1da8a2517127cdbcf8d824a791490\" for container &ContainerMetadata{Name:busybox,Attempt:0,}"
Nov 29 09:20:37 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:37.837353396Z" level=info msg="Container b9e829b9abde5402e2cbe089579fccb3fcaa2d4225461d6d9fe9bceddbff0c20: CDI devices from CRI Config.CDIDevices: []"
Nov 29 09:20:37 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:37.849004626Z" level=info msg="CreateContainer within sandbox \"ddd79130415cc8649c69caccfc081affa5f1da8a2517127cdbcf8d824a791490\" for &ContainerMetadata{Name:busybox,Attempt:0,} returns container id \"b9e829b9abde5402e2cbe089579fccb3fcaa2d4225461d6d9fe9bceddbff0c20\""
Nov 29 09:20:37 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:37.852339424Z" level=info msg="StartContainer for \"b9e829b9abde5402e2cbe089579fccb3fcaa2d4225461d6d9fe9bceddbff0c20\""
Nov 29 09:20:37 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:37.855127486Z" level=info msg="connecting to shim b9e829b9abde5402e2cbe089579fccb3fcaa2d4225461d6d9fe9bceddbff0c20" address="unix:///run/containerd/s/6d78da511a42142891dae64b3eb6a171701a2aacf243055415398ac4ec21cd7a" protocol=ttrpc version=3
Nov 29 09:20:37 old-k8s-version-071895 containerd[758]: time="2025-11-29T09:20:37.962061310Z" level=info msg="StartContainer for \"b9e829b9abde5402e2cbe089579fccb3fcaa2d4225461d6d9fe9bceddbff0c20\" returns successfully"
Nov 29 09:20:44 old-k8s-version-071895 containerd[758]: E1129 09:20:44.932672 758 websocket.go:100] "Unhandled Error" err="unable to upgrade websocket connection: websocket server finished before becoming ready" logger="UnhandledError"
==> coredns [f8f1e6dc2605a052d9e0af268d75e52d11eef09c6da328c174daa4346e21359d] <==
.:53
[INFO] plugin/reload: Running configuration SHA512 = b7aacdf6a6aa730aafe4d018cac9b7b5ecfb346cba84a99f64521f87aef8b4958639c1cf97967716465791d05bd38f372615327b7cb1d93c850bae532744d54d
CoreDNS-1.10.1
linux/arm64, go1.20, 055b2c3
[INFO] 127.0.0.1:51515 - 3634 "HINFO IN 3397046818821823914.8081764445601178770. udp 57 false 512" NXDOMAIN qr,rd,ra 57 0.005882235s
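With CoreDNS answering (the NXDOMAIN for its HINFO self-query is the normal loop-detection result), in-cluster resolution can be spot-checked from the busybox pod this test deployed; a sketch:

  kubectl exec busybox -- nslookup kubernetes.default.svc.cluster.local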
==> describe nodes <==
Name:               old-k8s-version-071895
Roles:              control-plane
Labels:             beta.kubernetes.io/arch=arm64
                    beta.kubernetes.io/os=linux
                    kubernetes.io/arch=arm64
                    kubernetes.io/hostname=old-k8s-version-071895
                    kubernetes.io/os=linux
                    minikube.k8s.io/commit=d0eb20ec824c82ab3f24099c8b785e0a2a5789af
                    minikube.k8s.io/name=old-k8s-version-071895
                    minikube.k8s.io/primary=true
                    minikube.k8s.io/updated_at=2025_11_29T09_20_04_0700
                    minikube.k8s.io/version=v1.37.0
                    node-role.kubernetes.io/control-plane=
                    node.kubernetes.io/exclude-from-external-load-balancers=
Annotations:        kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock
                    node.alpha.kubernetes.io/ttl: 0
                    volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp:  Sat, 29 Nov 2025 09:19:58 +0000
Taints:             <none>
Unschedulable:      false
Lease:
  HolderIdentity:  old-k8s-version-071895
  AcquireTime:     <unset>
  RenewTime:       Sat, 29 Nov 2025 09:20:43 +0000
Conditions:
  Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
  ----             ------  -----------------                 ------------------                ------                       -------
  MemoryPressure   False   Sat, 29 Nov 2025 09:20:33 +0000   Sat, 29 Nov 2025 09:19:55 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
  DiskPressure     False   Sat, 29 Nov 2025 09:20:33 +0000   Sat, 29 Nov 2025 09:19:55 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
  PIDPressure      False   Sat, 29 Nov 2025 09:20:33 +0000   Sat, 29 Nov 2025 09:19:55 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
  Ready            True    Sat, 29 Nov 2025 09:20:33 +0000   Sat, 29 Nov 2025 09:20:29 +0000   KubeletReady                 kubelet is posting ready status
Addresses:
  InternalIP:  192.168.76.2
  Hostname:    old-k8s-version-071895
Capacity:
  cpu:                2
  ephemeral-storage:  203034800Ki
  hugepages-1Gi:      0
  hugepages-2Mi:      0
  hugepages-32Mi:     0
  hugepages-64Ki:     0
  memory:             8022296Ki
  pods:               110
Allocatable:
  cpu:                2
  ephemeral-storage:  203034800Ki
  hugepages-1Gi:      0
  hugepages-2Mi:      0
  hugepages-32Mi:     0
  hugepages-64Ki:     0
  memory:             8022296Ki
  pods:               110
System Info:
  Machine ID:                 7283ea1857f18f20a875c29069214c9d
  System UUID:                453a3f46-be9b-4440-b54b-7bd5b2275c63
  Boot ID:                    6647f078-4edd-40c5-9d0e-49eb5ed00bd7
  Kernel Version:             5.15.0-1084-aws
  OS Image:                   Debian GNU/Linux 12 (bookworm)
  Operating System:           linux
  Architecture:               arm64
  Container Runtime Version:  containerd://2.1.5
  Kubelet Version:            v1.28.0
  Kube-Proxy Version:         v1.28.0
PodCIDR:                      10.244.0.0/24
PodCIDRs:                     10.244.0.0/24
Non-terminated Pods:          (9 in total)
  Namespace    Name                                             CPU Requests  CPU Limits  Memory Requests  Memory Limits  Age
  ---------    ----                                             ------------  ----------  ---------------  -------------  ---
  default      busybox                                          0 (0%)        0 (0%)      0 (0%)           0 (0%)         17s
  kube-system  coredns-5dd5756b68-htmzr                         100m (5%)     0 (0%)      70Mi (0%)        170Mi (2%)     35s
  kube-system  etcd-old-k8s-version-071895                      100m (5%)     0 (0%)      100Mi (1%)       0 (0%)         47s
  kube-system  kindnet-58g5f                                    100m (5%)     100m (5%)   50Mi (0%)        50Mi (0%)      35s
  kube-system  kube-apiserver-old-k8s-version-071895            250m (12%)    0 (0%)      0 (0%)           0 (0%)         51s
  kube-system  kube-controller-manager-old-k8s-version-071895   200m (10%)    0 (0%)      0 (0%)           0 (0%)         47s
  kube-system  kube-proxy-4jxrn                                 0 (0%)        0 (0%)      0 (0%)           0 (0%)         35s
  kube-system  kube-scheduler-old-k8s-version-071895            100m (5%)     0 (0%)      0 (0%)           0 (0%)         47s
  kube-system  storage-provisioner                              0 (0%)        0 (0%)      0 (0%)           0 (0%)         33s
Allocated resources:
  (Total limits may be over 100 percent, i.e., overcommitted.)
  Resource           Requests     Limits
  --------           --------     ------
  cpu                850m (42%)   100m (5%)
  memory             220Mi (2%)   220Mi (2%)
  ephemeral-storage  0 (0%)       0 (0%)
  hugepages-1Gi      0 (0%)       0 (0%)
  hugepages-2Mi      0 (0%)       0 (0%)
  hugepages-32Mi     0 (0%)       0 (0%)
  hugepages-64Ki     0 (0%)       0 (0%)
Events:
  Type    Reason                   Age  From             Message
  ----    ------                   ---- ----             -------
  Normal  Starting                 33s  kube-proxy
  Normal  Starting                 48s  kubelet          Starting kubelet.
  Normal  NodeHasSufficientMemory  48s  kubelet          Node old-k8s-version-071895 status is now: NodeHasSufficientMemory
  Normal  NodeHasNoDiskPressure    48s  kubelet          Node old-k8s-version-071895 status is now: NodeHasNoDiskPressure
  Normal  NodeHasSufficientPID     48s  kubelet          Node old-k8s-version-071895 status is now: NodeHasSufficientPID
  Normal  NodeAllocatableEnforced  47s  kubelet          Updated Node Allocatable limit across pods
  Normal  RegisteredNode           36s  node-controller  Node old-k8s-version-071895 event: Registered Node old-k8s-version-071895 in Controller
  Normal  NodeReady                21s  kubelet          Node old-k8s-version-071895 status is now: NodeReady
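(Aside on the dump above: everything `describe nodes` prints here is projected from the node's status object, so individual fields can be checked directly. A minimal sketch using the same context and node name as the test and standard kubectl jsonpath; the comments simply restate the Ready row and Allocatable block above.)
# Ready condition status -- prints "True", per the Conditions table above
kubectl --context old-k8s-version-071895 get node old-k8s-version-071895 \
  -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}{"\n"}'
# Allocatable CPU -- prints "2", matching the Allocatable block above
kubectl --context old-k8s-version-071895 get node old-k8s-version-071895 \
  -o jsonpath='{.status.allocatable.cpu}{"\n"}'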
==> dmesg <==
[Nov29 08:17] ACPI: SRAT not present
[ +0.000000] ACPI: SRAT not present
[ +0.000000] SPI driver altr_a10sr has no spi_device_id for altr,a10sr
[ +0.014634] device-mapper: core: CONFIG_IMA_DISABLE_HTABLE is disabled. Duplicate IMA measurements will not be recorded in the IMA log.
[ +0.570975] systemd[1]: Configuration file /run/systemd/system/netplan-ovs-cleanup.service is marked world-inaccessible. This has no effect as configuration data is accessible via APIs without restrictions. Proceeding anyway.
[ +0.032231] systemd[1]: /lib/systemd/system/snapd.service:23: Unknown key name 'RestartMode' in section 'Service', ignoring.
[ +0.767655] ena 0000:00:05.0: LLQ is not supported Fallback to host mode policy.
[ +6.282538] kauditd_printk_skb: 36 callbacks suppressed
[Nov29 08:39] FS-Cache: Duplicate cookie detected
[ +0.000727] FS-Cache: O-cookie c=00000013 [p=00000002 fl=222 nc=0 na=1]
[ +0.001077] FS-Cache: O-cookie d=00000000b08097f7{9P.session} n=00000000a17ba85f
[ +0.001074] FS-Cache: O-key=[10] '34323935323231393134'
[ +0.000776] FS-Cache: N-cookie c=00000014 [p=00000002 fl=2 nc=0 na=1]
[ +0.000954] FS-Cache: N-cookie d=00000000b08097f7{9P.session} n=00000000534469ad
[ +0.001092] FS-Cache: N-key=[10] '34323935323231393134'
[Nov29 09:19] hrtimer: interrupt took 12545193 ns
==> etcd [41dff26eb8e679cc29a87f83f59d117073bdaeb9ac41cb8ac8ee1cb32c92511a] <==
{"level":"info","ts":"2025-11-29T09:19:54.897611Z","caller":"embed/etcd.go:569","msg":"cmux::serve","address":"192.168.76.2:2380"}
{"level":"info","ts":"2025-11-29T09:19:54.901566Z","caller":"embed/etcd.go:278","msg":"now serving peer/client/metrics","local-member-id":"ea7e25599daad906","initial-advertise-peer-urls":["https://192.168.76.2:2380"],"listen-peer-urls":["https://192.168.76.2:2380"],"advertise-client-urls":["https://192.168.76.2:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.76.2:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
{"level":"info","ts":"2025-11-29T09:19:54.901625Z","caller":"embed/etcd.go:855","msg":"serving metrics","address":"http://127.0.0.1:2381"}
{"level":"info","ts":"2025-11-29T09:19:55.060661Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 is starting a new election at term 1"}
{"level":"info","ts":"2025-11-29T09:19:55.060785Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 became pre-candidate at term 1"}
{"level":"info","ts":"2025-11-29T09:19:55.060882Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 received MsgPreVoteResp from ea7e25599daad906 at term 1"}
{"level":"info","ts":"2025-11-29T09:19:55.060949Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 became candidate at term 2"}
{"level":"info","ts":"2025-11-29T09:19:55.060985Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 received MsgVoteResp from ea7e25599daad906 at term 2"}
{"level":"info","ts":"2025-11-29T09:19:55.061056Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 became leader at term 2"}
{"level":"info","ts":"2025-11-29T09:19:55.06113Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: ea7e25599daad906 elected leader ea7e25599daad906 at term 2"}
{"level":"info","ts":"2025-11-29T09:19:55.062447Z","caller":"etcdserver/server.go:2062","msg":"published local member to cluster through raft","local-member-id":"ea7e25599daad906","local-member-attributes":"{Name:old-k8s-version-071895 ClientURLs:[https://192.168.76.2:2379]}","request-path":"/0/members/ea7e25599daad906/attributes","cluster-id":"6f20f2c4b2fb5f8a","publish-timeout":"7s"}
{"level":"info","ts":"2025-11-29T09:19:55.062536Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-29T09:19:55.063797Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2025-11-29T09:19:55.063991Z","caller":"etcdserver/server.go:2571","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-29T09:19:55.065852Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-29T09:19:55.066951Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.76.2:2379"}
{"level":"info","ts":"2025-11-29T09:19:55.067534Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"6f20f2c4b2fb5f8a","local-member-id":"ea7e25599daad906","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-29T09:19:55.067717Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-29T09:19:55.071793Z","caller":"etcdserver/server.go:2595","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-29T09:19:55.071959Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2025-11-29T09:19:55.072006Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2025-11-29T09:20:15.052803Z","caller":"traceutil/trace.go:171","msg":"trace[25407896] transaction","detail":"{read_only:false; response_revision:297; number_of_response:1; }","duration":"106.818617ms","start":"2025-11-29T09:20:14.945956Z","end":"2025-11-29T09:20:15.052774Z","steps":["trace[25407896] 'process raft request' (duration: 106.616925ms)"],"step_count":1}
{"level":"info","ts":"2025-11-29T09:20:15.064957Z","caller":"traceutil/trace.go:171","msg":"trace[1542162002] transaction","detail":"{read_only:false; response_revision:300; number_of_response:1; }","duration":"106.599802ms","start":"2025-11-29T09:20:14.95834Z","end":"2025-11-29T09:20:15.064939Z","steps":["trace[1542162002] 'process raft request' (duration: 106.563165ms)"],"step_count":1}
{"level":"info","ts":"2025-11-29T09:20:15.065342Z","caller":"traceutil/trace.go:171","msg":"trace[758518492] transaction","detail":"{read_only:false; response_revision:298; number_of_response:1; }","duration":"119.137568ms","start":"2025-11-29T09:20:14.946194Z","end":"2025-11-29T09:20:15.065332Z","steps":["trace[758518492] 'process raft request' (duration: 118.584375ms)"],"step_count":1}
{"level":"info","ts":"2025-11-29T09:20:15.065438Z","caller":"traceutil/trace.go:171","msg":"trace[2009828336] transaction","detail":"{read_only:false; response_revision:299; number_of_response:1; }","duration":"112.325548ms","start":"2025-11-29T09:20:14.953105Z","end":"2025-11-29T09:20:15.065431Z","steps":["trace[2009828336] 'process raft request' (duration: 111.76593ms)"],"step_count":1}
==> kernel <==
09:20:50 up 1:03, 0 user, load average: 3.24, 2.71, 2.63
Linux old-k8s-version-071895 5.15.0-1084-aws #91~20.04.1-Ubuntu SMP Fri May 2 07:00:04 UTC 2025 aarch64 GNU/Linux
PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"
==> kindnet [db1d77c6c85eaf5ebd7dc839fb54d40271ee80c34795b249a47534f35c064f1c] <==
I1129 09:20:19.083145 1 main.go:109] connected to apiserver: https://10.96.0.1:443
I1129 09:20:19.083520 1 main.go:139] hostIP = 192.168.76.2
podIP = 192.168.76.2
I1129 09:20:19.083647 1 main.go:148] setting mtu 1500 for CNI
I1129 09:20:19.083659 1 main.go:178] kindnetd IP family: "ipv4"
I1129 09:20:19.083671 1 main.go:182] noMask IPv4 subnets: [10.244.0.0/16]
time="2025-11-29T09:20:19Z" level=info msg="Created plugin 10-kube-network-policies (kindnetd, handles RunPodSandbox,RemovePodSandbox)"
I1129 09:20:19.286160 1 controller.go:377] "Starting controller" name="kube-network-policies"
I1129 09:20:19.286239 1 controller.go:381] "Waiting for informer caches to sync"
I1129 09:20:19.286373 1 shared_informer.go:350] "Waiting for caches to sync" controller="kube-network-policies"
I1129 09:20:19.287882 1 controller.go:390] nri plugin exited: failed to connect to NRI service: dial unix /var/run/nri/nri.sock: connect: no such file or directory
I1129 09:20:19.580767 1 shared_informer.go:357] "Caches are synced" controller="kube-network-policies"
I1129 09:20:19.580802 1 metrics.go:72] Registering metrics
I1129 09:20:19.580865 1 controller.go:711] "Syncing nftables rules"
I1129 09:20:29.287220 1 main.go:297] Handling node with IPs: map[192.168.76.2:{}]
I1129 09:20:29.287264 1 main.go:301] handling current node
I1129 09:20:39.286004 1 main.go:297] Handling node with IPs: map[192.168.76.2:{}]
I1129 09:20:39.286281 1 main.go:301] handling current node
I1129 09:20:49.294522 1 main.go:297] Handling node with IPs: map[192.168.76.2:{}]
I1129 09:20:49.294558 1 main.go:301] handling current node
==> kube-apiserver [d34a4ced6121deea5f0e58655a9a45e86fccdde412c9acf3d1e35ab330cd1b4b] <==
I1129 09:19:58.687723 1 shared_informer.go:318] Caches are synced for crd-autoregister
I1129 09:19:58.689876 1 apf_controller.go:377] Running API Priority and Fairness config worker
I1129 09:19:58.689902 1 apf_controller.go:380] Running API Priority and Fairness periodic rebalancing process
I1129 09:19:58.690079 1 cache.go:39] Caches are synced for AvailableConditionController controller
I1129 09:19:58.691126 1 aggregator.go:166] initial CRD sync complete...
I1129 09:19:58.691143 1 autoregister_controller.go:141] Starting autoregister controller
I1129 09:19:58.691150 1 cache.go:32] Waiting for caches to sync for autoregister controller
I1129 09:19:58.691158 1 cache.go:39] Caches are synced for autoregister controller
E1129 09:19:58.752402 1 controller.go:145] while syncing ConfigMap "kube-system/kube-apiserver-legacy-service-account-token-tracking", err: namespaces "kube-system" not found
I1129 09:19:58.885509 1 controller.go:624] quota admission added evaluator for: leases.coordination.k8s.io
I1129 09:19:59.184340 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I1129 09:19:59.194717 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I1129 09:19:59.195065 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I1129 09:20:00.545658 1 controller.go:624] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1129 09:20:00.693098 1 controller.go:624] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1129 09:20:00.863619 1 alloc.go:330] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W1129 09:20:00.877801 1 lease.go:263] Resetting endpoints for master service "kubernetes" to [192.168.76.2]
I1129 09:20:00.879300 1 controller.go:624] quota admission added evaluator for: endpoints
I1129 09:20:00.885677 1 controller.go:624] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1129 09:20:01.758115 1 controller.go:624] quota admission added evaluator for: serviceaccounts
I1129 09:20:02.382429 1 controller.go:624] quota admission added evaluator for: deployments.apps
I1129 09:20:02.396930 1 alloc.go:330] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I1129 09:20:02.411199 1 controller.go:624] quota admission added evaluator for: daemonsets.apps
I1129 09:20:15.297358 1 controller.go:624] quota admission added evaluator for: replicasets.apps
I1129 09:20:15.463834 1 controller.go:624] quota admission added evaluator for: controllerrevisions.apps
==> kube-controller-manager [c6e9c9ab04ae16e634fbb9b4e1d16587356b43ecc4799412da2e56e79409870b] <==
I1129 09:20:15.111764 1 event.go:307] "Event occurred" object="kube-system/kube-scheduler-old-k8s-version-071895" fieldPath="" kind="Pod" apiVersion="v1" type="Warning" reason="NodeNotReady" message="Node is not ready"
I1129 09:20:15.174792 1 event.go:307] "Event occurred" object="kube-system/kube-apiserver-old-k8s-version-071895" fieldPath="" kind="Pod" apiVersion="v1" type="Warning" reason="NodeNotReady" message="Node is not ready"
I1129 09:20:15.320980 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set coredns-5dd5756b68 to 2"
I1129 09:20:15.351255 1 shared_informer.go:318] Caches are synced for garbage collector
I1129 09:20:15.351286 1 garbagecollector.go:166] "All resource monitors have synced. Proceeding to collect garbage"
I1129 09:20:15.384164 1 shared_informer.go:318] Caches are synced for garbage collector
I1129 09:20:15.486462 1 event.go:307] "Event occurred" object="kube-system/kube-proxy" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-4jxrn"
I1129 09:20:15.486489 1 event.go:307] "Event occurred" object="kube-system/kindnet" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kindnet-58g5f"
I1129 09:20:15.643761 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-rk2xx"
I1129 09:20:15.661237 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-htmzr"
I1129 09:20:15.701868 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="389.914526ms"
I1129 09:20:15.744722 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="42.452889ms"
I1129 09:20:15.746651 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="83.963µs"
I1129 09:20:17.246540 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set coredns-5dd5756b68 to 1 from 2"
I1129 09:20:17.300225 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: coredns-5dd5756b68-rk2xx"
I1129 09:20:17.312673 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="66.947307ms"
I1129 09:20:17.322261 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="9.52965ms"
I1129 09:20:17.323333 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="806.512µs"
I1129 09:20:29.431259 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="83.143µs"
I1129 09:20:29.490111 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="119.681µs"
I1129 09:20:30.130582 1 event.go:307] "Event occurred" object="kube-system/storage-provisioner" fieldPath="" kind="Pod" apiVersion="" type="Normal" reason="TaintManagerEviction" message="Cancelling deletion of Pod kube-system/storage-provisioner"
I1129 09:20:30.130619 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68-htmzr" fieldPath="" kind="Pod" apiVersion="" type="Normal" reason="TaintManagerEviction" message="Cancelling deletion of Pod kube-system/coredns-5dd5756b68-htmzr"
I1129 09:20:30.131138 1 node_lifecycle_controller.go:1048] "Controller detected that some Nodes are Ready. Exiting master disruption mode"
I1129 09:20:31.018335 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="24.226889ms"
I1129 09:20:31.018459 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="77.794µs"
==> kube-proxy [000a8de26034dcdc6da38237d77f79fa914b3088e593f0bbd13e14b39b42bf00] <==
I1129 09:20:16.555876 1 server_others.go:69] "Using iptables proxy"
I1129 09:20:16.579548 1 node.go:141] Successfully retrieved node IP: 192.168.76.2
I1129 09:20:16.643168 1 server.go:632] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I1129 09:20:16.645058 1 server_others.go:152] "Using iptables Proxier"
I1129 09:20:16.645109 1 server_others.go:421] "Detect-local-mode set to ClusterCIDR, but no cluster CIDR for family" ipFamily="IPv6"
I1129 09:20:16.645128 1 server_others.go:438] "Defaulting to no-op detect-local"
I1129 09:20:16.645164 1 proxier.go:251] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
I1129 09:20:16.645384 1 server.go:846] "Version info" version="v1.28.0"
I1129 09:20:16.645401 1 server.go:848] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1129 09:20:16.657042 1 config.go:188] "Starting service config controller"
I1129 09:20:16.657067 1 shared_informer.go:311] Waiting for caches to sync for service config
I1129 09:20:16.657128 1 config.go:97] "Starting endpoint slice config controller"
I1129 09:20:16.657132 1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
I1129 09:20:16.657163 1 config.go:315] "Starting node config controller"
I1129 09:20:16.657166 1 shared_informer.go:311] Waiting for caches to sync for node config
I1129 09:20:16.757328 1 shared_informer.go:318] Caches are synced for node config
I1129 09:20:16.757472 1 shared_informer.go:318] Caches are synced for endpoint slice config
I1129 09:20:16.757514 1 shared_informer.go:318] Caches are synced for service config
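(The route_localnet message above names the two knobs that control NodePort-on-localhost behaviour. A sketch of their flag forms; the flag names are quoted verbatim from the message, while the CIDR value is illustrative only.)
kube-proxy --iptables-localhost-nodeports=false    # stop serving NodePorts on 127.0.0.1, so route_localnet is left at 0
kube-proxy --nodeport-addresses=192.168.76.0/24    # alternatively, restrict NodePorts to non-loopback addresses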
==> kube-scheduler [7c5e9c05d20b870a1e96cdb0bbf1479f013609a2bbcde73ff5f9b106d4a35049] <==
I1129 09:19:58.666858 1 serving.go:348] Generated self-signed cert in-memory
W1129 09:20:00.321955 1 requestheader_controller.go:193] Unable to get configmap/extension-apiserver-authentication in kube-system. Usually fixed by 'kubectl create rolebinding -n kube-system ROLEBINDING_NAME --role=extension-apiserver-authentication-reader --serviceaccount=YOUR_NS:YOUR_SA'
W1129 09:20:00.322235 1 authentication.go:368] Error looking up in-cluster authentication configuration: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot get resource "configmaps" in API group "" in the namespace "kube-system"
W1129 09:20:00.322326 1 authentication.go:369] Continuing without authentication configuration. This may treat all requests as anonymous.
W1129 09:20:00.322411 1 authentication.go:370] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false
I1129 09:20:00.396927 1 server.go:154] "Starting Kubernetes Scheduler" version="v1.28.0"
I1129 09:20:00.399854 1 server.go:156] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1129 09:20:00.419574 1 secure_serving.go:210] Serving securely on 127.0.0.1:10259
I1129 09:20:00.431997 1 configmap_cafile_content.go:202] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I1129 09:20:00.432131 1 shared_informer.go:311] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
I1129 09:20:00.432227 1 tlsconfig.go:240] "Starting DynamicServingCertificateController"
W1129 09:20:00.482293 1 reflector.go:535] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E1129 09:20:00.482341 1 reflector.go:147] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
I1129 09:20:01.932942 1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
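(The warnings above show kube-scheduler failing to read the extension-apiserver-authentication ConfigMap at startup; the first message carries its own remedy template, and the final 'Caches are synced' line shows the lookup eventually succeeded in this run. Filled in for this cluster, the remedy might look like the sketch below: the rolebinding name is hypothetical, and --user is substituted for the template's --serviceaccount because the 'forbidden' errors show the scheduler authenticating as the user system:kube-scheduler.)
kubectl --context old-k8s-version-071895 -n kube-system \
  create rolebinding scheduler-authn-reader \
  --role=extension-apiserver-authentication-reader \
  --user=system:kube-scheduler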
==> kubelet <==
Nov 29 09:20:15 old-k8s-version-071895 kubelet[1545]: I1129 09:20:15.542826 1545 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/3e4bdb82-85e5-468b-80dc-0481c990f117-kube-proxy\") pod \"kube-proxy-4jxrn\" (UID: \"3e4bdb82-85e5-468b-80dc-0481c990f117\") " pod="kube-system/kube-proxy-4jxrn"
Nov 29 09:20:15 old-k8s-version-071895 kubelet[1545]: I1129 09:20:15.542946 1545 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-cfg\" (UniqueName: \"kubernetes.io/host-path/d4743cee-0834-4a44-9cf7-d0228aa5b843-cni-cfg\") pod \"kindnet-58g5f\" (UID: \"d4743cee-0834-4a44-9cf7-d0228aa5b843\") " pod="kube-system/kindnet-58g5f"
Nov 29 09:20:15 old-k8s-version-071895 kubelet[1545]: I1129 09:20:15.543093 1545 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/d4743cee-0834-4a44-9cf7-d0228aa5b843-xtables-lock\") pod \"kindnet-58g5f\" (UID: \"d4743cee-0834-4a44-9cf7-d0228aa5b843\") " pod="kube-system/kindnet-58g5f"
Nov 29 09:20:15 old-k8s-version-071895 kubelet[1545]: I1129 09:20:15.543236 1545 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zcrqh\" (UniqueName: \"kubernetes.io/projected/3e4bdb82-85e5-468b-80dc-0481c990f117-kube-api-access-zcrqh\") pod \"kube-proxy-4jxrn\" (UID: \"3e4bdb82-85e5-468b-80dc-0481c990f117\") " pod="kube-system/kube-proxy-4jxrn"
Nov 29 09:20:15 old-k8s-version-071895 kubelet[1545]: I1129 09:20:15.543388 1545 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/d4743cee-0834-4a44-9cf7-d0228aa5b843-lib-modules\") pod \"kindnet-58g5f\" (UID: \"d4743cee-0834-4a44-9cf7-d0228aa5b843\") " pod="kube-system/kindnet-58g5f"
Nov 29 09:20:15 old-k8s-version-071895 kubelet[1545]: I1129 09:20:15.543527 1545 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hfjbl\" (UniqueName: \"kubernetes.io/projected/d4743cee-0834-4a44-9cf7-d0228aa5b843-kube-api-access-hfjbl\") pod \"kindnet-58g5f\" (UID: \"d4743cee-0834-4a44-9cf7-d0228aa5b843\") " pod="kube-system/kindnet-58g5f"
Nov 29 09:20:16 old-k8s-version-071895 kubelet[1545]: I1129 09:20:16.904236 1545 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kube-proxy-4jxrn" podStartSLOduration=1.904182809 podCreationTimestamp="2025-11-29 09:20:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 09:20:16.903944744 +0000 UTC m=+14.574033893" watchObservedRunningTime="2025-11-29 09:20:16.904182809 +0000 UTC m=+14.574271949"
Nov 29 09:20:22 old-k8s-version-071895 kubelet[1545]: I1129 09:20:22.690149 1545 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kindnet-58g5f" podStartSLOduration=5.068996977 podCreationTimestamp="2025-11-29 09:20:15 +0000 UTC" firstStartedPulling="2025-11-29 09:20:16.131889821 +0000 UTC m=+13.801978953" lastFinishedPulling="2025-11-29 09:20:18.75299704 +0000 UTC m=+16.423086171" observedRunningTime="2025-11-29 09:20:19.919717563 +0000 UTC m=+17.589806703" watchObservedRunningTime="2025-11-29 09:20:22.690104195 +0000 UTC m=+20.360193335"
Nov 29 09:20:29 old-k8s-version-071895 kubelet[1545]: I1129 09:20:29.372571 1545 kubelet_node_status.go:493] "Fast updating node status as it just became ready"
Nov 29 09:20:29 old-k8s-version-071895 kubelet[1545]: I1129 09:20:29.424392 1545 topology_manager.go:215] "Topology Admit Handler" podUID="784fe707-ae15-4eae-a70c-ec084ce3d812" podNamespace="kube-system" podName="storage-provisioner"
Nov 29 09:20:29 old-k8s-version-071895 kubelet[1545]: I1129 09:20:29.431465 1545 topology_manager.go:215] "Topology Admit Handler" podUID="c6b5f2ee-df4f-40a3-be2e-6f16e858e497" podNamespace="kube-system" podName="coredns-5dd5756b68-htmzr"
Nov 29 09:20:29 old-k8s-version-071895 kubelet[1545]: I1129 09:20:29.459512 1545 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/784fe707-ae15-4eae-a70c-ec084ce3d812-tmp\") pod \"storage-provisioner\" (UID: \"784fe707-ae15-4eae-a70c-ec084ce3d812\") " pod="kube-system/storage-provisioner"
Nov 29 09:20:29 old-k8s-version-071895 kubelet[1545]: I1129 09:20:29.459744 1545 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hzcr9\" (UniqueName: \"kubernetes.io/projected/784fe707-ae15-4eae-a70c-ec084ce3d812-kube-api-access-hzcr9\") pod \"storage-provisioner\" (UID: \"784fe707-ae15-4eae-a70c-ec084ce3d812\") " pod="kube-system/storage-provisioner"
Nov 29 09:20:29 old-k8s-version-071895 kubelet[1545]: I1129 09:20:29.459885 1545 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ch9tz\" (UniqueName: \"kubernetes.io/projected/c6b5f2ee-df4f-40a3-be2e-6f16e858e497-kube-api-access-ch9tz\") pod \"coredns-5dd5756b68-htmzr\" (UID: \"c6b5f2ee-df4f-40a3-be2e-6f16e858e497\") " pod="kube-system/coredns-5dd5756b68-htmzr"
Nov 29 09:20:29 old-k8s-version-071895 kubelet[1545]: I1129 09:20:29.460022 1545 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c6b5f2ee-df4f-40a3-be2e-6f16e858e497-config-volume\") pod \"coredns-5dd5756b68-htmzr\" (UID: \"c6b5f2ee-df4f-40a3-be2e-6f16e858e497\") " pod="kube-system/coredns-5dd5756b68-htmzr"
Nov 29 09:20:30 old-k8s-version-071895 kubelet[1545]: I1129 09:20:30.997910 1545 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=13.997856203 podCreationTimestamp="2025-11-29 09:20:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 09:20:30.970917027 +0000 UTC m=+28.641006167" watchObservedRunningTime="2025-11-29 09:20:30.997856203 +0000 UTC m=+28.667945343"
Nov 29 09:20:33 old-k8s-version-071895 kubelet[1545]: I1129 09:20:33.708750 1545 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/coredns-5dd5756b68-htmzr" podStartSLOduration=18.708653504 podCreationTimestamp="2025-11-29 09:20:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 09:20:30.99830195 +0000 UTC m=+28.668391090" watchObservedRunningTime="2025-11-29 09:20:33.708653504 +0000 UTC m=+31.378742653"
Nov 29 09:20:33 old-k8s-version-071895 kubelet[1545]: I1129 09:20:33.709581 1545 topology_manager.go:215] "Topology Admit Handler" podUID="3abcbd08-d7c4-4a13-b94c-6f6424975411" podNamespace="default" podName="busybox"
Nov 29 09:20:33 old-k8s-version-071895 kubelet[1545]: W1129 09:20:33.759772 1545 reflector.go:535] object-"default"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:old-k8s-version-071895" cannot list resource "configmaps" in API group "" in the namespace "default": no relationship found between node 'old-k8s-version-071895' and this object
Nov 29 09:20:33 old-k8s-version-071895 kubelet[1545]: E1129 09:20:33.759821 1545 reflector.go:147] object-"default"/"kube-root-ca.crt": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:old-k8s-version-071895" cannot list resource "configmaps" in API group "" in the namespace "default": no relationship found between node 'old-k8s-version-071895' and this object
Nov 29 09:20:33 old-k8s-version-071895 kubelet[1545]: I1129 09:20:33.794129 1545 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7w6jg\" (UniqueName: \"kubernetes.io/projected/3abcbd08-d7c4-4a13-b94c-6f6424975411-kube-api-access-7w6jg\") pod \"busybox\" (UID: \"3abcbd08-d7c4-4a13-b94c-6f6424975411\") " pod="default/busybox"
Nov 29 09:20:34 old-k8s-version-071895 kubelet[1545]: E1129 09:20:34.906850 1545 projected.go:292] Couldn't get configMap default/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition
Nov 29 09:20:34 old-k8s-version-071895 kubelet[1545]: E1129 09:20:34.908357 1545 projected.go:198] Error preparing data for projected volume kube-api-access-7w6jg for pod default/busybox: failed to sync configmap cache: timed out waiting for the condition
Nov 29 09:20:34 old-k8s-version-071895 kubelet[1545]: E1129 09:20:34.908523 1545 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3abcbd08-d7c4-4a13-b94c-6f6424975411-kube-api-access-7w6jg podName:3abcbd08-d7c4-4a13-b94c-6f6424975411 nodeName:}" failed. No retries permitted until 2025-11-29 09:20:35.408496185 +0000 UTC m=+33.078585316 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-7w6jg" (UniqueName: "kubernetes.io/projected/3abcbd08-d7c4-4a13-b94c-6f6424975411-kube-api-access-7w6jg") pod "busybox" (UID: "3abcbd08-d7c4-4a13-b94c-6f6424975411") : failed to sync configmap cache: timed out waiting for the condition
Nov 29 09:20:37 old-k8s-version-071895 kubelet[1545]: I1129 09:20:37.992486 1545 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="default/busybox" podStartSLOduration=2.8817945099999998 podCreationTimestamp="2025-11-29 09:20:33 +0000 UTC" firstStartedPulling="2025-11-29 09:20:35.706009491 +0000 UTC m=+33.376098623" lastFinishedPulling="2025-11-29 09:20:37.816649729 +0000 UTC m=+35.486738869" observedRunningTime="2025-11-29 09:20:37.991952135 +0000 UTC m=+35.662041292" watchObservedRunningTime="2025-11-29 09:20:37.992434756 +0000 UTC m=+35.662523896"
==> storage-provisioner [359d9432ef4979d387512d5a2a5a3cd9fb7a0987f4a3540a23407b70f7faf163] <==
I1129 09:20:30.214942 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1129 09:20:30.235967 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1129 09:20:30.236210 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I1129 09:20:30.252227 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I1129 09:20:30.255628 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_old-k8s-version-071895_105725d4-e591-4aa3-af10-2659a9fed2c2!
I1129 09:20:30.273258 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"d8dbb900-fced-4c3d-a6ea-15b88c536670", APIVersion:"v1", ResourceVersion:"415", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' old-k8s-version-071895_105725d4-e591-4aa3-af10-2659a9fed2c2 became leader
I1129 09:20:30.355956 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_old-k8s-version-071895_105725d4-e591-4aa3-af10-2659a9fed2c2!
-- /stdout --
helpers_test.go:262: (dbg) Run: out/minikube-linux-arm64 status --format={{.APIServer}} -p old-k8s-version-071895 -n old-k8s-version-071895
helpers_test.go:269: (dbg) Run: kubectl --context old-k8s-version-071895 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:293: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: end of post-mortem logs <<<
helpers_test.go:294: ---------------------/post-mortem---------------------------------
--- FAIL: TestStartStop/group/old-k8s-version/serial/DeployApp (18.69s)