=== RUN TestStartStop/group/old-k8s-version/serial/DeployApp
start_stop_delete_test.go:194: (dbg) Run: kubectl --context old-k8s-version-180638 create -f testdata/busybox.yaml
start_stop_delete_test.go:194: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:352: "busybox" [54457203-a4b0-4bfe-b7e6-9804ec70353f] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
helpers_test.go:352: "busybox" [54457203-a4b0-4bfe-b7e6-9804ec70353f] Running
start_stop_delete_test.go:194: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: integration-test=busybox healthy within 9.003159432s
start_stop_delete_test.go:194: (dbg) Run: kubectl --context old-k8s-version-180638 exec busybox -- /bin/sh -c "ulimit -n"
start_stop_delete_test.go:194: 'ulimit -n' returned 1024, expected 1048576
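    The assertion that fails here is the pod's open-file soft limit: the test execs `ulimit -n` inside the busybox pod and compares the output against 1048576, but the pod reports 1024. A minimal standalone sketch of that check, with a hypothetical helper name (this is not the test's actual code):

        // Hypothetical sketch of the failed check above, not the test's actual
        // code: exec `ulimit -n` inside the busybox pod and compare the soft
        // open-file limit against the expected value from the log (1048576).
        package main

        import (
            "fmt"
            "os/exec"
            "strings"
        )

        // podNoFileLimit mirrors: kubectl --context <ctx> exec busybox -- /bin/sh -c "ulimit -n"
        func podNoFileLimit(kubeContext string) (string, error) {
            out, err := exec.Command("kubectl", "--context", kubeContext,
                "exec", "busybox", "--", "/bin/sh", "-c", "ulimit -n").Output()
            return strings.TrimSpace(string(out)), err
        }

        func main() {
            got, err := podNoFileLimit("old-k8s-version-180638")
            if err != nil {
                fmt.Println("exec failed:", err)
                return
            }
            if got != "1048576" {
                fmt.Printf("'ulimit -n' returned %s, expected 1048576\n", got)
            }
        }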
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: docker inspect <======
helpers_test.go:239: (dbg) Run: docker inspect old-k8s-version-180638
helpers_test.go:243: (dbg) docker inspect old-k8s-version-180638:
-- stdout --
[
{
"Id": "3fb449072f419f1d1ff9eebb56f96c76cc24ab8ceb8213db71616f0ddddcbb9f",
"Created": "2025-11-23T08:41:19.865592877Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 197224,
"ExitCode": 0,
"Error": "",
"StartedAt": "2025-11-23T08:41:19.943635138Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:572c983e466f1f784136812eef5cc59ac623db764bc7704d3676c4643993fd08",
"ResolvConfPath": "/var/lib/docker/containers/3fb449072f419f1d1ff9eebb56f96c76cc24ab8ceb8213db71616f0ddddcbb9f/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/3fb449072f419f1d1ff9eebb56f96c76cc24ab8ceb8213db71616f0ddddcbb9f/hostname",
"HostsPath": "/var/lib/docker/containers/3fb449072f419f1d1ff9eebb56f96c76cc24ab8ceb8213db71616f0ddddcbb9f/hosts",
"LogPath": "/var/lib/docker/containers/3fb449072f419f1d1ff9eebb56f96c76cc24ab8ceb8213db71616f0ddddcbb9f/3fb449072f419f1d1ff9eebb56f96c76cc24ab8ceb8213db71616f0ddddcbb9f-json.log",
"Name": "/old-k8s-version-180638",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"old-k8s-version-180638:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {}
},
"NetworkMode": "old-k8s-version-180638",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 3221225472,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 6442450944,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"ID": "3fb449072f419f1d1ff9eebb56f96c76cc24ab8ceb8213db71616f0ddddcbb9f",
"LowerDir": "/var/lib/docker/overlay2/3a0f954d6f7082ad577dca92fa6658b1e327bb820ce9a801d55d584f14165f01-init/diff:/var/lib/docker/overlay2/88c30082a717909d357f7d81c88a05ce3487a40d372ee6dc57fb9f012e0502da/diff",
"MergedDir": "/var/lib/docker/overlay2/3a0f954d6f7082ad577dca92fa6658b1e327bb820ce9a801d55d584f14165f01/merged",
"UpperDir": "/var/lib/docker/overlay2/3a0f954d6f7082ad577dca92fa6658b1e327bb820ce9a801d55d584f14165f01/diff",
"WorkDir": "/var/lib/docker/overlay2/3a0f954d6f7082ad577dca92fa6658b1e327bb820ce9a801d55d584f14165f01/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "old-k8s-version-180638",
"Source": "/var/lib/docker/volumes/old-k8s-version-180638/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "old-k8s-version-180638",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "old-k8s-version-180638",
"name.minikube.sigs.k8s.io": "old-k8s-version-180638",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "c7b0d9b425062d52a0c8052c45b2a62780ff3f6f2620c50e9e88251d56098ed9",
"SandboxKey": "/var/run/docker/netns/c7b0d9b42506",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33053"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33054"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33057"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33055"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33056"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"old-k8s-version-180638": {
"IPAMConfig": {
"IPv4Address": "192.168.76.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "66:cc:5c:df:67:d2",
"DriverOpts": null,
"GwPriority": 0,
"NetworkID": "ec0f96b222364b6472248735ae9433b2f33bdeaa152953368412a68215eb42c4",
"EndpointID": "20998764ba69f988f94705bb48be4dc33edbb29c350250a4be2539cea69e130e",
"Gateway": "192.168.76.1",
"IPAddress": "192.168.76.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"old-k8s-version-180638",
"3fb449072f41"
]
}
}
}
}
]
-- /stdout --
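    Individual fields can be pulled straight out of this inspect document with a Go template rather than re-parsing the JSON; the cli_runner invocations later in this log use exactly that trick to resolve the randomly assigned host port for 22/tcp. An illustrative standalone sketch of the same pattern (not minikube's code):

        // Illustrative sketch, not minikube's code: extract a single field from
        // the inspect document with a Go template instead of re-parsing the JSON.
        // The template below is the same one the cli_runner invocations later in
        // this log use to resolve the randomly assigned host port for 22/tcp.
        package main

        import (
            "fmt"
            "os/exec"
            "strings"
        )

        func main() {
            tmpl := `{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}`
            out, err := exec.Command("docker", "container", "inspect",
                "-f", tmpl, "old-k8s-version-180638").Output()
            if err != nil {
                fmt.Println("inspect failed:", err)
                return
            }
            // For the container above this prints 33053, matching the Ports map.
            fmt.Println("ssh host port:", strings.TrimSpace(string(out)))
        }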
helpers_test.go:247: (dbg) Run: out/minikube-linux-arm64 status --format={{.Host}} -p old-k8s-version-180638 -n old-k8s-version-180638
helpers_test.go:252: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-arm64 -p old-k8s-version-180638 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-arm64 -p old-k8s-version-180638 logs -n 25: (1.230253972s)
helpers_test.go:260: TestStartStop/group/old-k8s-version/serial/DeployApp logs:
-- stdout --
==> Audit <==
┌─────────┬────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬──────────────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼──────────────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ ssh │ -p cilium-440243 sudo cat /etc/docker/daemon.json │ cilium-440243 │ jenkins │ v1.37.0 │ 23 Nov 25 08:39 UTC │ │
│ ssh │ -p cilium-440243 sudo docker system info │ cilium-440243 │ jenkins │ v1.37.0 │ 23 Nov 25 08:39 UTC │ │
│ ssh │ -p cilium-440243 sudo systemctl status cri-docker --all --full --no-pager │ cilium-440243 │ jenkins │ v1.37.0 │ 23 Nov 25 08:39 UTC │ │
│ ssh │ -p cilium-440243 sudo systemctl cat cri-docker --no-pager │ cilium-440243 │ jenkins │ v1.37.0 │ 23 Nov 25 08:39 UTC │ │
│ ssh │ -p cilium-440243 sudo cat /etc/systemd/system/cri-docker.service.d/10-cni.conf │ cilium-440243 │ jenkins │ v1.37.0 │ 23 Nov 25 08:39 UTC │ │
│ ssh │ -p cilium-440243 sudo cat /usr/lib/systemd/system/cri-docker.service │ cilium-440243 │ jenkins │ v1.37.0 │ 23 Nov 25 08:39 UTC │ │
│ ssh │ -p cilium-440243 sudo cri-dockerd --version │ cilium-440243 │ jenkins │ v1.37.0 │ 23 Nov 25 08:39 UTC │ │
│ ssh │ -p cilium-440243 sudo systemctl status containerd --all --full --no-pager │ cilium-440243 │ jenkins │ v1.37.0 │ 23 Nov 25 08:39 UTC │ │
│ ssh │ -p cilium-440243 sudo systemctl cat containerd --no-pager │ cilium-440243 │ jenkins │ v1.37.0 │ 23 Nov 25 08:39 UTC │ │
│ ssh │ -p cilium-440243 sudo cat /lib/systemd/system/containerd.service │ cilium-440243 │ jenkins │ v1.37.0 │ 23 Nov 25 08:39 UTC │ │
│ ssh │ -p cilium-440243 sudo cat /etc/containerd/config.toml │ cilium-440243 │ jenkins │ v1.37.0 │ 23 Nov 25 08:39 UTC │ │
│ ssh │ -p cilium-440243 sudo containerd config dump │ cilium-440243 │ jenkins │ v1.37.0 │ 23 Nov 25 08:39 UTC │ │
│ ssh │ -p cilium-440243 sudo systemctl status crio --all --full --no-pager │ cilium-440243 │ jenkins │ v1.37.0 │ 23 Nov 25 08:39 UTC │ │
│ ssh │ -p cilium-440243 sudo systemctl cat crio --no-pager │ cilium-440243 │ jenkins │ v1.37.0 │ 23 Nov 25 08:39 UTC │ │
│ ssh │ -p cilium-440243 sudo find /etc/crio -type f -exec sh -c 'echo {}; cat {}' \; │ cilium-440243 │ jenkins │ v1.37.0 │ 23 Nov 25 08:39 UTC │ │
│ ssh │ -p cilium-440243 sudo crio config │ cilium-440243 │ jenkins │ v1.37.0 │ 23 Nov 25 08:39 UTC │ │
│ delete │ -p cilium-440243 │ cilium-440243 │ jenkins │ v1.37.0 │ 23 Nov 25 08:39 UTC │ 23 Nov 25 08:39 UTC │
│ start │ -p cert-expiration-119748 --memory=3072 --cert-expiration=3m --driver=docker --container-runtime=containerd │ cert-expiration-119748 │ jenkins │ v1.37.0 │ 23 Nov 25 08:39 UTC │ 23 Nov 25 08:40 UTC │
│ ssh │ force-systemd-env-760522 ssh cat /etc/containerd/config.toml │ force-systemd-env-760522 │ jenkins │ v1.37.0 │ 23 Nov 25 08:40 UTC │ 23 Nov 25 08:40 UTC │
│ delete │ -p force-systemd-env-760522 │ force-systemd-env-760522 │ jenkins │ v1.37.0 │ 23 Nov 25 08:40 UTC │ 23 Nov 25 08:40 UTC │
│ start │ -p cert-options-106536 --memory=3072 --apiserver-ips=127.0.0.1 --apiserver-ips=192.168.15.15 --apiserver-names=localhost --apiserver-names=www.google.com --apiserver-port=8555 --driver=docker --container-runtime=containerd │ cert-options-106536 │ jenkins │ v1.37.0 │ 23 Nov 25 08:40 UTC │ 23 Nov 25 08:41 UTC │
│ ssh │ cert-options-106536 ssh openssl x509 -text -noout -in /var/lib/minikube/certs/apiserver.crt │ cert-options-106536 │ jenkins │ v1.37.0 │ 23 Nov 25 08:41 UTC │ 23 Nov 25 08:41 UTC │
│ ssh │ -p cert-options-106536 -- sudo cat /etc/kubernetes/admin.conf │ cert-options-106536 │ jenkins │ v1.37.0 │ 23 Nov 25 08:41 UTC │ 23 Nov 25 08:41 UTC │
│ delete │ -p cert-options-106536 │ cert-options-106536 │ jenkins │ v1.37.0 │ 23 Nov 25 08:41 UTC │ 23 Nov 25 08:41 UTC │
│ start │ -p old-k8s-version-180638 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.28.0 │ old-k8s-version-180638 │ jenkins │ v1.37.0 │ 23 Nov 25 08:41 UTC │ 23 Nov 25 08:42 UTC │
└─────────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/11/23 08:41:13
Running on machine: ip-172-31-31-251
Binary: Built with gc go1.25.3 for linux/arm64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1123 08:41:13.503798 196829 out.go:360] Setting OutFile to fd 1 ...
I1123 08:41:13.504001 196829 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1123 08:41:13.504037 196829 out.go:374] Setting ErrFile to fd 2...
I1123 08:41:13.504057 196829 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1123 08:41:13.504449 196829 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21966-2339/.minikube/bin
I1123 08:41:13.504989 196829 out.go:368] Setting JSON to false
I1123 08:41:13.507307 196829 start.go:133] hostinfo: {"hostname":"ip-172-31-31-251","uptime":5022,"bootTime":1763882251,"procs":187,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1084-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"982e3628-3742-4b3e-bb63-ac1b07660ec7"}
I1123 08:41:13.507402 196829 start.go:143] virtualization:
I1123 08:41:13.511220 196829 out.go:179] * [old-k8s-version-180638] minikube v1.37.0 on Ubuntu 20.04 (arm64)
I1123 08:41:13.515732 196829 out.go:179] - MINIKUBE_LOCATION=21966
I1123 08:41:13.516085 196829 notify.go:221] Checking for updates...
I1123 08:41:13.523195 196829 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1123 08:41:13.526521 196829 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21966-2339/kubeconfig
I1123 08:41:13.529705 196829 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21966-2339/.minikube
I1123 08:41:13.532894 196829 out.go:179] - MINIKUBE_BIN=out/minikube-linux-arm64
I1123 08:41:13.536018 196829 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1123 08:41:13.539629 196829 config.go:182] Loaded profile config "cert-expiration-119748": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1123 08:41:13.539739 196829 driver.go:422] Setting default libvirt URI to qemu:///system
I1123 08:41:13.574366 196829 docker.go:124] docker version: linux-28.1.1:Docker Engine - Community
I1123 08:41:13.574516 196829 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1123 08:41:13.638032 196829 info.go:266] docker info: {ID:EOU5:DNGX:XN6V:L2FZ:UXRM:5TWK:EVUR:KC2F:GT7Z:Y4O4:GB77:5PD3 Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:36 OomKillDisable:true NGoroutines:52 SystemTime:2025-11-23 08:41:13.62864309 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214831104 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-31-251 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I1123 08:41:13.638135 196829 docker.go:319] overlay module found
I1123 08:41:13.643635 196829 out.go:179] * Using the docker driver based on user configuration
I1123 08:41:13.646835 196829 start.go:309] selected driver: docker
I1123 08:41:13.646859 196829 start.go:927] validating driver "docker" against <nil>
I1123 08:41:13.646879 196829 start.go:938] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1123 08:41:13.647612 196829 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1123 08:41:13.702166 196829 info.go:266] docker info: {ID:EOU5:DNGX:XN6V:L2FZ:UXRM:5TWK:EVUR:KC2F:GT7Z:Y4O4:GB77:5PD3 Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:36 OomKillDisable:true NGoroutines:52 SystemTime:2025-11-23 08:41:13.693228668 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214831104 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-31-251 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I1123 08:41:13.702317 196829 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I1123 08:41:13.702534 196829 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1123 08:41:13.705700 196829 out.go:179] * Using Docker driver with root privileges
I1123 08:41:13.708681 196829 cni.go:84] Creating CNI manager for ""
I1123 08:41:13.708750 196829 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1123 08:41:13.708770 196829 start_flags.go:336] Found "CNI" CNI - setting NetworkPlugin=cni
I1123 08:41:13.708863 196829 start.go:353] cluster config:
{Name:old-k8s-version-180638 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-180638 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1123 08:41:13.711891 196829 out.go:179] * Starting "old-k8s-version-180638" primary control-plane node in "old-k8s-version-180638" cluster
I1123 08:41:13.714733 196829 cache.go:134] Beginning downloading kic base image for docker with containerd
I1123 08:41:13.717633 196829 out.go:179] * Pulling base image v0.0.48-1763789673-21948 ...
I1123 08:41:13.720589 196829 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1123 08:41:13.720638 196829 preload.go:203] Found local preload: /home/jenkins/minikube-integration/21966-2339/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-arm64.tar.lz4
I1123 08:41:13.720665 196829 cache.go:65] Caching tarball of preloaded images
I1123 08:41:13.720676 196829 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f in local docker daemon
I1123 08:41:13.720783 196829 preload.go:238] Found /home/jenkins/minikube-integration/21966-2339/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-arm64.tar.lz4 in cache, skipping download
I1123 08:41:13.720794 196829 cache.go:68] Finished verifying existence of preloaded tar for v1.28.0 on containerd
I1123 08:41:13.720923 196829 profile.go:143] Saving config to /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/config.json ...
I1123 08:41:13.720948 196829 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/config.json: {Name:mk3fa6091d320fb60049f236674c350f36f8b1c3 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:41:13.740066 196829 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f in local docker daemon, skipping pull
I1123 08:41:13.740090 196829 cache.go:158] gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f exists in daemon, skipping load
I1123 08:41:13.740110 196829 cache.go:243] Successfully downloaded all kic artifacts
I1123 08:41:13.740140 196829 start.go:360] acquireMachinesLock for old-k8s-version-180638: {Name:mk02adabcbe3b4194eb9b9cf13dfbc9bffd5d61a Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1123 08:41:13.740251 196829 start.go:364] duration metric: took 92.325µs to acquireMachinesLock for "old-k8s-version-180638"
I1123 08:41:13.740280 196829 start.go:93] Provisioning new machine with config: &{Name:old-k8s-version-180638 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-180638 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1123 08:41:13.740345 196829 start.go:125] createHost starting for "" (driver="docker")
I1123 08:41:13.743708 196829 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I1123 08:41:13.743928 196829 start.go:159] libmachine.API.Create for "old-k8s-version-180638" (driver="docker")
I1123 08:41:13.743964 196829 client.go:173] LocalClient.Create starting
I1123 08:41:13.744044 196829 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21966-2339/.minikube/certs/ca.pem
I1123 08:41:13.744081 196829 main.go:143] libmachine: Decoding PEM data...
I1123 08:41:13.744099 196829 main.go:143] libmachine: Parsing certificate...
I1123 08:41:13.744156 196829 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21966-2339/.minikube/certs/cert.pem
I1123 08:41:13.744179 196829 main.go:143] libmachine: Decoding PEM data...
I1123 08:41:13.744191 196829 main.go:143] libmachine: Parsing certificate...
I1123 08:41:13.744566 196829 cli_runner.go:164] Run: docker network inspect old-k8s-version-180638 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1123 08:41:13.760425 196829 cli_runner.go:211] docker network inspect old-k8s-version-180638 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1123 08:41:13.760511 196829 network_create.go:284] running [docker network inspect old-k8s-version-180638] to gather additional debugging logs...
I1123 08:41:13.760531 196829 cli_runner.go:164] Run: docker network inspect old-k8s-version-180638
W1123 08:41:13.775922 196829 cli_runner.go:211] docker network inspect old-k8s-version-180638 returned with exit code 1
I1123 08:41:13.775955 196829 network_create.go:287] error running [docker network inspect old-k8s-version-180638]: docker network inspect old-k8s-version-180638: exit status 1
stdout:
[]
stderr:
Error response from daemon: network old-k8s-version-180638 not found
I1123 08:41:13.775968 196829 network_create.go:289] output of [docker network inspect old-k8s-version-180638]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network old-k8s-version-180638 not found
** /stderr **
I1123 08:41:13.776076 196829 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1123 08:41:13.792199 196829 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-a946cc9c0edf IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:de:ea:52:17:a9:7a} reservation:<nil>}
I1123 08:41:13.792559 196829 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-fb33daef15c9 IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:aa:08:1d:d1:c6:df} reservation:<nil>}
I1123 08:41:13.792931 196829 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-fb61edac6088 IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:e6:64:59:e2:c3:5a} reservation:<nil>}
I1123 08:41:13.793382 196829 network.go:206] using free private subnet 192.168.76.0/24: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0x4001a1e140}
I1123 08:41:13.793443 196829 network_create.go:124] attempt to create docker network old-k8s-version-180638 192.168.76.0/24 with gateway 192.168.76.1 and MTU of 1500 ...
I1123 08:41:13.793513 196829 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.76.0/24 --gateway=192.168.76.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=old-k8s-version-180638 old-k8s-version-180638
I1123 08:41:13.859515 196829 network_create.go:108] docker network old-k8s-version-180638 192.168.76.0/24 created
I1123 08:41:13.859564 196829 kic.go:121] calculated static IP "192.168.76.2" for the "old-k8s-version-180638" container
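    The three "skipping subnet" lines above show the free-subnet walk that produced 192.168.76.0/24: candidates are tried in increments of 9 in the third octet, and the first /24 not claimed by an existing bridge wins, with its .2 address becoming the node IP. A simplified sketch of that walk (the step size and the hard-coded taken set are inferred from this log, not lifted from minikube's network.go):

        // Simplified sketch of the free-subnet walk logged above. The taken set
        // is hard-coded from this log for illustration; minikube derives it from
        // the host's interfaces and Docker networks, and the step of 9 in the
        // third octet is inferred from the 49 -> 58 -> 67 -> 76 sequence.
        package main

        import "fmt"

        func freeSubnet(taken map[string]bool) string {
            for third := 49; third <= 254; third += 9 {
                cidr := fmt.Sprintf("192.168.%d.0/24", third)
                if !taken[cidr] {
                    return cidr
                }
            }
            return "" // no free candidate found
        }

        func main() {
            taken := map[string]bool{
                "192.168.49.0/24": true, // br-a946cc9c0edf
                "192.168.58.0/24": true, // br-fb33daef15c9
                "192.168.67.0/24": true, // br-fb61edac6088
            }
            fmt.Println(freeSubnet(taken)) // 192.168.76.0/24
        }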
I1123 08:41:13.859638 196829 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1123 08:41:13.877503 196829 cli_runner.go:164] Run: docker volume create old-k8s-version-180638 --label name.minikube.sigs.k8s.io=old-k8s-version-180638 --label created_by.minikube.sigs.k8s.io=true
I1123 08:41:13.898930 196829 oci.go:103] Successfully created a docker volume old-k8s-version-180638
I1123 08:41:13.899032 196829 cli_runner.go:164] Run: docker run --rm --name old-k8s-version-180638-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=old-k8s-version-180638 --entrypoint /usr/bin/test -v old-k8s-version-180638:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -d /var/lib
I1123 08:41:14.458747 196829 oci.go:107] Successfully prepared a docker volume old-k8s-version-180638
I1123 08:41:14.458805 196829 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1123 08:41:14.458814 196829 kic.go:194] Starting extracting preloaded images to volume ...
I1123 08:41:14.458892 196829 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21966-2339/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v old-k8s-version-180638:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -I lz4 -xf /preloaded.tar -C /extractDir
I1123 08:41:19.794152 196829 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21966-2339/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v old-k8s-version-180638:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -I lz4 -xf /preloaded.tar -C /extractDir: (5.335195842s)
I1123 08:41:19.794189 196829 kic.go:203] duration metric: took 5.335371475s to extract preloaded images to volume ...
W1123 08:41:19.794328 196829 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I1123 08:41:19.794436 196829 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1123 08:41:19.848844 196829 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname old-k8s-version-180638 --name old-k8s-version-180638 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=old-k8s-version-180638 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=old-k8s-version-180638 --network old-k8s-version-180638 --ip 192.168.76.2 --volume old-k8s-version-180638:/var --security-opt apparmor=unconfined --memory=3072mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f
I1123 08:41:20.177907 196829 cli_runner.go:164] Run: docker container inspect old-k8s-version-180638 --format={{.State.Running}}
I1123 08:41:20.204948 196829 cli_runner.go:164] Run: docker container inspect old-k8s-version-180638 --format={{.State.Status}}
I1123 08:41:20.227539 196829 cli_runner.go:164] Run: docker exec old-k8s-version-180638 stat /var/lib/dpkg/alternatives/iptables
I1123 08:41:20.289856 196829 oci.go:144] the created container "old-k8s-version-180638" has a running status.
I1123 08:41:20.289891 196829 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21966-2339/.minikube/machines/old-k8s-version-180638/id_rsa...
I1123 08:41:20.448285 196829 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21966-2339/.minikube/machines/old-k8s-version-180638/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1123 08:41:20.475665 196829 cli_runner.go:164] Run: docker container inspect old-k8s-version-180638 --format={{.State.Status}}
I1123 08:41:20.521617 196829 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1123 08:41:20.521635 196829 kic_runner.go:114] Args: [docker exec --privileged old-k8s-version-180638 chown docker:docker /home/docker/.ssh/authorized_keys]
I1123 08:41:20.589359 196829 cli_runner.go:164] Run: docker container inspect old-k8s-version-180638 --format={{.State.Status}}
I1123 08:41:20.625639 196829 machine.go:94] provisionDockerMachine start ...
I1123 08:41:20.625720 196829 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-180638
I1123 08:41:20.654376 196829 main.go:143] libmachine: Using SSH client type: native
I1123 08:41:20.655192 196829 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3dad70] 0x3dd270 <nil> [] 0s} 127.0.0.1 33053 <nil> <nil>}
I1123 08:41:20.655341 196829 main.go:143] libmachine: About to run SSH command:
hostname
I1123 08:41:20.656290 196829 main.go:143] libmachine: Error dialing TCP: ssh: handshake failed: EOF
I1123 08:41:23.816940 196829 main.go:143] libmachine: SSH cmd err, output: <nil>: old-k8s-version-180638
I1123 08:41:23.816964 196829 ubuntu.go:182] provisioning hostname "old-k8s-version-180638"
I1123 08:41:23.817040 196829 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-180638
I1123 08:41:23.833840 196829 main.go:143] libmachine: Using SSH client type: native
I1123 08:41:23.834172 196829 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3dad70] 0x3dd270 <nil> [] 0s} 127.0.0.1 33053 <nil> <nil>}
I1123 08:41:23.834187 196829 main.go:143] libmachine: About to run SSH command:
sudo hostname old-k8s-version-180638 && echo "old-k8s-version-180638" | sudo tee /etc/hostname
I1123 08:41:23.999609 196829 main.go:143] libmachine: SSH cmd err, output: <nil>: old-k8s-version-180638
I1123 08:41:23.999698 196829 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-180638
I1123 08:41:24.020254 196829 main.go:143] libmachine: Using SSH client type: native
I1123 08:41:24.020584 196829 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3dad70] 0x3dd270 <nil> [] 0s} 127.0.0.1 33053 <nil> <nil>}
I1123 08:41:24.020601 196829 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sold-k8s-version-180638' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 old-k8s-version-180638/g' /etc/hosts;
else
echo '127.0.1.1 old-k8s-version-180638' | sudo tee -a /etc/hosts;
fi
fi
I1123 08:41:24.185924 196829 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1123 08:41:24.185946 196829 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21966-2339/.minikube CaCertPath:/home/jenkins/minikube-integration/21966-2339/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21966-2339/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21966-2339/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21966-2339/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21966-2339/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21966-2339/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21966-2339/.minikube}
I1123 08:41:24.185967 196829 ubuntu.go:190] setting up certificates
I1123 08:41:24.185976 196829 provision.go:84] configureAuth start
I1123 08:41:24.186052 196829 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-180638
I1123 08:41:24.215320 196829 provision.go:143] copyHostCerts
I1123 08:41:24.215378 196829 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-2339/.minikube/ca.pem, removing ...
I1123 08:41:24.215387 196829 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-2339/.minikube/ca.pem
I1123 08:41:24.215451 196829 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-2339/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21966-2339/.minikube/ca.pem (1078 bytes)
I1123 08:41:24.215548 196829 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-2339/.minikube/cert.pem, removing ...
I1123 08:41:24.215553 196829 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-2339/.minikube/cert.pem
I1123 08:41:24.215581 196829 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-2339/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21966-2339/.minikube/cert.pem (1123 bytes)
I1123 08:41:24.215633 196829 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-2339/.minikube/key.pem, removing ...
I1123 08:41:24.215638 196829 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-2339/.minikube/key.pem
I1123 08:41:24.215661 196829 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-2339/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21966-2339/.minikube/key.pem (1675 bytes)
I1123 08:41:24.216026 196829 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21966-2339/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21966-2339/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21966-2339/.minikube/certs/ca-key.pem org=jenkins.old-k8s-version-180638 san=[127.0.0.1 192.168.76.2 localhost minikube old-k8s-version-180638]
I1123 08:41:24.624778 196829 provision.go:177] copyRemoteCerts
I1123 08:41:24.624888 196829 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1123 08:41:24.624959 196829 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-180638
I1123 08:41:24.646886 196829 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33053 SSHKeyPath:/home/jenkins/minikube-integration/21966-2339/.minikube/machines/old-k8s-version-180638/id_rsa Username:docker}
I1123 08:41:24.753771 196829 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-2339/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1123 08:41:24.771993 196829 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-2339/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I1123 08:41:24.790069 196829 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-2339/.minikube/machines/server.pem --> /etc/docker/server.pem (1233 bytes)
I1123 08:41:24.807496 196829 provision.go:87] duration metric: took 621.497153ms to configureAuth
I1123 08:41:24.807563 196829 ubuntu.go:206] setting minikube options for container-runtime
I1123 08:41:24.807769 196829 config.go:182] Loaded profile config "old-k8s-version-180638": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1123 08:41:24.807806 196829 machine.go:97] duration metric: took 4.182148274s to provisionDockerMachine
I1123 08:41:24.807853 196829 client.go:176] duration metric: took 11.063877137s to LocalClient.Create
I1123 08:41:24.807895 196829 start.go:167] duration metric: took 11.063966541s to libmachine.API.Create "old-k8s-version-180638"
I1123 08:41:24.807925 196829 start.go:293] postStartSetup for "old-k8s-version-180638" (driver="docker")
I1123 08:41:24.807964 196829 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1123 08:41:24.808042 196829 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1123 08:41:24.808096 196829 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-180638
I1123 08:41:24.825195 196829 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33053 SSHKeyPath:/home/jenkins/minikube-integration/21966-2339/.minikube/machines/old-k8s-version-180638/id_rsa Username:docker}
I1123 08:41:24.930003 196829 ssh_runner.go:195] Run: cat /etc/os-release
I1123 08:41:24.933389 196829 main.go:143] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1123 08:41:24.933440 196829 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1123 08:41:24.933453 196829 filesync.go:126] Scanning /home/jenkins/minikube-integration/21966-2339/.minikube/addons for local assets ...
I1123 08:41:24.933516 196829 filesync.go:126] Scanning /home/jenkins/minikube-integration/21966-2339/.minikube/files for local assets ...
I1123 08:41:24.933597 196829 filesync.go:149] local asset: /home/jenkins/minikube-integration/21966-2339/.minikube/files/etc/ssl/certs/41512.pem -> 41512.pem in /etc/ssl/certs
I1123 08:41:24.933700 196829 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1123 08:41:24.941173 196829 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-2339/.minikube/files/etc/ssl/certs/41512.pem --> /etc/ssl/certs/41512.pem (1708 bytes)
I1123 08:41:24.960763 196829 start.go:296] duration metric: took 152.794115ms for postStartSetup
I1123 08:41:24.961139 196829 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-180638
I1123 08:41:24.978306 196829 profile.go:143] Saving config to /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/config.json ...
I1123 08:41:24.978587 196829 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1123 08:41:24.978642 196829 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-180638
I1123 08:41:24.994847 196829 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33053 SSHKeyPath:/home/jenkins/minikube-integration/21966-2339/.minikube/machines/old-k8s-version-180638/id_rsa Username:docker}
I1123 08:41:25.098792 196829 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1123 08:41:25.103719 196829 start.go:128] duration metric: took 11.363355721s to createHost
I1123 08:41:25.103745 196829 start.go:83] releasing machines lock for "old-k8s-version-180638", held for 11.363481187s
I1123 08:41:25.103820 196829 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-180638
I1123 08:41:25.123598 196829 ssh_runner.go:195] Run: cat /version.json
I1123 08:41:25.123615 196829 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1123 08:41:25.123646 196829 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-180638
I1123 08:41:25.123677 196829 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-180638
I1123 08:41:25.149385 196829 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33053 SSHKeyPath:/home/jenkins/minikube-integration/21966-2339/.minikube/machines/old-k8s-version-180638/id_rsa Username:docker}
I1123 08:41:25.159257 196829 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33053 SSHKeyPath:/home/jenkins/minikube-integration/21966-2339/.minikube/machines/old-k8s-version-180638/id_rsa Username:docker}
I1123 08:41:25.253035 196829 ssh_runner.go:195] Run: systemctl --version
I1123 08:41:25.348445 196829 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1123 08:41:25.352830 196829 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1123 08:41:25.352933 196829 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1123 08:41:25.381383 196829 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/10-crio-bridge.conflist.disabled] bridge cni config(s)
I1123 08:41:25.381469 196829 start.go:496] detecting cgroup driver to use...
I1123 08:41:25.381508 196829 detect.go:187] detected "cgroupfs" cgroup driver on host os
I1123 08:41:25.381570 196829 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1123 08:41:25.397040 196829 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1123 08:41:25.410260 196829 docker.go:218] disabling cri-docker service (if available) ...
I1123 08:41:25.410362 196829 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1123 08:41:25.428008 196829 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1123 08:41:25.447082 196829 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1123 08:41:25.620588 196829 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1123 08:41:25.749588 196829 docker.go:234] disabling docker service ...
I1123 08:41:25.749661 196829 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1123 08:41:25.772076 196829 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1123 08:41:25.784914 196829 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1123 08:41:25.899082 196829 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1123 08:41:26.009981 196829 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1123 08:41:26.025315 196829 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1123 08:41:26.039953 196829 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.9"|' /etc/containerd/config.toml"
I1123 08:41:26.049471 196829 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1123 08:41:26.059847 196829 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I1123 08:41:26.060009 196829 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I1123 08:41:26.069667 196829 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1123 08:41:26.079903 196829 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1123 08:41:26.089816 196829 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1123 08:41:26.099752 196829 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1123 08:41:26.108060 196829 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1123 08:41:26.117585 196829 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1123 08:41:26.126366 196829 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
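    Taken together, the sed edits above should leave /etc/containerd/config.toml with roughly the fragment below. The section nesting shown is an assumption about the kicbase default layout, but each key/value is one the commands above write:

        # Assumed kicbase layout; each key below is one the sed commands set.
        [plugins."io.containerd.grpc.v1.cri"]
          enable_unprivileged_ports = true
          restrict_oom_score_adj = false
          sandbox_image = "registry.k8s.io/pause:3.9"
          [plugins."io.containerd.grpc.v1.cri".cni]
            conf_dir = "/etc/cni/net.d"
          [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
            runtime_type = "io.containerd.runc.v2"
            [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
              SystemdCgroup = false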
I1123 08:41:26.135803 196829 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1123 08:41:26.143649 196829 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1123 08:41:26.151206 196829 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:41:26.281475 196829 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1123 08:41:26.394263 196829 start.go:543] Will wait 60s for socket path /run/containerd/containerd.sock
I1123 08:41:26.394379 196829 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I1123 08:41:26.398397 196829 start.go:564] Will wait 60s for crictl version
I1123 08:41:26.398525 196829 ssh_runner.go:195] Run: which crictl
I1123 08:41:26.402050 196829 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1123 08:41:26.433447 196829 start.go:580] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: v2.1.5
RuntimeApiVersion: v1
I1123 08:41:26.433548 196829 ssh_runner.go:195] Run: containerd --version
I1123 08:41:26.456534 196829 ssh_runner.go:195] Run: containerd --version
I1123 08:41:26.486458 196829 out.go:179] * Preparing Kubernetes v1.28.0 on containerd 2.1.5 ...
I1123 08:41:26.489565 196829 cli_runner.go:164] Run: docker network inspect old-k8s-version-180638 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1123 08:41:26.507660 196829 ssh_runner.go:195] Run: grep 192.168.76.1 host.minikube.internal$ /etc/hosts
I1123 08:41:26.511689 196829 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.76.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1123 08:41:26.521591 196829 kubeadm.go:884] updating cluster {Name:old-k8s-version-180638 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-180638 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1123 08:41:26.521716 196829 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1123 08:41:26.521782 196829 ssh_runner.go:195] Run: sudo crictl images --output json
I1123 08:41:26.552790 196829 containerd.go:627] all images are preloaded for containerd runtime.
I1123 08:41:26.552815 196829 containerd.go:534] Images already preloaded, skipping extraction
I1123 08:41:26.552879 196829 ssh_runner.go:195] Run: sudo crictl images --output json
I1123 08:41:26.589503 196829 containerd.go:627] all images are preloaded for containerd runtime.
I1123 08:41:26.589526 196829 cache_images.go:86] Images are preloaded, skipping loading
I1123 08:41:26.589533 196829 kubeadm.go:935] updating node { 192.168.76.2 8443 v1.28.0 containerd true true} ...
I1123 08:41:26.589674 196829 kubeadm.go:947] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.28.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=old-k8s-version-180638 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.76.2
[Install]
config:
{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-180638 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
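
The kubelet flags above live in a systemd drop-in (the 10-kubeadm.conf scp'd later in this run), with the empty ExecStart= clearing the unit's default command before the real one is set. After the daemon-reload, the merged unit can be inspected with:

    systemctl cat kubelet   # prints kubelet.service plus the 10-kubeadm.conf drop-in
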
I1123 08:41:26.589739 196829 ssh_runner.go:195] Run: sudo crictl info
I1123 08:41:26.615213 196829 cni.go:84] Creating CNI manager for ""
I1123 08:41:26.615295 196829 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1123 08:41:26.615324 196829 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1123 08:41:26.615377 196829 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.76.2 APIServerPort:8443 KubernetesVersion:v1.28.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:old-k8s-version-180638 NodeName:old-k8s-version-180638 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.76.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.76.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1123 08:41:26.615549 196829 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.76.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///run/containerd/containerd.sock
name: "old-k8s-version-180638"
kubeletExtraArgs:
node-ip: 192.168.76.2
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.76.2"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.28.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
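
That is three YAML documents in one file: InitConfiguration plus ClusterConfiguration for kubeadm itself, then KubeletConfiguration and KubeProxyConfiguration passed through to the components. On recent kubeadm releases (v1.28 should qualify) the rendered file can be sanity-checked once it lands on the node; a sketch:

    sudo /var/lib/minikube/binaries/v1.28.0/kubeadm config validate \
      --config /var/tmp/minikube/kubeadm.yaml
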
I1123 08:41:26.615640 196829 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.28.0
I1123 08:41:26.623537 196829 binaries.go:51] Found k8s binaries, skipping transfer
I1123 08:41:26.623635 196829 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1123 08:41:26.631295 196829 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (326 bytes)
I1123 08:41:26.643882 196829 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1123 08:41:26.657243 196829 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2176 bytes)
I1123 08:41:26.669640 196829 ssh_runner.go:195] Run: grep 192.168.76.2 control-plane.minikube.internal$ /etc/hosts
I1123 08:41:26.673282 196829 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.76.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1123 08:41:26.685864 196829 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:41:26.794513 196829 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1123 08:41:26.810973 196829 certs.go:69] Setting up /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638 for IP: 192.168.76.2
I1123 08:41:26.811039 196829 certs.go:195] generating shared ca certs ...
I1123 08:41:26.811080 196829 certs.go:227] acquiring lock for ca certs: {Name:mke0fc62f41acbef5eb3e84af3a3b8f9858bd1fc Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:41:26.811250 196829 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21966-2339/.minikube/ca.key
I1123 08:41:26.811333 196829 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21966-2339/.minikube/proxy-client-ca.key
I1123 08:41:26.811355 196829 certs.go:257] generating profile certs ...
I1123 08:41:26.811440 196829 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/client.key
I1123 08:41:26.811477 196829 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/client.crt with IP's: []
I1123 08:41:26.973605 196829 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/client.crt ...
I1123 08:41:26.973639 196829 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/client.crt: {Name:mke32e0874274fa8086c901b1e6afbf9faff17cf Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:41:26.973836 196829 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/client.key ...
I1123 08:41:26.973854 196829 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/client.key: {Name:mk164b3f8143768da540cf1b000f576503ef0774 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:41:26.974478 196829 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/apiserver.key.28528907
I1123 08:41:26.974505 196829 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/apiserver.crt.28528907 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.76.2]
I1123 08:41:27.162797 196829 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/apiserver.crt.28528907 ...
I1123 08:41:27.162827 196829 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/apiserver.crt.28528907: {Name:mk89f25fc4240f5ec0b53706cf7a05d65ec41dcd Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:41:27.163533 196829 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/apiserver.key.28528907 ...
I1123 08:41:27.163550 196829 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/apiserver.key.28528907: {Name:mkceae69a15be6eedc78c0f192aa68e5077c2c60 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:41:27.164156 196829 certs.go:382] copying /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/apiserver.crt.28528907 -> /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/apiserver.crt
I1123 08:41:27.164252 196829 certs.go:386] copying /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/apiserver.key.28528907 -> /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/apiserver.key
I1123 08:41:27.164317 196829 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/proxy-client.key
I1123 08:41:27.164337 196829 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/proxy-client.crt with IP's: []
I1123 08:41:27.589335 196829 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/proxy-client.crt ...
I1123 08:41:27.589366 196829 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/proxy-client.crt: {Name:mk5e88fa47e7c5af72b6e967a38cd87e0cc58d20 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:41:27.590109 196829 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/proxy-client.key ...
I1123 08:41:27.590126 196829 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/proxy-client.key: {Name:mka6f06ef565fc329562ab2f39faf7c67e598a55 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:41:27.590847 196829 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-2339/.minikube/certs/4151.pem (1338 bytes)
W1123 08:41:27.590897 196829 certs.go:480] ignoring /home/jenkins/minikube-integration/21966-2339/.minikube/certs/4151_empty.pem, impossibly tiny 0 bytes
I1123 08:41:27.590910 196829 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-2339/.minikube/certs/ca-key.pem (1679 bytes)
I1123 08:41:27.590954 196829 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-2339/.minikube/certs/ca.pem (1078 bytes)
I1123 08:41:27.590984 196829 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-2339/.minikube/certs/cert.pem (1123 bytes)
I1123 08:41:27.591012 196829 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-2339/.minikube/certs/key.pem (1675 bytes)
I1123 08:41:27.591064 196829 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-2339/.minikube/files/etc/ssl/certs/41512.pem (1708 bytes)
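
Each "generating signed profile cert" step above is ordinary X.509 issuance against the shared minikubeCA: a new key, a CSR carrying the identity, and a CA signature. A rough openssl equivalent for the client cert (the subject line is illustrative, not lifted from minikube's source):

    openssl genrsa -out client.key 2048
    openssl req -new -key client.key \
      -subj "/O=system:masters/CN=minikube-user" -out client.csr   # illustrative subject
    openssl x509 -req -in client.csr -CA ca.crt -CAkey ca.key \
      -CAcreateserial -days 1095 -out client.crt                   # 1095d = the 26280h CertExpiration above
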
I1123 08:41:27.591653 196829 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-2339/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1123 08:41:27.611397 196829 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-2339/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1123 08:41:27.628655 196829 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-2339/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1123 08:41:27.646428 196829 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-2339/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I1123 08:41:27.663648 196829 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1432 bytes)
I1123 08:41:27.680373 196829 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I1123 08:41:27.697528 196829 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1123 08:41:27.718625 196829 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I1123 08:41:27.735969 196829 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-2339/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1123 08:41:27.753670 196829 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-2339/.minikube/certs/4151.pem --> /usr/share/ca-certificates/4151.pem (1338 bytes)
I1123 08:41:27.772203 196829 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-2339/.minikube/files/etc/ssl/certs/41512.pem --> /usr/share/ca-certificates/41512.pem (1708 bytes)
I1123 08:41:27.790388 196829 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1123 08:41:27.803782 196829 ssh_runner.go:195] Run: openssl version
I1123 08:41:27.810231 196829 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/4151.pem && ln -fs /usr/share/ca-certificates/4151.pem /etc/ssl/certs/4151.pem"
I1123 08:41:27.818398 196829 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/4151.pem
I1123 08:41:27.822235 196829 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 23 08:02 /usr/share/ca-certificates/4151.pem
I1123 08:41:27.822298 196829 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/4151.pem
I1123 08:41:27.864039 196829 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/4151.pem /etc/ssl/certs/51391683.0"
I1123 08:41:27.872287 196829 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/41512.pem && ln -fs /usr/share/ca-certificates/41512.pem /etc/ssl/certs/41512.pem"
I1123 08:41:27.880642 196829 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/41512.pem
I1123 08:41:27.884373 196829 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 23 08:02 /usr/share/ca-certificates/41512.pem
I1123 08:41:27.884446 196829 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/41512.pem
I1123 08:41:27.925706 196829 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/41512.pem /etc/ssl/certs/3ec20f2e.0"
I1123 08:41:27.933986 196829 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1123 08:41:27.942212 196829 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1123 08:41:27.945912 196829 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 23 07:56 /usr/share/ca-certificates/minikubeCA.pem
I1123 08:41:27.945995 196829 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1123 08:41:27.987134 196829 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
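
The openssl x509 -hash calls compute the subject hash that OpenSSL uses to locate CAs in /etc/ssl/certs, and each ln -fs creates the <hash>.0 link (b5213941.0 for minikubeCA here) that c_rehash would otherwise generate. The same two steps by hand:

    HASH=$(openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem)
    sudo ln -fs /etc/ssl/certs/minikubeCA.pem "/etc/ssl/certs/${HASH}.0"   # HASH is b5213941 in this run
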
I1123 08:41:27.995374 196829 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1123 08:41:27.999559 196829 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1123 08:41:27.999640 196829 kubeadm.go:401] StartCluster: {Name:old-k8s-version-180638 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-180638 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1123 08:41:27.999724 196829 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I1123 08:41:27.999901 196829 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I1123 08:41:28.030022 196829 cri.go:89] found id: ""
I1123 08:41:28.030090 196829 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1123 08:41:28.038618 196829 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1123 08:41:28.046519 196829 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I1123 08:41:28.046606 196829 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1123 08:41:28.054666 196829 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1123 08:41:28.054688 196829 kubeadm.go:158] found existing configuration files:
I1123 08:41:28.054763 196829 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1123 08:41:28.062722 196829 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1123 08:41:28.062824 196829 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1123 08:41:28.070543 196829 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1123 08:41:28.078377 196829 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1123 08:41:28.078469 196829 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1123 08:41:28.085999 196829 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1123 08:41:28.093970 196829 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1123 08:41:28.094044 196829 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1123 08:41:28.101534 196829 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1123 08:41:28.109634 196829 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1123 08:41:28.109755 196829 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1123 08:41:28.117144 196829 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.28.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1123 08:41:28.212901 196829 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1084-aws\n", err: exit status 1
I1123 08:41:28.307897 196829 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
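
Both warnings come from kubeadm's preflight phase; the long --ignore-preflight-errors list above downgrades the named checks to warnings instead of failures (SystemVerification is skipped deliberately because the docker driver's host kernel ships no /lib/modules configs). Preflight can be replayed on its own; a sketch:

    sudo /var/lib/minikube/binaries/v1.28.0/kubeadm init phase preflight \
      --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=SystemVerification
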
I1123 08:41:46.723355 196829 kubeadm.go:319] [init] Using Kubernetes version: v1.28.0
I1123 08:41:46.723418 196829 kubeadm.go:319] [preflight] Running pre-flight checks
I1123 08:41:46.723506 196829 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I1123 08:41:46.723561 196829 kubeadm.go:319] KERNEL_VERSION: 5.15.0-1084-aws
I1123 08:41:46.723595 196829 kubeadm.go:319] OS: Linux
I1123 08:41:46.723640 196829 kubeadm.go:319] CGROUPS_CPU: enabled
I1123 08:41:46.723688 196829 kubeadm.go:319] CGROUPS_CPUACCT: enabled
I1123 08:41:46.723735 196829 kubeadm.go:319] CGROUPS_CPUSET: enabled
I1123 08:41:46.723783 196829 kubeadm.go:319] CGROUPS_DEVICES: enabled
I1123 08:41:46.723830 196829 kubeadm.go:319] CGROUPS_FREEZER: enabled
I1123 08:41:46.723879 196829 kubeadm.go:319] CGROUPS_MEMORY: enabled
I1123 08:41:46.723925 196829 kubeadm.go:319] CGROUPS_PIDS: enabled
I1123 08:41:46.723972 196829 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I1123 08:41:46.724018 196829 kubeadm.go:319] CGROUPS_BLKIO: enabled
I1123 08:41:46.724090 196829 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1123 08:41:46.724184 196829 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1123 08:41:46.724277 196829 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I1123 08:41:46.724339 196829 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1123 08:41:46.730394 196829 out.go:252] - Generating certificates and keys ...
I1123 08:41:46.730493 196829 kubeadm.go:319] [certs] Using existing ca certificate authority
I1123 08:41:46.730559 196829 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1123 08:41:46.730625 196829 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I1123 08:41:46.730681 196829 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I1123 08:41:46.730740 196829 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1123 08:41:46.730789 196829 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1123 08:41:46.730843 196829 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1123 08:41:46.730979 196829 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [localhost old-k8s-version-180638] and IPs [192.168.76.2 127.0.0.1 ::1]
I1123 08:41:46.731033 196829 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1123 08:41:46.731156 196829 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [localhost old-k8s-version-180638] and IPs [192.168.76.2 127.0.0.1 ::1]
I1123 08:41:46.731221 196829 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1123 08:41:46.731283 196829 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1123 08:41:46.731327 196829 kubeadm.go:319] [certs] Generating "sa" key and public key
I1123 08:41:46.731382 196829 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1123 08:41:46.731432 196829 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1123 08:41:46.731487 196829 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1123 08:41:46.731552 196829 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1123 08:41:46.731606 196829 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1123 08:41:46.731687 196829 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1123 08:41:46.732404 196829 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1123 08:41:46.735499 196829 out.go:252] - Booting up control plane ...
I1123 08:41:46.735693 196829 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1123 08:41:46.735790 196829 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1123 08:41:46.735869 196829 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1123 08:41:46.735991 196829 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1123 08:41:46.736083 196829 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1123 08:41:46.736124 196829 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1123 08:41:46.736298 196829 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
I1123 08:41:46.736379 196829 kubeadm.go:319] [apiclient] All control plane components are healthy after 7.016975 seconds
I1123 08:41:46.736508 196829 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1123 08:41:46.736649 196829 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1123 08:41:46.736716 196829 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1123 08:41:46.737049 196829 kubeadm.go:319] [mark-control-plane] Marking the node old-k8s-version-180638 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1123 08:41:46.737114 196829 kubeadm.go:319] [bootstrap-token] Using token: 89uxh1.yt288j2wm2p51h2c
I1123 08:41:46.740440 196829 out.go:252] - Configuring RBAC rules ...
I1123 08:41:46.740562 196829 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1123 08:41:46.740658 196829 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1123 08:41:46.740805 196829 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1123 08:41:46.740950 196829 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1123 08:41:46.741070 196829 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1123 08:41:46.741162 196829 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1123 08:41:46.741276 196829 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1123 08:41:46.741318 196829 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1123 08:41:46.741363 196829 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1123 08:41:46.741369 196829 kubeadm.go:319]
I1123 08:41:46.741466 196829 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1123 08:41:46.741471 196829 kubeadm.go:319]
I1123 08:41:46.741547 196829 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1123 08:41:46.741551 196829 kubeadm.go:319]
I1123 08:41:46.741575 196829 kubeadm.go:319] mkdir -p $HOME/.kube
I1123 08:41:46.741639 196829 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1123 08:41:46.741693 196829 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1123 08:41:46.741696 196829 kubeadm.go:319]
I1123 08:41:46.741757 196829 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1123 08:41:46.741761 196829 kubeadm.go:319]
I1123 08:41:46.741808 196829 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1123 08:41:46.741811 196829 kubeadm.go:319]
I1123 08:41:46.741868 196829 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1123 08:41:46.741944 196829 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1123 08:41:46.742020 196829 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1123 08:41:46.742024 196829 kubeadm.go:319]
I1123 08:41:46.742111 196829 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1123 08:41:46.742188 196829 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1123 08:41:46.742192 196829 kubeadm.go:319]
I1123 08:41:46.742277 196829 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token 89uxh1.yt288j2wm2p51h2c \
I1123 08:41:46.742380 196829 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:4f35f48b47910e0f0424b1b0ace7d03cfc1e6ef5b162b679e98eef4f3a64a5a5 \
I1123 08:41:46.742400 196829 kubeadm.go:319] --control-plane
I1123 08:41:46.742404 196829 kubeadm.go:319]
I1123 08:41:46.742493 196829 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1123 08:41:46.742497 196829 kubeadm.go:319]
I1123 08:41:46.742578 196829 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token 89uxh1.yt288j2wm2p51h2c \
I1123 08:41:46.742696 196829 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:4f35f48b47910e0f0424b1b0ace7d03cfc1e6ef5b162b679e98eef4f3a64a5a5
I1123 08:41:46.742705 196829 cni.go:84] Creating CNI manager for ""
I1123 08:41:46.742712 196829 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1123 08:41:46.747905 196829 out.go:179] * Configuring CNI (Container Networking Interface) ...
I1123 08:41:46.750796 196829 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I1123 08:41:46.761561 196829 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.28.0/kubectl ...
I1123 08:41:46.761582 196829 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I1123 08:41:46.780526 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I1123 08:41:47.782764 196829 ssh_runner.go:235] Completed: sudo /var/lib/minikube/binaries/v1.28.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml: (1.002206277s)
I1123 08:41:47.782810 196829 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1123 08:41:47.782925 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:47.783012 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes old-k8s-version-180638 minikube.k8s.io/updated_at=2025_11_23T08_41_47_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=3e219827a5f064cf736992b79e59864301ece66e minikube.k8s.io/name=old-k8s-version-180638 minikube.k8s.io/primary=true
I1123 08:41:47.996747 196829 ops.go:34] apiserver oom_adj: -16
I1123 08:41:47.996865 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:48.497263 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:48.997587 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:49.497238 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:49.996982 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:50.497817 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:50.996983 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:51.497681 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:51.997616 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:52.497659 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:52.997821 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:53.497324 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:53.997887 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:54.496981 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:54.996975 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:55.496982 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:55.997716 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:56.497689 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:56.997844 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:57.497606 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:57.997246 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:58.497272 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:58.997225 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:59.497615 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:59.996938 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:42:00.373202 196829 kubeadm.go:1114] duration metric: took 12.590316137s to wait for elevateKubeSystemPrivileges
I1123 08:42:00.373235 196829 kubeadm.go:403] duration metric: took 32.37359943s to StartCluster
I1123 08:42:00.373254 196829 settings.go:142] acquiring lock: {Name:mkfb77243b31dfe604b438e7da3f1bce2ba7b5a6 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:42:00.373329 196829 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21966-2339/kubeconfig
I1123 08:42:00.374576 196829 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-2339/kubeconfig: {Name:mka042f83263da2d190b70c2277735bf705fab5c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:42:00.374865 196829 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1123 08:42:00.375126 196829 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1123 08:42:00.375440 196829 config.go:182] Loaded profile config "old-k8s-version-180638": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1123 08:42:00.375497 196829 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1123 08:42:00.375560 196829 addons.go:70] Setting storage-provisioner=true in profile "old-k8s-version-180638"
I1123 08:42:00.375575 196829 addons.go:239] Setting addon storage-provisioner=true in "old-k8s-version-180638"
I1123 08:42:00.375597 196829 host.go:66] Checking if "old-k8s-version-180638" exists ...
I1123 08:42:00.375813 196829 addons.go:70] Setting default-storageclass=true in profile "old-k8s-version-180638"
I1123 08:42:00.375848 196829 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "old-k8s-version-180638"
I1123 08:42:00.376308 196829 cli_runner.go:164] Run: docker container inspect old-k8s-version-180638 --format={{.State.Status}}
I1123 08:42:00.376539 196829 cli_runner.go:164] Run: docker container inspect old-k8s-version-180638 --format={{.State.Status}}
I1123 08:42:00.379011 196829 out.go:179] * Verifying Kubernetes components...
I1123 08:42:00.382111 196829 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:42:00.428496 196829 addons.go:239] Setting addon default-storageclass=true in "old-k8s-version-180638"
I1123 08:42:00.428566 196829 host.go:66] Checking if "old-k8s-version-180638" exists ...
I1123 08:42:00.429356 196829 cli_runner.go:164] Run: docker container inspect old-k8s-version-180638 --format={{.State.Status}}
I1123 08:42:00.444047 196829 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1123 08:42:00.448509 196829 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1123 08:42:00.448558 196829 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1123 08:42:00.448647 196829 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-180638
I1123 08:42:00.472475 196829 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1123 08:42:00.472504 196829 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1123 08:42:00.472636 196829 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-180638
I1123 08:42:00.490205 196829 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33053 SSHKeyPath:/home/jenkins/minikube-integration/21966-2339/.minikube/machines/old-k8s-version-180638/id_rsa Username:docker}
I1123 08:42:00.514193 196829 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33053 SSHKeyPath:/home/jenkins/minikube-integration/21966-2339/.minikube/machines/old-k8s-version-180638/id_rsa Username:docker}
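
With the docker driver the node's sshd is published on a loopback port (33053 in this run), so the same session the test opens is reachable by hand:

    ssh -p 33053 \
      -i /home/jenkins/minikube-integration/21966-2339/.minikube/machines/old-k8s-version-180638/id_rsa \
      docker@127.0.0.1
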
I1123 08:42:00.878161 196829 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.76.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
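
That pipeline fetches the coredns ConfigMap, splices a hosts plugin block ahead of the forward directive (plus a log directive after errors), and pushes the result back with kubectl replace. The injected Corefile fragment can be confirmed afterwards:

    sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig \
      -n kube-system get configmap coredns -o yaml
    # the Corefile now carries (sketch):
    #    hosts {
    #       192.168.76.1 host.minikube.internal
    #       fallthrough
    #    }
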
I1123 08:42:00.878301 196829 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1123 08:42:00.916437 196829 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1123 08:42:01.023971 196829 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1123 08:42:01.723716 196829 start.go:977] {"host.minikube.internal": 192.168.76.1} host record injected into CoreDNS's ConfigMap
I1123 08:42:01.726193 196829 node_ready.go:35] waiting up to 6m0s for node "old-k8s-version-180638" to be "Ready" ...
I1123 08:42:02.171067 196829 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (1.147020479s)
I1123 08:42:02.174415 196829 out.go:179] * Enabled addons: default-storageclass, storage-provisioner
I1123 08:42:02.177439 196829 addons.go:530] duration metric: took 1.801906906s for enable addons: enabled=[default-storageclass storage-provisioner]
I1123 08:42:02.232613 196829 kapi.go:214] "coredns" deployment in "kube-system" namespace and "old-k8s-version-180638" context rescaled to 1 replicas
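
The rescale is the usual single-node adjustment: the stock CoreDNS Deployment requests 2 replicas, which a one-node cluster doesn't need. A hand-run equivalent (sketch):

    kubectl --context old-k8s-version-180638 -n kube-system scale deployment coredns --replicas=1
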
W1123 08:42:03.730244 196829 node_ready.go:57] node "old-k8s-version-180638" has "Ready":"False" status (will retry)
W1123 08:42:06.235867 196829 node_ready.go:57] node "old-k8s-version-180638" has "Ready":"False" status (will retry)
W1123 08:42:08.729375 196829 node_ready.go:57] node "old-k8s-version-180638" has "Ready":"False" status (will retry)
W1123 08:42:10.729575 196829 node_ready.go:57] node "old-k8s-version-180638" has "Ready":"False" status (will retry)
W1123 08:42:12.729904 196829 node_ready.go:57] node "old-k8s-version-180638" has "Ready":"False" status (will retry)
I1123 08:42:13.730112 196829 node_ready.go:49] node "old-k8s-version-180638" is "Ready"
I1123 08:42:13.730141 196829 node_ready.go:38] duration metric: took 12.003828725s for node "old-k8s-version-180638" to be "Ready" ...
I1123 08:42:13.730157 196829 api_server.go:52] waiting for apiserver process to appear ...
I1123 08:42:13.730215 196829 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1123 08:42:13.742876 196829 api_server.go:72] duration metric: took 13.367936978s to wait for apiserver process to appear ...
I1123 08:42:13.742904 196829 api_server.go:88] waiting for apiserver healthz status ...
I1123 08:42:13.742928 196829 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1123 08:42:13.752538 196829 api_server.go:279] https://192.168.76.2:8443/healthz returned 200:
ok
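
The healthz probe is a plain HTTPS GET; anything but a 200 with body "ok" keeps the wait loop going. It is reproducible from the host, either skipping verification or trusting the cluster CA (the apiserver cert was signed for 192.168.76.2 above):

    curl -sk https://192.168.76.2:8443/healthz    # prints: ok
    curl -s --cacert /home/jenkins/minikube-integration/21966-2339/.minikube/ca.crt \
      https://192.168.76.2:8443/healthz
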
I1123 08:42:13.753958 196829 api_server.go:141] control plane version: v1.28.0
I1123 08:42:13.753984 196829 api_server.go:131] duration metric: took 11.072911ms to wait for apiserver health ...
I1123 08:42:13.753994 196829 system_pods.go:43] waiting for kube-system pods to appear ...
I1123 08:42:13.757334 196829 system_pods.go:59] 8 kube-system pods found
I1123 08:42:13.757377 196829 system_pods.go:61] "coredns-5dd5756b68-q4lbv" [9a14996d-e910-4a4f-a6f6-f2d8565a4b9c] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1123 08:42:13.757384 196829 system_pods.go:61] "etcd-old-k8s-version-180638" [d7e82a35-eda7-493b-8f80-319fff10e0a8] Running
I1123 08:42:13.757390 196829 system_pods.go:61] "kindnet-mrfgl" [53d90f3f-687b-45a0-a344-321a75f38a20] Running
I1123 08:42:13.757394 196829 system_pods.go:61] "kube-apiserver-old-k8s-version-180638" [6d727a9f-96a5-47f1-8676-3463c38e31e8] Running
I1123 08:42:13.757398 196829 system_pods.go:61] "kube-controller-manager-old-k8s-version-180638" [92875b86-8bd3-4b30-acdd-2c65db14c97e] Running
I1123 08:42:13.757402 196829 system_pods.go:61] "kube-proxy-dk6g5" [27bc489f-26f8-4848-9df2-6530dcad7423] Running
I1123 08:42:13.757449 196829 system_pods.go:61] "kube-scheduler-old-k8s-version-180638" [76e55a3f-6b02-43c4-ae79-01300e9dd2c6] Running
I1123 08:42:13.757461 196829 system_pods.go:61] "storage-provisioner" [fa923b06-d896-468f-8e82-51b4e9df88dc] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1123 08:42:13.757470 196829 system_pods.go:74] duration metric: took 3.469421ms to wait for pod list to return data ...
I1123 08:42:13.757483 196829 default_sa.go:34] waiting for default service account to be created ...
I1123 08:42:13.759772 196829 default_sa.go:45] found service account: "default"
I1123 08:42:13.759795 196829 default_sa.go:55] duration metric: took 2.306419ms for default service account to be created ...
I1123 08:42:13.759805 196829 system_pods.go:116] waiting for k8s-apps to be running ...
I1123 08:42:13.764346 196829 system_pods.go:86] 8 kube-system pods found
I1123 08:42:13.764381 196829 system_pods.go:89] "coredns-5dd5756b68-q4lbv" [9a14996d-e910-4a4f-a6f6-f2d8565a4b9c] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1123 08:42:13.764387 196829 system_pods.go:89] "etcd-old-k8s-version-180638" [d7e82a35-eda7-493b-8f80-319fff10e0a8] Running
I1123 08:42:13.764393 196829 system_pods.go:89] "kindnet-mrfgl" [53d90f3f-687b-45a0-a344-321a75f38a20] Running
I1123 08:42:13.764398 196829 system_pods.go:89] "kube-apiserver-old-k8s-version-180638" [6d727a9f-96a5-47f1-8676-3463c38e31e8] Running
I1123 08:42:13.764402 196829 system_pods.go:89] "kube-controller-manager-old-k8s-version-180638" [92875b86-8bd3-4b30-acdd-2c65db14c97e] Running
I1123 08:42:13.764426 196829 system_pods.go:89] "kube-proxy-dk6g5" [27bc489f-26f8-4848-9df2-6530dcad7423] Running
I1123 08:42:13.764438 196829 system_pods.go:89] "kube-scheduler-old-k8s-version-180638" [76e55a3f-6b02-43c4-ae79-01300e9dd2c6] Running
I1123 08:42:13.764445 196829 system_pods.go:89] "storage-provisioner" [fa923b06-d896-468f-8e82-51b4e9df88dc] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1123 08:42:13.764468 196829 retry.go:31] will retry after 231.795609ms: missing components: kube-dns
I1123 08:42:14.002188 196829 system_pods.go:86] 8 kube-system pods found
I1123 08:42:14.002226 196829 system_pods.go:89] "coredns-5dd5756b68-q4lbv" [9a14996d-e910-4a4f-a6f6-f2d8565a4b9c] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1123 08:42:14.002234 196829 system_pods.go:89] "etcd-old-k8s-version-180638" [d7e82a35-eda7-493b-8f80-319fff10e0a8] Running
I1123 08:42:14.002241 196829 system_pods.go:89] "kindnet-mrfgl" [53d90f3f-687b-45a0-a344-321a75f38a20] Running
I1123 08:42:14.002290 196829 system_pods.go:89] "kube-apiserver-old-k8s-version-180638" [6d727a9f-96a5-47f1-8676-3463c38e31e8] Running
I1123 08:42:14.002297 196829 system_pods.go:89] "kube-controller-manager-old-k8s-version-180638" [92875b86-8bd3-4b30-acdd-2c65db14c97e] Running
I1123 08:42:14.002309 196829 system_pods.go:89] "kube-proxy-dk6g5" [27bc489f-26f8-4848-9df2-6530dcad7423] Running
I1123 08:42:14.002313 196829 system_pods.go:89] "kube-scheduler-old-k8s-version-180638" [76e55a3f-6b02-43c4-ae79-01300e9dd2c6] Running
I1123 08:42:14.002319 196829 system_pods.go:89] "storage-provisioner" [fa923b06-d896-468f-8e82-51b4e9df88dc] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1123 08:42:14.002358 196829 retry.go:31] will retry after 309.541133ms: missing components: kube-dns
I1123 08:42:14.316329 196829 system_pods.go:86] 8 kube-system pods found
I1123 08:42:14.316371 196829 system_pods.go:89] "coredns-5dd5756b68-q4lbv" [9a14996d-e910-4a4f-a6f6-f2d8565a4b9c] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1123 08:42:14.316378 196829 system_pods.go:89] "etcd-old-k8s-version-180638" [d7e82a35-eda7-493b-8f80-319fff10e0a8] Running
I1123 08:42:14.316410 196829 system_pods.go:89] "kindnet-mrfgl" [53d90f3f-687b-45a0-a344-321a75f38a20] Running
I1123 08:42:14.316416 196829 system_pods.go:89] "kube-apiserver-old-k8s-version-180638" [6d727a9f-96a5-47f1-8676-3463c38e31e8] Running
I1123 08:42:14.316420 196829 system_pods.go:89] "kube-controller-manager-old-k8s-version-180638" [92875b86-8bd3-4b30-acdd-2c65db14c97e] Running
I1123 08:42:14.316425 196829 system_pods.go:89] "kube-proxy-dk6g5" [27bc489f-26f8-4848-9df2-6530dcad7423] Running
I1123 08:42:14.316453 196829 system_pods.go:89] "kube-scheduler-old-k8s-version-180638" [76e55a3f-6b02-43c4-ae79-01300e9dd2c6] Running
I1123 08:42:14.316462 196829 system_pods.go:89] "storage-provisioner" [fa923b06-d896-468f-8e82-51b4e9df88dc] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1123 08:42:14.316487 196829 retry.go:31] will retry after 469.87728ms: missing components: kube-dns
I1123 08:42:14.791058 196829 system_pods.go:86] 8 kube-system pods found
I1123 08:42:14.791093 196829 system_pods.go:89] "coredns-5dd5756b68-q4lbv" [9a14996d-e910-4a4f-a6f6-f2d8565a4b9c] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1123 08:42:14.791100 196829 system_pods.go:89] "etcd-old-k8s-version-180638" [d7e82a35-eda7-493b-8f80-319fff10e0a8] Running
I1123 08:42:14.791106 196829 system_pods.go:89] "kindnet-mrfgl" [53d90f3f-687b-45a0-a344-321a75f38a20] Running
I1123 08:42:14.791110 196829 system_pods.go:89] "kube-apiserver-old-k8s-version-180638" [6d727a9f-96a5-47f1-8676-3463c38e31e8] Running
I1123 08:42:14.791115 196829 system_pods.go:89] "kube-controller-manager-old-k8s-version-180638" [92875b86-8bd3-4b30-acdd-2c65db14c97e] Running
I1123 08:42:14.791119 196829 system_pods.go:89] "kube-proxy-dk6g5" [27bc489f-26f8-4848-9df2-6530dcad7423] Running
I1123 08:42:14.791123 196829 system_pods.go:89] "kube-scheduler-old-k8s-version-180638" [76e55a3f-6b02-43c4-ae79-01300e9dd2c6] Running
I1123 08:42:14.791129 196829 system_pods.go:89] "storage-provisioner" [fa923b06-d896-468f-8e82-51b4e9df88dc] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1123 08:42:14.791144 196829 retry.go:31] will retry after 367.579223ms: missing components: kube-dns
I1123 08:42:15.163345 196829 system_pods.go:86] 8 kube-system pods found
I1123 08:42:15.163377 196829 system_pods.go:89] "coredns-5dd5756b68-q4lbv" [9a14996d-e910-4a4f-a6f6-f2d8565a4b9c] Running
I1123 08:42:15.163384 196829 system_pods.go:89] "etcd-old-k8s-version-180638" [d7e82a35-eda7-493b-8f80-319fff10e0a8] Running
I1123 08:42:15.163388 196829 system_pods.go:89] "kindnet-mrfgl" [53d90f3f-687b-45a0-a344-321a75f38a20] Running
I1123 08:42:15.163393 196829 system_pods.go:89] "kube-apiserver-old-k8s-version-180638" [6d727a9f-96a5-47f1-8676-3463c38e31e8] Running
I1123 08:42:15.163398 196829 system_pods.go:89] "kube-controller-manager-old-k8s-version-180638" [92875b86-8bd3-4b30-acdd-2c65db14c97e] Running
I1123 08:42:15.163401 196829 system_pods.go:89] "kube-proxy-dk6g5" [27bc489f-26f8-4848-9df2-6530dcad7423] Running
I1123 08:42:15.163405 196829 system_pods.go:89] "kube-scheduler-old-k8s-version-180638" [76e55a3f-6b02-43c4-ae79-01300e9dd2c6] Running
I1123 08:42:15.163409 196829 system_pods.go:89] "storage-provisioner" [fa923b06-d896-468f-8e82-51b4e9df88dc] Running
I1123 08:42:15.163417 196829 system_pods.go:126] duration metric: took 1.403606184s to wait for k8s-apps to be running ...
I1123 08:42:15.163424 196829 system_svc.go:44] waiting for kubelet service to be running ....
I1123 08:42:15.163481 196829 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1123 08:42:15.176644 196829 system_svc.go:56] duration metric: took 13.210368ms WaitForService to wait for kubelet
I1123 08:42:15.176674 196829 kubeadm.go:587] duration metric: took 14.80173902s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1123 08:42:15.176693 196829 node_conditions.go:102] verifying NodePressure condition ...
I1123 08:42:15.179781 196829 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
I1123 08:42:15.179818 196829 node_conditions.go:123] node cpu capacity is 2
I1123 08:42:15.179832 196829 node_conditions.go:105] duration metric: took 3.134393ms to run NodePressure ...
I1123 08:42:15.179843 196829 start.go:242] waiting for startup goroutines ...
I1123 08:42:15.179851 196829 start.go:247] waiting for cluster config update ...
I1123 08:42:15.179867 196829 start.go:256] writing updated cluster config ...
I1123 08:42:15.180158 196829 ssh_runner.go:195] Run: rm -f paused
I1123 08:42:15.184124 196829 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1123 08:42:15.188984 196829 pod_ready.go:83] waiting for pod "coredns-5dd5756b68-q4lbv" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:42:15.197388 196829 pod_ready.go:94] pod "coredns-5dd5756b68-q4lbv" is "Ready"
I1123 08:42:15.197483 196829 pod_ready.go:86] duration metric: took 8.468594ms for pod "coredns-5dd5756b68-q4lbv" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:42:15.200541 196829 pod_ready.go:83] waiting for pod "etcd-old-k8s-version-180638" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:42:15.205348 196829 pod_ready.go:94] pod "etcd-old-k8s-version-180638" is "Ready"
I1123 08:42:15.205396 196829 pod_ready.go:86] duration metric: took 4.809714ms for pod "etcd-old-k8s-version-180638" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:42:15.208274 196829 pod_ready.go:83] waiting for pod "kube-apiserver-old-k8s-version-180638" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:42:15.213022 196829 pod_ready.go:94] pod "kube-apiserver-old-k8s-version-180638" is "Ready"
I1123 08:42:15.213049 196829 pod_ready.go:86] duration metric: took 4.746468ms for pod "kube-apiserver-old-k8s-version-180638" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:42:15.216062 196829 pod_ready.go:83] waiting for pod "kube-controller-manager-old-k8s-version-180638" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:42:15.588621 196829 pod_ready.go:94] pod "kube-controller-manager-old-k8s-version-180638" is "Ready"
I1123 08:42:15.588649 196829 pod_ready.go:86] duration metric: took 372.560174ms for pod "kube-controller-manager-old-k8s-version-180638" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:42:15.789577 196829 pod_ready.go:83] waiting for pod "kube-proxy-dk6g5" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:42:16.188996 196829 pod_ready.go:94] pod "kube-proxy-dk6g5" is "Ready"
I1123 08:42:16.189025 196829 pod_ready.go:86] duration metric: took 399.418985ms for pod "kube-proxy-dk6g5" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:42:16.388950 196829 pod_ready.go:83] waiting for pod "kube-scheduler-old-k8s-version-180638" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:42:16.788322 196829 pod_ready.go:94] pod "kube-scheduler-old-k8s-version-180638" is "Ready"
I1123 08:42:16.788348 196829 pod_ready.go:86] duration metric: took 399.371796ms for pod "kube-scheduler-old-k8s-version-180638" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:42:16.788362 196829 pod_ready.go:40] duration metric: took 1.604205013s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1123 08:42:16.845637 196829 start.go:625] kubectl: 1.33.2, cluster: 1.28.0 (minor skew: 5)
I1123 08:42:16.848524 196829 out.go:203]
W1123 08:42:16.851133 196829 out.go:285] ! /usr/local/bin/kubectl is version 1.33.2, which may have incompatibilities with Kubernetes 1.28.0.
I1123 08:42:16.854166 196829 out.go:179] - Want kubectl v1.28.0? Try 'minikube kubectl -- get pods -A'
I1123 08:42:16.857768 196829 out.go:179] * Done! kubectl is now configured to use "old-k8s-version-180638" cluster and "default" namespace by default
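# Note: the warning above flags a client/server skew of five minor versions
# (kubectl 1.33.2 against Kubernetes 1.28.0); upstream supports kubectl only
# within one minor version of the API server. A minimal sketch of using the
# version-matched kubectl that minikube bundles instead, assuming the same
# profile name:
#   out/minikube-linux-arm64 -p old-k8s-version-180638 kubectl -- version
#   out/minikube-linux-arm64 -p old-k8s-version-180638 kubectl -- get pods -A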
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD NAMESPACE
91bc48b43ecc6 1611cd07b61d5 7 seconds ago Running busybox 0 e4ae249cb52e3 busybox default
d28eb2e2ce196 ba04bb24b9575 13 seconds ago Running storage-provisioner 0 a34410332e173 storage-provisioner kube-system
7c2ec14edc41a 97e04611ad434 13 seconds ago Running coredns 0 c2b32ac0a3158 coredns-5dd5756b68-q4lbv kube-system
75439fed83684 b1a8c6f707935 24 seconds ago Running kindnet-cni 0 4f646733919cf kindnet-mrfgl kube-system
a92786aea3fde 940f54a5bcae9 26 seconds ago Running kube-proxy 0 304e17d801222 kube-proxy-dk6g5 kube-system
dd592fa780598 9cdd6470f48c8 47 seconds ago Running etcd 0 6c8aefe95a6ce etcd-old-k8s-version-180638 kube-system
9b79849edeb76 00543d2fe5d71 47 seconds ago Running kube-apiserver 0 53e8e5479de81 kube-apiserver-old-k8s-version-180638 kube-system
3a3a4da63be8b 46cc66ccc7c19 47 seconds ago Running kube-controller-manager 0 79bfc44a51fa1 kube-controller-manager-old-k8s-version-180638 kube-system
81034c6fa713b 762dce4090c5f 47 seconds ago Running kube-scheduler 0 13b0850cbdf71 kube-scheduler-old-k8s-version-180638 kube-system
==> containerd <==
Nov 23 08:42:14 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:14.182847695Z" level=info msg="CreateContainer within sandbox \"c2b32ac0a3158e5b8e88a60e8ec54f99f67326e1aba5a91b8ead5c4893516fa1\" for &ContainerMetadata{Name:coredns,Attempt:0,} returns container id \"7c2ec14edc41a4bad3c997ddbc366a57e75740ffe5d4d804776570d3bbf4089a\""
Nov 23 08:42:14 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:14.185890280Z" level=info msg="StartContainer for \"7c2ec14edc41a4bad3c997ddbc366a57e75740ffe5d4d804776570d3bbf4089a\""
Nov 23 08:42:14 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:14.188348487Z" level=info msg="connecting to shim 7c2ec14edc41a4bad3c997ddbc366a57e75740ffe5d4d804776570d3bbf4089a" address="unix:///run/containerd/s/23e8ed77d6d2545cc040cf10e94b9aa1307cea730c4e678c5b1ba5d216eb3aae" protocol=ttrpc version=3
Nov 23 08:42:14 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:14.191389545Z" level=info msg="CreateContainer within sandbox \"a34410332e1739898fe28b96e52dd9c87f97e3c9bb7b1ffd7f9865c04fcab2a8\" for &ContainerMetadata{Name:storage-provisioner,Attempt:0,} returns container id \"d28eb2e2ce19649f4947ef6afbf30d211d2dfa34551b90f0c10c58fdb65b63cd\""
Nov 23 08:42:14 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:14.193542238Z" level=info msg="StartContainer for \"d28eb2e2ce19649f4947ef6afbf30d211d2dfa34551b90f0c10c58fdb65b63cd\""
Nov 23 08:42:14 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:14.194431932Z" level=info msg="connecting to shim d28eb2e2ce19649f4947ef6afbf30d211d2dfa34551b90f0c10c58fdb65b63cd" address="unix:///run/containerd/s/21496efb2be254d32b19cec40d0f9ba01ff31efa61fb387b7a12652ab6551c66" protocol=ttrpc version=3
Nov 23 08:42:14 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:14.282934097Z" level=info msg="StartContainer for \"7c2ec14edc41a4bad3c997ddbc366a57e75740ffe5d4d804776570d3bbf4089a\" returns successfully"
Nov 23 08:42:14 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:14.297466583Z" level=info msg="StartContainer for \"d28eb2e2ce19649f4947ef6afbf30d211d2dfa34551b90f0c10c58fdb65b63cd\" returns successfully"
Nov 23 08:42:17 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:17.425584929Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:54457203-a4b0-4bfe-b7e6-9804ec70353f,Namespace:default,Attempt:0,}"
Nov 23 08:42:17 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:17.487893445Z" level=info msg="connecting to shim e4ae249cb52e36b4d0a2f9b31e40d2ac6f561a86c7f5020966174ef8dddb28bd" address="unix:///run/containerd/s/48de018ba0d9ba174f3818878e132ec8a301b930403195fb51f42bfd7ba5e6a1" namespace=k8s.io protocol=ttrpc version=3
Nov 23 08:42:17 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:17.543159907Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:54457203-a4b0-4bfe-b7e6-9804ec70353f,Namespace:default,Attempt:0,} returns sandbox id \"e4ae249cb52e36b4d0a2f9b31e40d2ac6f561a86c7f5020966174ef8dddb28bd\""
Nov 23 08:42:17 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:17.545984305Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\""
Nov 23 08:42:19 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:19.686274001Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 23 08:42:19 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:19.688431814Z" level=info msg="stop pulling image gcr.io/k8s-minikube/busybox:1.28.4-glibc: active requests=0, bytes read=1937188"
Nov 23 08:42:19 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:19.690941320Z" level=info msg="ImageCreate event name:\"sha256:1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 23 08:42:19 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:19.694195944Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 23 08:42:19 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:19.694856179Z" level=info msg="Pulled image \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" with image id \"sha256:1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c\", repo tag \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\", repo digest \"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\", size \"1935750\" in 2.148586355s"
Nov 23 08:42:19 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:19.694995922Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" returns image reference \"sha256:1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c\""
Nov 23 08:42:19 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:19.698429970Z" level=info msg="CreateContainer within sandbox \"e4ae249cb52e36b4d0a2f9b31e40d2ac6f561a86c7f5020966174ef8dddb28bd\" for container &ContainerMetadata{Name:busybox,Attempt:0,}"
Nov 23 08:42:19 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:19.711352120Z" level=info msg="Container 91bc48b43ecc67ffc1bc3f7fdbc911d26a4116b41c47fa062edb6c0dda1555ed: CDI devices from CRI Config.CDIDevices: []"
Nov 23 08:42:19 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:19.724026742Z" level=info msg="CreateContainer within sandbox \"e4ae249cb52e36b4d0a2f9b31e40d2ac6f561a86c7f5020966174ef8dddb28bd\" for &ContainerMetadata{Name:busybox,Attempt:0,} returns container id \"91bc48b43ecc67ffc1bc3f7fdbc911d26a4116b41c47fa062edb6c0dda1555ed\""
Nov 23 08:42:19 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:19.724804821Z" level=info msg="StartContainer for \"91bc48b43ecc67ffc1bc3f7fdbc911d26a4116b41c47fa062edb6c0dda1555ed\""
Nov 23 08:42:19 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:19.725703310Z" level=info msg="connecting to shim 91bc48b43ecc67ffc1bc3f7fdbc911d26a4116b41c47fa062edb6c0dda1555ed" address="unix:///run/containerd/s/48de018ba0d9ba174f3818878e132ec8a301b930403195fb51f42bfd7ba5e6a1" protocol=ttrpc version=3
Nov 23 08:42:19 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:19.798131338Z" level=info msg="StartContainer for \"91bc48b43ecc67ffc1bc3f7fdbc911d26a4116b41c47fa062edb6c0dda1555ed\" returns successfully"
Nov 23 08:42:26 old-k8s-version-180638 containerd[758]: E1123 08:42:26.310347 758 websocket.go:100] "Unhandled Error" err="unable to upgrade websocket connection: websocket server finished before becoming ready" logger="UnhandledError"
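# Note: the busybox image pull above completed in ~2.1s, and the closing
# "websocket server finished before becoming ready" error most likely comes
# from containerd's CRI streaming endpoint when a client drops mid-upgrade;
# it appears transient and does not affect the containers started above. A
# hedged way to confirm image and container state from inside the node:
#   out/minikube-linux-arm64 -p old-k8s-version-180638 ssh -- sudo crictl images
#   out/minikube-linux-arm64 -p old-k8s-version-180638 ssh -- sudo crictl ps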
==> coredns [7c2ec14edc41a4bad3c997ddbc366a57e75740ffe5d4d804776570d3bbf4089a] <==
.:53
[INFO] plugin/reload: Running configuration SHA512 = b7aacdf6a6aa730aafe4d018cac9b7b5ecfb346cba84a99f64521f87aef8b4958639c1cf97967716465791d05bd38f372615327b7cb1d93c850bae532744d54d
CoreDNS-1.10.1
linux/arm64, go1.20, 055b2c3
[INFO] 127.0.0.1:55800 - 59523 "HINFO IN 7767641017076382384.181717569997239392. udp 56 false 512" NXDOMAIN qr,rd,ra 56 0.011046589s
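# Note: the random-name HINFO query above is CoreDNS's loop-detection probe
# (the loop plugin); the NXDOMAIN answer indicates no forwarding loop. To
# re-fetch these logs directly, assuming the standard k8s-app=kube-dns label:
#   kubectl --context old-k8s-version-180638 -n kube-system logs -l k8s-app=kube-dns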
==> describe nodes <==
Name: old-k8s-version-180638
Roles: control-plane
Labels: beta.kubernetes.io/arch=arm64
beta.kubernetes.io/os=linux
kubernetes.io/arch=arm64
kubernetes.io/hostname=old-k8s-version-180638
kubernetes.io/os=linux
minikube.k8s.io/commit=3e219827a5f064cf736992b79e59864301ece66e
minikube.k8s.io/name=old-k8s-version-180638
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_11_23T08_41_47_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Sun, 23 Nov 2025 08:41:43 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: old-k8s-version-180638
AcquireTime: <unset>
RenewTime: Sun, 23 Nov 2025 08:42:27 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Sun, 23 Nov 2025 08:42:17 +0000 Sun, 23 Nov 2025 08:41:40 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Sun, 23 Nov 2025 08:42:17 +0000 Sun, 23 Nov 2025 08:41:40 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Sun, 23 Nov 2025 08:42:17 +0000 Sun, 23 Nov 2025 08:41:40 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Sun, 23 Nov 2025 08:42:17 +0000 Sun, 23 Nov 2025 08:42:13 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.76.2
Hostname: old-k8s-version-180638
Capacity:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022296Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022296Ki
pods: 110
System Info:
Machine ID: 7283ea1857f18f20a875c29069214c9d
System UUID: 66eb206b-bbaa-475d-8a79-ca34c9a5fe12
Boot ID: 728df74d-5f50-461c-8d62-9d80cc778630
Kernel Version: 5.15.0-1084-aws
OS Image: Debian GNU/Linux 12 (bookworm)
Operating System: linux
Architecture: arm64
Container Runtime Version: containerd://2.1.5
Kubelet Version: v1.28.0
Kube-Proxy Version: v1.28.0
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (9 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
default busybox 0 (0%) 0 (0%) 0 (0%) 0 (0%) 10s
kube-system coredns-5dd5756b68-q4lbv 100m (5%) 0 (0%) 70Mi (0%) 170Mi (2%) 27s
kube-system etcd-old-k8s-version-180638 100m (5%) 0 (0%) 100Mi (1%) 0 (0%) 40s
kube-system kindnet-mrfgl 100m (5%) 100m (5%) 50Mi (0%) 50Mi (0%) 28s
kube-system kube-apiserver-old-k8s-version-180638 250m (12%) 0 (0%) 0 (0%) 0 (0%) 40s
kube-system kube-controller-manager-old-k8s-version-180638 200m (10%) 0 (0%) 0 (0%) 0 (0%) 40s
kube-system kube-proxy-dk6g5 0 (0%) 0 (0%) 0 (0%) 0 (0%) 28s
kube-system kube-scheduler-old-k8s-version-180638 100m (5%) 0 (0%) 0 (0%) 0 (0%) 42s
kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 25s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 850m (42%) 100m (5%)
memory 220Mi (2%) 220Mi (2%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
hugepages-32Mi 0 (0%) 0 (0%)
hugepages-64Ki 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 26s kube-proxy
Normal Starting 41s kubelet Starting kubelet.
Normal NodeHasSufficientMemory 41s kubelet Node old-k8s-version-180638 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 41s kubelet Node old-k8s-version-180638 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 41s kubelet Node old-k8s-version-180638 status is now: NodeHasSufficientPID
Normal NodeAllocatableEnforced 40s kubelet Updated Node Allocatable limit across pods
Normal RegisteredNode 28s node-controller Node old-k8s-version-180638 event: Registered Node old-k8s-version-180638 in Controller
Normal NodeReady 14s kubelet Node old-k8s-version-180638 status is now: NodeReady
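# Note: this dump corresponds to `kubectl describe node`; the Allocated
# resources table shows the control-plane pods already requesting 850m of
# the node's 2 CPUs (42%), worth checking before scheduling heavier
# workloads. To reproduce against this cluster:
#   kubectl --context old-k8s-version-180638 describe node old-k8s-version-180638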
==> dmesg <==
[Nov23 07:17] ACPI: SRAT not present
[ +0.000000] ACPI: SRAT not present
[ +0.000000] SPI driver altr_a10sr has no spi_device_id for altr,a10sr
[ +0.015154] device-mapper: core: CONFIG_IMA_DISABLE_HTABLE is disabled. Duplicate IMA measurements will not be recorded in the IMA log.
[ +0.511595] systemd[1]: Configuration file /run/systemd/system/netplan-ovs-cleanup.service is marked world-inaccessible. This has no effect as configuration data is accessible via APIs without restrictions. Proceeding anyway.
[ +0.034200] systemd[1]: /lib/systemd/system/snapd.service:23: Unknown key name 'RestartMode' in section 'Service', ignoring.
[ +0.753844] ena 0000:00:05.0: LLQ is not supported Fallback to host mode policy.
[ +6.833249] kauditd_printk_skb: 36 callbacks suppressed
[Nov23 08:37] overlayfs: failed to resolve '/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/22/fs': -2
==> etcd [dd592fa780598a368949db2030306613299c5b0608cf477fbac364062431cf64] <==
{"level":"info","ts":"2025-11-23T08:41:39.991972Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 switched to configuration voters=(16896983918768216326)"}
{"level":"info","ts":"2025-11-23T08:41:39.99787Z","caller":"membership/cluster.go:421","msg":"added member","cluster-id":"6f20f2c4b2fb5f8a","local-member-id":"ea7e25599daad906","added-peer-id":"ea7e25599daad906","added-peer-peer-urls":["https://192.168.76.2:2380"]}
{"level":"info","ts":"2025-11-23T08:41:40.001646Z","caller":"embed/etcd.go:726","msg":"starting with client TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/server.crt, key = /var/lib/minikube/certs/etcd/server.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
{"level":"info","ts":"2025-11-23T08:41:40.001836Z","caller":"embed/etcd.go:597","msg":"serving peer traffic","address":"192.168.76.2:2380"}
{"level":"info","ts":"2025-11-23T08:41:40.002083Z","caller":"embed/etcd.go:569","msg":"cmux::serve","address":"192.168.76.2:2380"}
{"level":"info","ts":"2025-11-23T08:41:40.005961Z","caller":"embed/etcd.go:278","msg":"now serving peer/client/metrics","local-member-id":"ea7e25599daad906","initial-advertise-peer-urls":["https://192.168.76.2:2380"],"listen-peer-urls":["https://192.168.76.2:2380"],"advertise-client-urls":["https://192.168.76.2:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.76.2:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
{"level":"info","ts":"2025-11-23T08:41:40.00623Z","caller":"embed/etcd.go:855","msg":"serving metrics","address":"http://127.0.0.1:2381"}
{"level":"info","ts":"2025-11-23T08:41:40.257458Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 is starting a new election at term 1"}
{"level":"info","ts":"2025-11-23T08:41:40.25751Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 became pre-candidate at term 1"}
{"level":"info","ts":"2025-11-23T08:41:40.257539Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 received MsgPreVoteResp from ea7e25599daad906 at term 1"}
{"level":"info","ts":"2025-11-23T08:41:40.257552Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 became candidate at term 2"}
{"level":"info","ts":"2025-11-23T08:41:40.257563Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 received MsgVoteResp from ea7e25599daad906 at term 2"}
{"level":"info","ts":"2025-11-23T08:41:40.257574Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 became leader at term 2"}
{"level":"info","ts":"2025-11-23T08:41:40.257585Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: ea7e25599daad906 elected leader ea7e25599daad906 at term 2"}
{"level":"info","ts":"2025-11-23T08:41:40.268883Z","caller":"etcdserver/server.go:2062","msg":"published local member to cluster through raft","local-member-id":"ea7e25599daad906","local-member-attributes":"{Name:old-k8s-version-180638 ClientURLs:[https://192.168.76.2:2379]}","request-path":"/0/members/ea7e25599daad906/attributes","cluster-id":"6f20f2c4b2fb5f8a","publish-timeout":"7s"}
{"level":"info","ts":"2025-11-23T08:41:40.268928Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-23T08:41:40.269997Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2025-11-23T08:41:40.270072Z","caller":"etcdserver/server.go:2571","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-23T08:41:40.279065Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-23T08:41:40.280196Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.76.2:2379"}
{"level":"info","ts":"2025-11-23T08:41:40.28074Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2025-11-23T08:41:40.280878Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2025-11-23T08:41:40.285959Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"6f20f2c4b2fb5f8a","local-member-id":"ea7e25599daad906","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-23T08:41:40.2918Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-23T08:41:40.291928Z","caller":"etcdserver/server.go:2595","msg":"cluster version is updated","cluster-version":"3.5"}
==> kernel <==
08:42:27 up 1:24, 0 user, load average: 2.87, 3.94, 3.11
Linux old-k8s-version-180638 5.15.0-1084-aws #91~20.04.1-Ubuntu SMP Fri May 2 07:00:04 UTC 2025 aarch64 GNU/Linux
PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"
==> kindnet [75439fed83684fc39ca1dda64cef2644f6e3027bddbd15dff08e7923652250de] <==
I1123 08:42:03.267849 1 main.go:109] connected to apiserver: https://10.96.0.1:443
I1123 08:42:03.357718 1 main.go:139] hostIP = 192.168.76.2
podIP = 192.168.76.2
I1123 08:42:03.358291 1 main.go:148] setting mtu 1500 for CNI
I1123 08:42:03.358311 1 main.go:178] kindnetd IP family: "ipv4"
I1123 08:42:03.358351 1 main.go:182] noMask IPv4 subnets: [10.244.0.0/16]
time="2025-11-23T08:42:03Z" level=info msg="Created plugin 10-kube-network-policies (kindnetd, handles RunPodSandbox,RemovePodSandbox)"
I1123 08:42:03.558800 1 controller.go:377] "Starting controller" name="kube-network-policies"
I1123 08:42:03.558877 1 controller.go:381] "Waiting for informer caches to sync"
I1123 08:42:03.558907 1 shared_informer.go:350] "Waiting for caches to sync" controller="kube-network-policies"
I1123 08:42:03.559938 1 controller.go:390] nri plugin exited: failed to connect to NRI service: dial unix /var/run/nri/nri.sock: connect: no such file or directory
I1123 08:42:03.759127 1 shared_informer.go:357] "Caches are synced" controller="kube-network-policies"
I1123 08:42:03.759212 1 metrics.go:72] Registering metrics
I1123 08:42:03.759307 1 controller.go:711] "Syncing nftables rules"
I1123 08:42:13.563082 1 main.go:297] Handling node with IPs: map[192.168.76.2:{}]
I1123 08:42:13.563142 1 main.go:301] handling current node
I1123 08:42:23.558928 1 main.go:297] Handling node with IPs: map[192.168.76.2:{}]
I1123 08:42:23.558968 1 main.go:301] handling current node
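# Note: the "nri plugin exited" line indicates the optional NRI socket
# (/var/run/nri/nri.sock) is not enabled in this containerd; kindnet then
# proceeds via its informer-based controller, whose caches sync and node
# handling continue above. A sketch for checking the socket, assuming the
# same profile name:
#   out/minikube-linux-arm64 -p old-k8s-version-180638 ssh -- ls /var/run/nri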
==> kube-apiserver [9b79849edeb76ebe3d1f35f60331849eb478148607f83d7e7cc04f6a89d49cef] <==
I1123 08:41:43.257160 1 controller.go:624] quota admission added evaluator for: leases.coordination.k8s.io
I1123 08:41:43.264540 1 cache.go:39] Caches are synced for APIServiceRegistrationController controller
I1123 08:41:43.264801 1 shared_informer.go:318] Caches are synced for crd-autoregister
I1123 08:41:43.264982 1 aggregator.go:166] initial CRD sync complete...
I1123 08:41:43.265064 1 autoregister_controller.go:141] Starting autoregister controller
I1123 08:41:43.265148 1 cache.go:32] Waiting for caches to sync for autoregister controller
I1123 08:41:43.265235 1 cache.go:39] Caches are synced for autoregister controller
I1123 08:41:43.265825 1 apf_controller.go:377] Running API Priority and Fairness config worker
I1123 08:41:43.266130 1 apf_controller.go:380] Running API Priority and Fairness periodic rebalancing process
I1123 08:41:43.266262 1 shared_informer.go:318] Caches are synced for node_authorizer
I1123 08:41:44.072890 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I1123 08:41:44.080525 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I1123 08:41:44.080644 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I1123 08:41:44.750567 1 controller.go:624] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1123 08:41:44.801229 1 controller.go:624] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1123 08:41:44.903834 1 alloc.go:330] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W1123 08:41:44.910704 1 lease.go:263] Resetting endpoints for master service "kubernetes" to [192.168.76.2]
I1123 08:41:44.911792 1 controller.go:624] quota admission added evaluator for: endpoints
I1123 08:41:44.916638 1 controller.go:624] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1123 08:41:45.103928 1 controller.go:624] quota admission added evaluator for: serviceaccounts
I1123 08:41:46.604806 1 controller.go:624] quota admission added evaluator for: deployments.apps
I1123 08:41:46.621587 1 alloc.go:330] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I1123 08:41:46.632358 1 controller.go:624] quota admission added evaluator for: daemonsets.apps
I1123 08:41:59.838596 1 controller.go:624] quota admission added evaluator for: controllerrevisions.apps
I1123 08:41:59.989364 1 controller.go:624] quota admission added evaluator for: replicasets.apps
==> kube-controller-manager [3a3a4da63be8b591cb08202b3fb1a9b242f87a54811f462f72de264b9c1b565d] <==
I1123 08:41:59.234069 1 shared_informer.go:318] Caches are synced for cronjob
I1123 08:41:59.238552 1 shared_informer.go:318] Caches are synced for disruption
I1123 08:41:59.287773 1 shared_informer.go:318] Caches are synced for resource quota
I1123 08:41:59.635666 1 shared_informer.go:318] Caches are synced for garbage collector
I1123 08:41:59.635720 1 garbagecollector.go:166] "All resource monitors have synced. Proceeding to collect garbage"
I1123 08:41:59.643988 1 shared_informer.go:318] Caches are synced for garbage collector
I1123 08:41:59.852836 1 event.go:307] "Event occurred" object="kube-system/kindnet" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kindnet-mrfgl"
I1123 08:41:59.861554 1 event.go:307] "Event occurred" object="kube-system/kube-proxy" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-dk6g5"
I1123 08:41:59.994988 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set coredns-5dd5756b68 to 2"
I1123 08:42:00.250351 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-q4lbv"
I1123 08:42:00.296011 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-j889m"
I1123 08:42:00.327262 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="332.952552ms"
I1123 08:42:00.350757 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="23.440283ms"
I1123 08:42:00.350887 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="77.187µs"
I1123 08:42:01.812294 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set coredns-5dd5756b68 to 1 from 2"
I1123 08:42:01.846504 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: coredns-5dd5756b68-j889m"
I1123 08:42:01.870499 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="58.780918ms"
I1123 08:42:01.879884 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="9.337438ms"
I1123 08:42:01.882600 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="46.828µs"
I1123 08:42:13.666896 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="126.5µs"
I1123 08:42:13.681803 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="75.242µs"
I1123 08:42:14.082595 1 node_lifecycle_controller.go:1048] "Controller detected that some Nodes are Ready. Exiting master disruption mode"
I1123 08:42:14.904458 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="120.896µs"
I1123 08:42:14.939466 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="17.592228ms"
I1123 08:42:14.940407 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="44.694µs"
==> kube-proxy [a92786aea3fde1301dec08d36ed3b9e913c480310fa0d744d9a1cf2c70d26621] <==
I1123 08:42:00.964044 1 server_others.go:69] "Using iptables proxy"
I1123 08:42:01.013279 1 node.go:141] Successfully retrieved node IP: 192.168.76.2
I1123 08:42:01.120318 1 server.go:632] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I1123 08:42:01.122189 1 server_others.go:152] "Using iptables Proxier"
I1123 08:42:01.122231 1 server_others.go:421] "Detect-local-mode set to ClusterCIDR, but no cluster CIDR for family" ipFamily="IPv6"
I1123 08:42:01.122239 1 server_others.go:438] "Defaulting to no-op detect-local"
I1123 08:42:01.122283 1 proxier.go:251] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
I1123 08:42:01.122555 1 server.go:846] "Version info" version="v1.28.0"
I1123 08:42:01.122986 1 server.go:848] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1123 08:42:01.123668 1 config.go:188] "Starting service config controller"
I1123 08:42:01.123741 1 shared_informer.go:311] Waiting for caches to sync for service config
I1123 08:42:01.123783 1 config.go:97] "Starting endpoint slice config controller"
I1123 08:42:01.123795 1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
I1123 08:42:01.124686 1 config.go:315] "Starting node config controller"
I1123 08:42:01.124705 1 shared_informer.go:311] Waiting for caches to sync for node config
I1123 08:42:01.224043 1 shared_informer.go:318] Caches are synced for endpoint slice config
I1123 08:42:01.224109 1 shared_informer.go:318] Caches are synced for service config
I1123 08:42:01.225482 1 shared_informer.go:318] Caches are synced for node config
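# Note: kube-proxy found no IPv6 cluster CIDR, so local-traffic detection
# for that family defaults to a no-op while IPv4 uses the iptables proxier;
# both messages are expected on this single-stack cluster. To pull these
# logs directly, assuming the standard k8s-app=kube-proxy label:
#   kubectl --context old-k8s-version-180638 -n kube-system logs -l k8s-app=kube-proxy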
==> kube-scheduler [81034c6fa713b6148ba16d2f50c50ea8e020311ed53ed7d84f6606a76362fc4f] <==
W1123 08:41:43.659650 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E1123 08:41:43.659675 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
W1123 08:41:43.666035 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E1123 08:41:43.666076 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
W1123 08:41:43.666131 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E1123 08:41:43.666152 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
W1123 08:41:43.666282 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E1123 08:41:43.666305 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
W1123 08:41:43.666372 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E1123 08:41:43.666389 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
W1123 08:41:43.666441 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E1123 08:41:43.666456 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
W1123 08:41:43.666515 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E1123 08:41:43.666530 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
W1123 08:41:43.666580 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E1123 08:41:43.666596 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
W1123 08:41:43.666648 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E1123 08:41:43.666663 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
W1123 08:41:43.666710 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E1123 08:41:43.666725 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
W1123 08:41:44.500880 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E1123 08:41:44.500926 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
W1123 08:41:44.538573 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E1123 08:41:44.538617 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
I1123 08:41:45.053879 1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
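# Note: the "forbidden ... system:kube-scheduler cannot list" errors above
# look like the usual bootstrap race: the scheduler starts before its RBAC
# bindings are reconciled, and the final caches-synced line shows it
# recovering. A quick way to confirm they do not recur:
#   out/minikube-linux-arm64 -p old-k8s-version-180638 logs | grep "cannot list resource"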
==> kubelet <==
Nov 23 08:41:59 old-k8s-version-180638 kubelet[1561]: I1123 08:41:59.155079 1561 kuberuntime_manager.go:1463] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/24"
Nov 23 08:41:59 old-k8s-version-180638 kubelet[1561]: I1123 08:41:59.155695 1561 kubelet_network.go:61] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24"
Nov 23 08:41:59 old-k8s-version-180638 kubelet[1561]: I1123 08:41:59.867270 1561 topology_manager.go:215] "Topology Admit Handler" podUID="53d90f3f-687b-45a0-a344-321a75f38a20" podNamespace="kube-system" podName="kindnet-mrfgl"
Nov 23 08:41:59 old-k8s-version-180638 kubelet[1561]: I1123 08:41:59.882839 1561 topology_manager.go:215] "Topology Admit Handler" podUID="27bc489f-26f8-4848-9df2-6530dcad7423" podNamespace="kube-system" podName="kube-proxy-dk6g5"
Nov 23 08:41:59 old-k8s-version-180638 kubelet[1561]: I1123 08:41:59.909264 1561 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/53d90f3f-687b-45a0-a344-321a75f38a20-xtables-lock\") pod \"kindnet-mrfgl\" (UID: \"53d90f3f-687b-45a0-a344-321a75f38a20\") " pod="kube-system/kindnet-mrfgl"
Nov 23 08:41:59 old-k8s-version-180638 kubelet[1561]: I1123 08:41:59.909324 1561 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k598w\" (UniqueName: \"kubernetes.io/projected/53d90f3f-687b-45a0-a344-321a75f38a20-kube-api-access-k598w\") pod \"kindnet-mrfgl\" (UID: \"53d90f3f-687b-45a0-a344-321a75f38a20\") " pod="kube-system/kindnet-mrfgl"
Nov 23 08:41:59 old-k8s-version-180638 kubelet[1561]: I1123 08:41:59.909349 1561 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/27bc489f-26f8-4848-9df2-6530dcad7423-kube-proxy\") pod \"kube-proxy-dk6g5\" (UID: \"27bc489f-26f8-4848-9df2-6530dcad7423\") " pod="kube-system/kube-proxy-dk6g5"
Nov 23 08:41:59 old-k8s-version-180638 kubelet[1561]: I1123 08:41:59.909373 1561 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/27bc489f-26f8-4848-9df2-6530dcad7423-xtables-lock\") pod \"kube-proxy-dk6g5\" (UID: \"27bc489f-26f8-4848-9df2-6530dcad7423\") " pod="kube-system/kube-proxy-dk6g5"
Nov 23 08:41:59 old-k8s-version-180638 kubelet[1561]: I1123 08:41:59.909397 1561 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/27bc489f-26f8-4848-9df2-6530dcad7423-lib-modules\") pod \"kube-proxy-dk6g5\" (UID: \"27bc489f-26f8-4848-9df2-6530dcad7423\") " pod="kube-system/kube-proxy-dk6g5"
Nov 23 08:41:59 old-k8s-version-180638 kubelet[1561]: I1123 08:41:59.909438 1561 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-cfg\" (UniqueName: \"kubernetes.io/host-path/53d90f3f-687b-45a0-a344-321a75f38a20-cni-cfg\") pod \"kindnet-mrfgl\" (UID: \"53d90f3f-687b-45a0-a344-321a75f38a20\") " pod="kube-system/kindnet-mrfgl"
Nov 23 08:41:59 old-k8s-version-180638 kubelet[1561]: I1123 08:41:59.909462 1561 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/53d90f3f-687b-45a0-a344-321a75f38a20-lib-modules\") pod \"kindnet-mrfgl\" (UID: \"53d90f3f-687b-45a0-a344-321a75f38a20\") " pod="kube-system/kindnet-mrfgl"
Nov 23 08:41:59 old-k8s-version-180638 kubelet[1561]: I1123 08:41:59.909488 1561 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-djzpr\" (UniqueName: \"kubernetes.io/projected/27bc489f-26f8-4848-9df2-6530dcad7423-kube-api-access-djzpr\") pod \"kube-proxy-dk6g5\" (UID: \"27bc489f-26f8-4848-9df2-6530dcad7423\") " pod="kube-system/kube-proxy-dk6g5"
Nov 23 08:42:00 old-k8s-version-180638 kubelet[1561]: I1123 08:42:00.902067 1561 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kube-proxy-dk6g5" podStartSLOduration=1.902024352 podCreationTimestamp="2025-11-23 08:41:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 08:42:00.901629237 +0000 UTC m=+14.326420754" watchObservedRunningTime="2025-11-23 08:42:00.902024352 +0000 UTC m=+14.326815869"
Nov 23 08:42:06 old-k8s-version-180638 kubelet[1561]: I1123 08:42:06.732067 1561 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kindnet-mrfgl" podStartSLOduration=5.495030012 podCreationTimestamp="2025-11-23 08:41:59 +0000 UTC" firstStartedPulling="2025-11-23 08:42:00.776285773 +0000 UTC m=+14.201077291" lastFinishedPulling="2025-11-23 08:42:03.013276696 +0000 UTC m=+16.438068214" observedRunningTime="2025-11-23 08:42:03.871246056 +0000 UTC m=+17.296037582" watchObservedRunningTime="2025-11-23 08:42:06.732020935 +0000 UTC m=+20.156812461"
Nov 23 08:42:13 old-k8s-version-180638 kubelet[1561]: I1123 08:42:13.622916 1561 kubelet_node_status.go:493] "Fast updating node status as it just became ready"
Nov 23 08:42:13 old-k8s-version-180638 kubelet[1561]: I1123 08:42:13.659700 1561 topology_manager.go:215] "Topology Admit Handler" podUID="9a14996d-e910-4a4f-a6f6-f2d8565a4b9c" podNamespace="kube-system" podName="coredns-5dd5756b68-q4lbv"
Nov 23 08:42:13 old-k8s-version-180638 kubelet[1561]: I1123 08:42:13.662325 1561 topology_manager.go:215] "Topology Admit Handler" podUID="fa923b06-d896-468f-8e82-51b4e9df88dc" podNamespace="kube-system" podName="storage-provisioner"
Nov 23 08:42:13 old-k8s-version-180638 kubelet[1561]: I1123 08:42:13.844230 1561 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cbqlk\" (UniqueName: \"kubernetes.io/projected/9a14996d-e910-4a4f-a6f6-f2d8565a4b9c-kube-api-access-cbqlk\") pod \"coredns-5dd5756b68-q4lbv\" (UID: \"9a14996d-e910-4a4f-a6f6-f2d8565a4b9c\") " pod="kube-system/coredns-5dd5756b68-q4lbv"
Nov 23 08:42:13 old-k8s-version-180638 kubelet[1561]: I1123 08:42:13.844293 1561 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wsph6\" (UniqueName: \"kubernetes.io/projected/fa923b06-d896-468f-8e82-51b4e9df88dc-kube-api-access-wsph6\") pod \"storage-provisioner\" (UID: \"fa923b06-d896-468f-8e82-51b4e9df88dc\") " pod="kube-system/storage-provisioner"
Nov 23 08:42:13 old-k8s-version-180638 kubelet[1561]: I1123 08:42:13.844319 1561 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9a14996d-e910-4a4f-a6f6-f2d8565a4b9c-config-volume\") pod \"coredns-5dd5756b68-q4lbv\" (UID: \"9a14996d-e910-4a4f-a6f6-f2d8565a4b9c\") " pod="kube-system/coredns-5dd5756b68-q4lbv"
Nov 23 08:42:13 old-k8s-version-180638 kubelet[1561]: I1123 08:42:13.844356 1561 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/fa923b06-d896-468f-8e82-51b4e9df88dc-tmp\") pod \"storage-provisioner\" (UID: \"fa923b06-d896-468f-8e82-51b4e9df88dc\") " pod="kube-system/storage-provisioner"
Nov 23 08:42:14 old-k8s-version-180638 kubelet[1561]: I1123 08:42:14.902124 1561 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/coredns-5dd5756b68-q4lbv" podStartSLOduration=14.902081859 podCreationTimestamp="2025-11-23 08:42:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 08:42:14.90156744 +0000 UTC m=+28.326358958" watchObservedRunningTime="2025-11-23 08:42:14.902081859 +0000 UTC m=+28.326873377"
Nov 23 08:42:17 old-k8s-version-180638 kubelet[1561]: I1123 08:42:17.115903 1561 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=15.115773659 podCreationTimestamp="2025-11-23 08:42:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 08:42:14.969166121 +0000 UTC m=+28.393957647" watchObservedRunningTime="2025-11-23 08:42:17.115773659 +0000 UTC m=+30.540565177"
Nov 23 08:42:17 old-k8s-version-180638 kubelet[1561]: I1123 08:42:17.116261 1561 topology_manager.go:215] "Topology Admit Handler" podUID="54457203-a4b0-4bfe-b7e6-9804ec70353f" podNamespace="default" podName="busybox"
Nov 23 08:42:17 old-k8s-version-180638 kubelet[1561]: I1123 08:42:17.162701 1561 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5rd6c\" (UniqueName: \"kubernetes.io/projected/54457203-a4b0-4bfe-b7e6-9804ec70353f-kube-api-access-5rd6c\") pod \"busybox\" (UID: \"54457203-a4b0-4bfe-b7e6-9804ec70353f\") " pod="default/busybox"
==> storage-provisioner [d28eb2e2ce19649f4947ef6afbf30d211d2dfa34551b90f0c10c58fdb65b63cd] <==
I1123 08:42:14.310989 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1123 08:42:14.328878 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1123 08:42:14.329183 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I1123 08:42:14.342255 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I1123 08:42:14.342845 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"46657fa0-d0c5-44e7-b4c5-6303b10aff5f", APIVersion:"v1", ResourceVersion:"414", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' old-k8s-version-180638_72561ecd-5ccf-4007-bbe6-862fc9539cb1 became leader
I1123 08:42:14.342920 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_old-k8s-version-180638_72561ecd-5ccf-4007-bbe6-862fc9539cb1!
I1123 08:42:14.443997 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_old-k8s-version-180638_72561ecd-5ccf-4007-bbe6-862fc9539cb1!
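# Note: the provisioner won leadership via the kube-system
# k8s.io-minikube-hostpath Endpoints lease before starting its controller.
# To inspect the lease record, using the object name shown above:
#   kubectl --context old-k8s-version-180638 -n kube-system get endpoints k8s.io-minikube-hostpath -o yaml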
-- /stdout --
helpers_test.go:262: (dbg) Run: out/minikube-linux-arm64 status --format={{.APIServer}} -p old-k8s-version-180638 -n old-k8s-version-180638
helpers_test.go:269: (dbg) Run: kubectl --context old-k8s-version-180638 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:293: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: end of post-mortem logs <<<
helpers_test.go:294: ---------------------/post-mortem---------------------------------
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: docker inspect <======
helpers_test.go:239: (dbg) Run: docker inspect old-k8s-version-180638
helpers_test.go:243: (dbg) docker inspect old-k8s-version-180638:
-- stdout --
[
{
"Id": "3fb449072f419f1d1ff9eebb56f96c76cc24ab8ceb8213db71616f0ddddcbb9f",
"Created": "2025-11-23T08:41:19.865592877Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 197224,
"ExitCode": 0,
"Error": "",
"StartedAt": "2025-11-23T08:41:19.943635138Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:572c983e466f1f784136812eef5cc59ac623db764bc7704d3676c4643993fd08",
"ResolvConfPath": "/var/lib/docker/containers/3fb449072f419f1d1ff9eebb56f96c76cc24ab8ceb8213db71616f0ddddcbb9f/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/3fb449072f419f1d1ff9eebb56f96c76cc24ab8ceb8213db71616f0ddddcbb9f/hostname",
"HostsPath": "/var/lib/docker/containers/3fb449072f419f1d1ff9eebb56f96c76cc24ab8ceb8213db71616f0ddddcbb9f/hosts",
"LogPath": "/var/lib/docker/containers/3fb449072f419f1d1ff9eebb56f96c76cc24ab8ceb8213db71616f0ddddcbb9f/3fb449072f419f1d1ff9eebb56f96c76cc24ab8ceb8213db71616f0ddddcbb9f-json.log",
"Name": "/old-k8s-version-180638",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"old-k8s-version-180638:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {}
},
"NetworkMode": "old-k8s-version-180638",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 3221225472,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 6442450944,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"ID": "3fb449072f419f1d1ff9eebb56f96c76cc24ab8ceb8213db71616f0ddddcbb9f",
"LowerDir": "/var/lib/docker/overlay2/3a0f954d6f7082ad577dca92fa6658b1e327bb820ce9a801d55d584f14165f01-init/diff:/var/lib/docker/overlay2/88c30082a717909d357f7d81c88a05ce3487a40d372ee6dc57fb9f012e0502da/diff",
"MergedDir": "/var/lib/docker/overlay2/3a0f954d6f7082ad577dca92fa6658b1e327bb820ce9a801d55d584f14165f01/merged",
"UpperDir": "/var/lib/docker/overlay2/3a0f954d6f7082ad577dca92fa6658b1e327bb820ce9a801d55d584f14165f01/diff",
"WorkDir": "/var/lib/docker/overlay2/3a0f954d6f7082ad577dca92fa6658b1e327bb820ce9a801d55d584f14165f01/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "old-k8s-version-180638",
"Source": "/var/lib/docker/volumes/old-k8s-version-180638/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "old-k8s-version-180638",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "old-k8s-version-180638",
"name.minikube.sigs.k8s.io": "old-k8s-version-180638",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "c7b0d9b425062d52a0c8052c45b2a62780ff3f6f2620c50e9e88251d56098ed9",
"SandboxKey": "/var/run/docker/netns/c7b0d9b42506",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33053"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33054"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33057"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33055"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33056"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"old-k8s-version-180638": {
"IPAMConfig": {
"IPv4Address": "192.168.76.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "66:cc:5c:df:67:d2",
"DriverOpts": null,
"GwPriority": 0,
"NetworkID": "ec0f96b222364b6472248735ae9433b2f33bdeaa152953368412a68215eb42c4",
"EndpointID": "20998764ba69f988f94705bb48be4dc33edbb29c350250a4be2539cea69e130e",
"Gateway": "192.168.76.1",
"IPAddress": "192.168.76.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"old-k8s-version-180638",
"3fb449072f41"
]
}
}
}
}
]
-- /stdout --
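Worth noting in the inspect output above: "Ulimits": [] in HostConfig. No NOFILE override was applied when the container was created, so its PID 1 inherits the daemon's default soft limit, which lines up with the failing assertion earlier ('ulimit -n' returned 1024, expected 1048576). A minimal sketch, assuming the upstream Docker Go SDK (github.com/docker/docker/client), that checks this field directly; it is not part of the test suite:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/client"
)

func main() {
	// Connect the same way the CLI would (DOCKER_HOST etc. from the environment).
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Same container this post-mortem inspects.
	info, err := cli.ContainerInspect(context.Background(), "old-k8s-version-180638")
	if err != nil {
		log.Fatal(err)
	}
	if len(info.HostConfig.Ulimits) == 0 {
		fmt.Println("no ulimit overrides: container inherits the daemon's default nofile limit")
	}
	for _, u := range info.HostConfig.Ulimits {
		fmt.Printf("%s soft=%d hard=%d\n", u.Name, u.Soft, u.Hard)
	}
}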
helpers_test.go:247: (dbg) Run: out/minikube-linux-arm64 status --format={{.Host}} -p old-k8s-version-180638 -n old-k8s-version-180638
helpers_test.go:252: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-arm64 -p old-k8s-version-180638 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-arm64 -p old-k8s-version-180638 logs -n 25: (1.238926736s)
helpers_test.go:260: TestStartStop/group/old-k8s-version/serial/DeployApp logs:
-- stdout --
==> Audit <==
┌─────────┬─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬──────────────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼──────────────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ ssh │ -p cilium-440243 sudo cat /etc/docker/daemon.json │ cilium-440243 │ jenkins │ v1.37.0 │ 23 Nov 25 08:39 UTC │ │
│ ssh │ -p cilium-440243 sudo docker system info │ cilium-440243 │ jenkins │ v1.37.0 │ 23 Nov 25 08:39 UTC │ │
│ ssh │ -p cilium-440243 sudo systemctl status cri-docker --all --full --no-pager │ cilium-440243 │ jenkins │ v1.37.0 │ 23 Nov 25 08:39 UTC │ │
│ ssh │ -p cilium-440243 sudo systemctl cat cri-docker --no-pager │ cilium-440243 │ jenkins │ v1.37.0 │ 23 Nov 25 08:39 UTC │ │
│ ssh │ -p cilium-440243 sudo cat /etc/systemd/system/cri-docker.service.d/10-cni.conf │ cilium-440243 │ jenkins │ v1.37.0 │ 23 Nov 25 08:39 UTC │ │
│ ssh │ -p cilium-440243 sudo cat /usr/lib/systemd/system/cri-docker.service │ cilium-440243 │ jenkins │ v1.37.0 │ 23 Nov 25 08:39 UTC │ │
│ ssh │ -p cilium-440243 sudo cri-dockerd --version │ cilium-440243 │ jenkins │ v1.37.0 │ 23 Nov 25 08:39 UTC │ │
│ ssh │ -p cilium-440243 sudo systemctl status containerd --all --full --no-pager │ cilium-440243 │ jenkins │ v1.37.0 │ 23 Nov 25 08:39 UTC │ │
│ ssh │ -p cilium-440243 sudo systemctl cat containerd --no-pager │ cilium-440243 │ jenkins │ v1.37.0 │ 23 Nov 25 08:39 UTC │ │
│ ssh │ -p cilium-440243 sudo cat /lib/systemd/system/containerd.service │ cilium-440243 │ jenkins │ v1.37.0 │ 23 Nov 25 08:39 UTC │ │
│ ssh │ -p cilium-440243 sudo cat /etc/containerd/config.toml │ cilium-440243 │ jenkins │ v1.37.0 │ 23 Nov 25 08:39 UTC │ │
│ ssh │ -p cilium-440243 sudo containerd config dump │ cilium-440243 │ jenkins │ v1.37.0 │ 23 Nov 25 08:39 UTC │ │
│ ssh │ -p cilium-440243 sudo systemctl status crio --all --full --no-pager │ cilium-440243 │ jenkins │ v1.37.0 │ 23 Nov 25 08:39 UTC │ │
│ ssh │ -p cilium-440243 sudo systemctl cat crio --no-pager │ cilium-440243 │ jenkins │ v1.37.0 │ 23 Nov 25 08:39 UTC │ │
│ ssh │ -p cilium-440243 sudo find /etc/crio -type f -exec sh -c 'echo {}; cat {}' \; │ cilium-440243 │ jenkins │ v1.37.0 │ 23 Nov 25 08:39 UTC │ │
│ ssh │ -p cilium-440243 sudo crio config │ cilium-440243 │ jenkins │ v1.37.0 │ 23 Nov 25 08:39 UTC │ │
│ delete │ -p cilium-440243 │ cilium-440243 │ jenkins │ v1.37.0 │ 23 Nov 25 08:39 UTC │ 23 Nov 25 08:39 UTC │
│ start │ -p cert-expiration-119748 --memory=3072 --cert-expiration=3m --driver=docker --container-runtime=containerd │ cert-expiration-119748 │ jenkins │ v1.37.0 │ 23 Nov 25 08:39 UTC │ 23 Nov 25 08:40 UTC │
│ ssh │ force-systemd-env-760522 ssh cat /etc/containerd/config.toml │ force-systemd-env-760522 │ jenkins │ v1.37.0 │ 23 Nov 25 08:40 UTC │ 23 Nov 25 08:40 UTC │
│ delete │ -p force-systemd-env-760522 │ force-systemd-env-760522 │ jenkins │ v1.37.0 │ 23 Nov 25 08:40 UTC │ 23 Nov 25 08:40 UTC │
│ start │ -p cert-options-106536 --memory=3072 --apiserver-ips=127.0.0.1 --apiserver-ips=192.168.15.15 --apiserver-names=localhost --apiserver-names=www.google.com --apiserver-port=8555 --driver=docker --container-runtime=containerd │ cert-options-106536 │ jenkins │ v1.37.0 │ 23 Nov 25 08:40 UTC │ 23 Nov 25 08:41 UTC │
│ ssh │ cert-options-106536 ssh openssl x509 -text -noout -in /var/lib/minikube/certs/apiserver.crt │ cert-options-106536 │ jenkins │ v1.37.0 │ 23 Nov 25 08:41 UTC │ 23 Nov 25 08:41 UTC │
│ ssh │ -p cert-options-106536 -- sudo cat /etc/kubernetes/admin.conf │ cert-options-106536 │ jenkins │ v1.37.0 │ 23 Nov 25 08:41 UTC │ 23 Nov 25 08:41 UTC │
│ delete │ -p cert-options-106536 │ cert-options-106536 │ jenkins │ v1.37.0 │ 23 Nov 25 08:41 UTC │ 23 Nov 25 08:41 UTC │
│ start │ -p old-k8s-version-180638 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.28.0 │ old-k8s-version-180638 │ jenkins │ v1.37.0 │ 23 Nov 25 08:41 UTC │ 23 Nov 25 08:42 UTC │
└─────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/11/23 08:41:13
Running on machine: ip-172-31-31-251
Binary: Built with gc go1.25.3 for linux/arm64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
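Every entry below follows the glog/klog layout stated in the header line above. A small sketch for splitting such lines into fields with Go's regexp package (the field names are mine, not minikube's):

package main

import (
	"fmt"
	"regexp"
)

// Mirrors "[IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg" from the log header.
var klogLine = regexp.MustCompile(`^([IWEF])(\d{4}) (\d{2}:\d{2}:\d{2}\.\d{6})\s+(\d+) ([^ :]+:\d+)\] (.*)$`)

func main() {
	line := "I1123 08:41:13.503798 196829 out.go:360] Setting OutFile to fd 1 ..."
	if m := klogLine.FindStringSubmatch(line); m != nil {
		fmt.Printf("severity=%s date=%s time=%s tid=%s loc=%s msg=%q\n",
			m[1], m[2], m[3], m[4], m[5], m[6])
	}
}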
I1123 08:41:13.503798 196829 out.go:360] Setting OutFile to fd 1 ...
I1123 08:41:13.504001 196829 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1123 08:41:13.504037 196829 out.go:374] Setting ErrFile to fd 2...
I1123 08:41:13.504057 196829 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1123 08:41:13.504449 196829 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21966-2339/.minikube/bin
I1123 08:41:13.504989 196829 out.go:368] Setting JSON to false
I1123 08:41:13.507307 196829 start.go:133] hostinfo: {"hostname":"ip-172-31-31-251","uptime":5022,"bootTime":1763882251,"procs":187,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1084-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"982e3628-3742-4b3e-bb63-ac1b07660ec7"}
I1123 08:41:13.507402 196829 start.go:143] virtualization:
I1123 08:41:13.511220 196829 out.go:179] * [old-k8s-version-180638] minikube v1.37.0 on Ubuntu 20.04 (arm64)
I1123 08:41:13.515732 196829 out.go:179] - MINIKUBE_LOCATION=21966
I1123 08:41:13.516085 196829 notify.go:221] Checking for updates...
I1123 08:41:13.523195 196829 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1123 08:41:13.526521 196829 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21966-2339/kubeconfig
I1123 08:41:13.529705 196829 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21966-2339/.minikube
I1123 08:41:13.532894 196829 out.go:179] - MINIKUBE_BIN=out/minikube-linux-arm64
I1123 08:41:13.536018 196829 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1123 08:41:13.539629 196829 config.go:182] Loaded profile config "cert-expiration-119748": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1123 08:41:13.539739 196829 driver.go:422] Setting default libvirt URI to qemu:///system
I1123 08:41:13.574366 196829 docker.go:124] docker version: linux-28.1.1:Docker Engine - Community
I1123 08:41:13.574516 196829 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1123 08:41:13.638032 196829 info.go:266] docker info: {ID:EOU5:DNGX:XN6V:L2FZ:UXRM:5TWK:EVUR:KC2F:GT7Z:Y4O4:GB77:5PD3 Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:36 OomKillDisable:true NGoroutines:52 SystemTime:2025-11-23 08:41:13.62864309 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214831104 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-31-251 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I1123 08:41:13.638135 196829 docker.go:319] overlay module found
I1123 08:41:13.643635 196829 out.go:179] * Using the docker driver based on user configuration
I1123 08:41:13.646835 196829 start.go:309] selected driver: docker
I1123 08:41:13.646859 196829 start.go:927] validating driver "docker" against <nil>
I1123 08:41:13.646879 196829 start.go:938] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1123 08:41:13.647612 196829 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1123 08:41:13.702166 196829 info.go:266] docker info: {ID:EOU5:DNGX:XN6V:L2FZ:UXRM:5TWK:EVUR:KC2F:GT7Z:Y4O4:GB77:5PD3 Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:36 OomKillDisable:true NGoroutines:52 SystemTime:2025-11-23 08:41:13.693228668 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214831104 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-31-251 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I1123 08:41:13.702317 196829 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I1123 08:41:13.702534 196829 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
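Both driver probes above shell out to docker system info --format "{{json .}}" and decode the result. A sketch decoding just a few of the fields visible in the dumps; the JSON keys NCPU, MemTotal, ServerVersion and CgroupDriver are ones Docker emits, while the struct itself is illustrative:

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os/exec"
)

type dockerInfo struct {
	NCPU          int    `json:"NCPU"`
	MemTotal      int64  `json:"MemTotal"`
	ServerVersion string `json:"ServerVersion"`
	CgroupDriver  string `json:"CgroupDriver"`
}

func main() {
	out, err := exec.Command("docker", "system", "info", "--format", "{{json .}}").Output()
	if err != nil {
		log.Fatal(err)
	}
	var info dockerInfo
	if err := json.Unmarshal(out, &info); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("docker %s: %d CPUs, %d bytes RAM, cgroup driver %s\n",
		info.ServerVersion, info.NCPU, info.MemTotal, info.CgroupDriver)
}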
I1123 08:41:13.705700 196829 out.go:179] * Using Docker driver with root privileges
I1123 08:41:13.708681 196829 cni.go:84] Creating CNI manager for ""
I1123 08:41:13.708750 196829 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1123 08:41:13.708770 196829 start_flags.go:336] Found "CNI" CNI - setting NetworkPlugin=cni
I1123 08:41:13.708863 196829 start.go:353] cluster config:
{Name:old-k8s-version-180638 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-180638 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1123 08:41:13.711891 196829 out.go:179] * Starting "old-k8s-version-180638" primary control-plane node in "old-k8s-version-180638" cluster
I1123 08:41:13.714733 196829 cache.go:134] Beginning downloading kic base image for docker with containerd
I1123 08:41:13.717633 196829 out.go:179] * Pulling base image v0.0.48-1763789673-21948 ...
I1123 08:41:13.720589 196829 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1123 08:41:13.720638 196829 preload.go:203] Found local preload: /home/jenkins/minikube-integration/21966-2339/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-arm64.tar.lz4
I1123 08:41:13.720665 196829 cache.go:65] Caching tarball of preloaded images
I1123 08:41:13.720676 196829 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f in local docker daemon
I1123 08:41:13.720783 196829 preload.go:238] Found /home/jenkins/minikube-integration/21966-2339/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-arm64.tar.lz4 in cache, skipping download
I1123 08:41:13.720794 196829 cache.go:68] Finished verifying existence of preloaded tar for v1.28.0 on containerd
I1123 08:41:13.720923 196829 profile.go:143] Saving config to /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/config.json ...
I1123 08:41:13.720948 196829 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/config.json: {Name:mk3fa6091d320fb60049f236674c350f36f8b1c3 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:41:13.740066 196829 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f in local docker daemon, skipping pull
I1123 08:41:13.740090 196829 cache.go:158] gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f exists in daemon, skipping load
I1123 08:41:13.740110 196829 cache.go:243] Successfully downloaded all kic artifacts
I1123 08:41:13.740140 196829 start.go:360] acquireMachinesLock for old-k8s-version-180638: {Name:mk02adabcbe3b4194eb9b9cf13dfbc9bffd5d61a Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1123 08:41:13.740251 196829 start.go:364] duration metric: took 92.325µs to acquireMachinesLock for "old-k8s-version-180638"
I1123 08:41:13.740280 196829 start.go:93] Provisioning new machine with config: &{Name:old-k8s-version-180638 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-180638 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1123 08:41:13.740345 196829 start.go:125] createHost starting for "" (driver="docker")
I1123 08:41:13.743708 196829 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I1123 08:41:13.743928 196829 start.go:159] libmachine.API.Create for "old-k8s-version-180638" (driver="docker")
I1123 08:41:13.743964 196829 client.go:173] LocalClient.Create starting
I1123 08:41:13.744044 196829 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21966-2339/.minikube/certs/ca.pem
I1123 08:41:13.744081 196829 main.go:143] libmachine: Decoding PEM data...
I1123 08:41:13.744099 196829 main.go:143] libmachine: Parsing certificate...
I1123 08:41:13.744156 196829 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21966-2339/.minikube/certs/cert.pem
I1123 08:41:13.744179 196829 main.go:143] libmachine: Decoding PEM data...
I1123 08:41:13.744191 196829 main.go:143] libmachine: Parsing certificate...
I1123 08:41:13.744566 196829 cli_runner.go:164] Run: docker network inspect old-k8s-version-180638 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1123 08:41:13.760425 196829 cli_runner.go:211] docker network inspect old-k8s-version-180638 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1123 08:41:13.760511 196829 network_create.go:284] running [docker network inspect old-k8s-version-180638] to gather additional debugging logs...
I1123 08:41:13.760531 196829 cli_runner.go:164] Run: docker network inspect old-k8s-version-180638
W1123 08:41:13.775922 196829 cli_runner.go:211] docker network inspect old-k8s-version-180638 returned with exit code 1
I1123 08:41:13.775955 196829 network_create.go:287] error running [docker network inspect old-k8s-version-180638]: docker network inspect old-k8s-version-180638: exit status 1
stdout:
[]
stderr:
Error response from daemon: network old-k8s-version-180638 not found
I1123 08:41:13.775968 196829 network_create.go:289] output of [docker network inspect old-k8s-version-180638]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network old-k8s-version-180638 not found
** /stderr **
I1123 08:41:13.776076 196829 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1123 08:41:13.792199 196829 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-a946cc9c0edf IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:de:ea:52:17:a9:7a} reservation:<nil>}
I1123 08:41:13.792559 196829 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-fb33daef15c9 IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:aa:08:1d:d1:c6:df} reservation:<nil>}
I1123 08:41:13.792931 196829 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-fb61edac6088 IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:e6:64:59:e2:c3:5a} reservation:<nil>}
I1123 08:41:13.793382 196829 network.go:206] using free private subnet 192.168.76.0/24: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0x4001a1e140}
I1123 08:41:13.793443 196829 network_create.go:124] attempt to create docker network old-k8s-version-180638 192.168.76.0/24 with gateway 192.168.76.1 and MTU of 1500 ...
I1123 08:41:13.793513 196829 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.76.0/24 --gateway=192.168.76.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=old-k8s-version-180638 old-k8s-version-180638
I1123 08:41:13.859515 196829 network_create.go:108] docker network old-k8s-version-180638 192.168.76.0/24 created
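The subnet search above walks 192.168.49.0/24, .58 and .67 before settling on the free 192.168.76.0/24; the candidates appear to step by 9 in the third octet. A sketch of such a free-/24 scan against local interface addresses (the step of 9 is inferred from this log, not confirmed against minikube's pkg/network source):

package main

import (
	"fmt"
	"log"
	"net"
)

// taken reports whether any local interface address already sits in subnet.
func taken(subnet *net.IPNet) bool {
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return false
	}
	for _, a := range addrs {
		if ipnet, ok := a.(*net.IPNet); ok && subnet.Contains(ipnet.IP) {
			return true
		}
	}
	return false
}

func main() {
	for third := 49; third < 255; third += 9 {
		cidr := fmt.Sprintf("192.168.%d.0/24", third)
		_, subnet, err := net.ParseCIDR(cidr)
		if err != nil {
			log.Fatal(err)
		}
		if taken(subnet) {
			fmt.Println("skipping subnet that is taken:", cidr)
			continue
		}
		fmt.Println("using free private subnet:", cidr)
		return
	}
}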
I1123 08:41:13.859564 196829 kic.go:121] calculated static IP "192.168.76.2" for the "old-k8s-version-180638" container
I1123 08:41:13.859638 196829 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1123 08:41:13.877503 196829 cli_runner.go:164] Run: docker volume create old-k8s-version-180638 --label name.minikube.sigs.k8s.io=old-k8s-version-180638 --label created_by.minikube.sigs.k8s.io=true
I1123 08:41:13.898930 196829 oci.go:103] Successfully created a docker volume old-k8s-version-180638
I1123 08:41:13.899032 196829 cli_runner.go:164] Run: docker run --rm --name old-k8s-version-180638-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=old-k8s-version-180638 --entrypoint /usr/bin/test -v old-k8s-version-180638:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -d /var/lib
I1123 08:41:14.458747 196829 oci.go:107] Successfully prepared a docker volume old-k8s-version-180638
I1123 08:41:14.458805 196829 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1123 08:41:14.458814 196829 kic.go:194] Starting extracting preloaded images to volume ...
I1123 08:41:14.458892 196829 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21966-2339/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v old-k8s-version-180638:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -I lz4 -xf /preloaded.tar -C /extractDir
I1123 08:41:19.794152 196829 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21966-2339/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v old-k8s-version-180638:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -I lz4 -xf /preloaded.tar -C /extractDir: (5.335195842s)
I1123 08:41:19.794189 196829 kic.go:203] duration metric: took 5.335371475s to extract preloaded images to volume ...
W1123 08:41:19.794328 196829 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I1123 08:41:19.794436 196829 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1123 08:41:19.848844 196829 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname old-k8s-version-180638 --name old-k8s-version-180638 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=old-k8s-version-180638 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=old-k8s-version-180638 --network old-k8s-version-180638 --ip 192.168.76.2 --volume old-k8s-version-180638:/var --security-opt apparmor=unconfined --memory=3072mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f
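The docker run line above carries --memory=3072mb and --cpus=2 (matching Memory: 3221225472 and NanoCpus: 2000000000 in the inspect output) but no --ulimit flag, which is consistent with the empty Ulimits array and the 1024 'ulimit -n' result that failed the test. For comparison, a sketch launching a throwaway container with an explicit NOFILE override; --ulimit nofile=soft:hard is a standard docker run option, and the busybox image is used here purely for illustration:

package main

import (
	"fmt"
	"log"
	"os/exec"
)

func main() {
	// Raise RLIMIT_NOFILE for the container's PID 1, then read it back.
	out, err := exec.Command("docker", "run", "--rm",
		"--ulimit", "nofile=1048576:1048576",
		"busybox", "sh", "-c", "ulimit -n").CombinedOutput()
	if err != nil {
		log.Fatalf("%v: %s", err, out)
	}
	fmt.Printf("ulimit -n inside container: %s", out)
}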
I1123 08:41:20.177907 196829 cli_runner.go:164] Run: docker container inspect old-k8s-version-180638 --format={{.State.Running}}
I1123 08:41:20.204948 196829 cli_runner.go:164] Run: docker container inspect old-k8s-version-180638 --format={{.State.Status}}
I1123 08:41:20.227539 196829 cli_runner.go:164] Run: docker exec old-k8s-version-180638 stat /var/lib/dpkg/alternatives/iptables
I1123 08:41:20.289856 196829 oci.go:144] the created container "old-k8s-version-180638" has a running status.
I1123 08:41:20.289891 196829 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21966-2339/.minikube/machines/old-k8s-version-180638/id_rsa...
I1123 08:41:20.448285 196829 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21966-2339/.minikube/machines/old-k8s-version-180638/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1123 08:41:20.475665 196829 cli_runner.go:164] Run: docker container inspect old-k8s-version-180638 --format={{.State.Status}}
I1123 08:41:20.521617 196829 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1123 08:41:20.521635 196829 kic_runner.go:114] Args: [docker exec --privileged old-k8s-version-180638 chown docker:docker /home/docker/.ssh/authorized_keys]
I1123 08:41:20.589359 196829 cli_runner.go:164] Run: docker container inspect old-k8s-version-180638 --format={{.State.Status}}
I1123 08:41:20.625639 196829 machine.go:94] provisionDockerMachine start ...
I1123 08:41:20.625720 196829 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-180638
I1123 08:41:20.654376 196829 main.go:143] libmachine: Using SSH client type: native
I1123 08:41:20.655192 196829 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3dad70] 0x3dd270 <nil> [] 0s} 127.0.0.1 33053 <nil> <nil>}
I1123 08:41:20.655341 196829 main.go:143] libmachine: About to run SSH command:
hostname
I1123 08:41:20.656290 196829 main.go:143] libmachine: Error dialing TCP: ssh: handshake failed: EOF
I1123 08:41:23.816940 196829 main.go:143] libmachine: SSH cmd err, output: <nil>: old-k8s-version-180638
I1123 08:41:23.816964 196829 ubuntu.go:182] provisioning hostname "old-k8s-version-180638"
I1123 08:41:23.817040 196829 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-180638
I1123 08:41:23.833840 196829 main.go:143] libmachine: Using SSH client type: native
I1123 08:41:23.834172 196829 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3dad70] 0x3dd270 <nil> [] 0s} 127.0.0.1 33053 <nil> <nil>}
I1123 08:41:23.834187 196829 main.go:143] libmachine: About to run SSH command:
sudo hostname old-k8s-version-180638 && echo "old-k8s-version-180638" | sudo tee /etc/hostname
I1123 08:41:23.999609 196829 main.go:143] libmachine: SSH cmd err, output: <nil>: old-k8s-version-180638
I1123 08:41:23.999698 196829 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-180638
I1123 08:41:24.020254 196829 main.go:143] libmachine: Using SSH client type: native
I1123 08:41:24.020584 196829 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3dad70] 0x3dd270 <nil> [] 0s} 127.0.0.1 33053 <nil> <nil>}
I1123 08:41:24.020601 196829 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sold-k8s-version-180638' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 old-k8s-version-180638/g' /etc/hosts;
else
echo '127.0.1.1 old-k8s-version-180638' | sudo tee -a /etc/hosts;
fi
fi
I1123 08:41:24.185924 196829 main.go:143] libmachine: SSH cmd err, output: <nil>:
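Every provisioning command here runs over SSH to the host-forwarded port (127.0.0.1:33053 in this run, per the Ports map in the inspect output) as user "docker", authenticating with the id_rsa key generated above. A minimal sketch of that channel using golang.org/x/crypto/ssh; the key path and port are taken from this log, and error handling is kept minimal:

package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/crypto/ssh"
)

func main() {
	keyBytes, err := os.ReadFile("/home/jenkins/minikube-integration/21966-2339/.minikube/machines/old-k8s-version-180638/id_rsa")
	if err != nil {
		log.Fatal(err)
	}
	signer, err := ssh.ParsePrivateKey(keyBytes)
	if err != nil {
		log.Fatal(err)
	}
	cfg := &ssh.ClientConfig{
		User:            "docker",
		Auth:            []ssh.AuthMethod{ssh.PublicKeys(signer)},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // acceptable for a local test VM only
	}
	client, err := ssh.Dial("tcp", "127.0.0.1:33053", cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	sess, err := client.NewSession()
	if err != nil {
		log.Fatal(err)
	}
	defer sess.Close()

	out, err := sess.CombinedOutput("hostname")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", out)
}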
I1123 08:41:24.185946 196829 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21966-2339/.minikube CaCertPath:/home/jenkins/minikube-integration/21966-2339/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21966-2339/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21966-2339/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21966-2339/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21966-2339/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21966-2339/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21966-2339/.minikube}
I1123 08:41:24.185967 196829 ubuntu.go:190] setting up certificates
I1123 08:41:24.185976 196829 provision.go:84] configureAuth start
I1123 08:41:24.186052 196829 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-180638
I1123 08:41:24.215320 196829 provision.go:143] copyHostCerts
I1123 08:41:24.215378 196829 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-2339/.minikube/ca.pem, removing ...
I1123 08:41:24.215387 196829 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-2339/.minikube/ca.pem
I1123 08:41:24.215451 196829 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-2339/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21966-2339/.minikube/ca.pem (1078 bytes)
I1123 08:41:24.215548 196829 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-2339/.minikube/cert.pem, removing ...
I1123 08:41:24.215553 196829 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-2339/.minikube/cert.pem
I1123 08:41:24.215581 196829 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-2339/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21966-2339/.minikube/cert.pem (1123 bytes)
I1123 08:41:24.215633 196829 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-2339/.minikube/key.pem, removing ...
I1123 08:41:24.215638 196829 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-2339/.minikube/key.pem
I1123 08:41:24.215661 196829 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-2339/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21966-2339/.minikube/key.pem (1675 bytes)
I1123 08:41:24.216026 196829 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21966-2339/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21966-2339/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21966-2339/.minikube/certs/ca-key.pem org=jenkins.old-k8s-version-180638 san=[127.0.0.1 192.168.76.2 localhost minikube old-k8s-version-180638]
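configureAuth above issues a server certificate whose SANs cover 127.0.0.1, 192.168.76.2, localhost, minikube and the profile name. A minimal crypto/x509 sketch producing a certificate with that SAN set; the real flow signs with ca.pem/ca-key.pem, while this sketch self-signs for brevity:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"log"
	"math/big"
	"net"
	"os"
	"time"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{Organization: []string{"jenkins.old-k8s-version-180638"}},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(26280 * time.Hour), // CertExpiration from the config above
		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		// SAN list from the provision.go:117 line above.
		DNSNames:    []string{"localhost", "minikube", "old-k8s-version-180638"},
		IPAddresses: []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("192.168.76.2")},
	}
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		log.Fatal(err)
	}
	if err := pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: der}); err != nil {
		log.Fatal(err)
	}
}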
I1123 08:41:24.624778 196829 provision.go:177] copyRemoteCerts
I1123 08:41:24.624888 196829 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1123 08:41:24.624959 196829 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-180638
I1123 08:41:24.646886 196829 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33053 SSHKeyPath:/home/jenkins/minikube-integration/21966-2339/.minikube/machines/old-k8s-version-180638/id_rsa Username:docker}
I1123 08:41:24.753771 196829 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-2339/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1123 08:41:24.771993 196829 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-2339/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I1123 08:41:24.790069 196829 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-2339/.minikube/machines/server.pem --> /etc/docker/server.pem (1233 bytes)
I1123 08:41:24.807496 196829 provision.go:87] duration metric: took 621.497153ms to configureAuth
I1123 08:41:24.807563 196829 ubuntu.go:206] setting minikube options for container-runtime
I1123 08:41:24.807769 196829 config.go:182] Loaded profile config "old-k8s-version-180638": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1123 08:41:24.807806 196829 machine.go:97] duration metric: took 4.182148274s to provisionDockerMachine
I1123 08:41:24.807853 196829 client.go:176] duration metric: took 11.063877137s to LocalClient.Create
I1123 08:41:24.807895 196829 start.go:167] duration metric: took 11.063966541s to libmachine.API.Create "old-k8s-version-180638"
I1123 08:41:24.807925 196829 start.go:293] postStartSetup for "old-k8s-version-180638" (driver="docker")
I1123 08:41:24.807964 196829 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1123 08:41:24.808042 196829 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1123 08:41:24.808096 196829 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-180638
I1123 08:41:24.825195 196829 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33053 SSHKeyPath:/home/jenkins/minikube-integration/21966-2339/.minikube/machines/old-k8s-version-180638/id_rsa Username:docker}
I1123 08:41:24.930003 196829 ssh_runner.go:195] Run: cat /etc/os-release
I1123 08:41:24.933389 196829 main.go:143] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1123 08:41:24.933440 196829 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1123 08:41:24.933453 196829 filesync.go:126] Scanning /home/jenkins/minikube-integration/21966-2339/.minikube/addons for local assets ...
I1123 08:41:24.933516 196829 filesync.go:126] Scanning /home/jenkins/minikube-integration/21966-2339/.minikube/files for local assets ...
I1123 08:41:24.933597 196829 filesync.go:149] local asset: /home/jenkins/minikube-integration/21966-2339/.minikube/files/etc/ssl/certs/41512.pem -> 41512.pem in /etc/ssl/certs
I1123 08:41:24.933700 196829 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1123 08:41:24.941173 196829 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-2339/.minikube/files/etc/ssl/certs/41512.pem --> /etc/ssl/certs/41512.pem (1708 bytes)
I1123 08:41:24.960763 196829 start.go:296] duration metric: took 152.794115ms for postStartSetup
I1123 08:41:24.961139 196829 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-180638
I1123 08:41:24.978306 196829 profile.go:143] Saving config to /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/config.json ...
I1123 08:41:24.978587 196829 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1123 08:41:24.978642 196829 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-180638
I1123 08:41:24.994847 196829 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33053 SSHKeyPath:/home/jenkins/minikube-integration/21966-2339/.minikube/machines/old-k8s-version-180638/id_rsa Username:docker}
I1123 08:41:25.098792 196829 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1123 08:41:25.103719 196829 start.go:128] duration metric: took 11.363355721s to createHost
I1123 08:41:25.103745 196829 start.go:83] releasing machines lock for "old-k8s-version-180638", held for 11.363481187s
I1123 08:41:25.103820 196829 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-180638
I1123 08:41:25.123598 196829 ssh_runner.go:195] Run: cat /version.json
I1123 08:41:25.123615 196829 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1123 08:41:25.123646 196829 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-180638
I1123 08:41:25.123677 196829 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-180638
I1123 08:41:25.149385 196829 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33053 SSHKeyPath:/home/jenkins/minikube-integration/21966-2339/.minikube/machines/old-k8s-version-180638/id_rsa Username:docker}
I1123 08:41:25.159257 196829 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33053 SSHKeyPath:/home/jenkins/minikube-integration/21966-2339/.minikube/machines/old-k8s-version-180638/id_rsa Username:docker}
I1123 08:41:25.253035 196829 ssh_runner.go:195] Run: systemctl --version
I1123 08:41:25.348445 196829 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1123 08:41:25.352830 196829 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1123 08:41:25.352933 196829 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1123 08:41:25.381383 196829 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/10-crio-bridge.conflist.disabled] bridge cni config(s)
I1123 08:41:25.381469 196829 start.go:496] detecting cgroup driver to use...
I1123 08:41:25.381508 196829 detect.go:187] detected "cgroupfs" cgroup driver on host os
I1123 08:41:25.381570 196829 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1123 08:41:25.397040 196829 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1123 08:41:25.410260 196829 docker.go:218] disabling cri-docker service (if available) ...
I1123 08:41:25.410362 196829 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1123 08:41:25.428008 196829 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1123 08:41:25.447082 196829 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1123 08:41:25.620588 196829 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1123 08:41:25.749588 196829 docker.go:234] disabling docker service ...
I1123 08:41:25.749661 196829 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1123 08:41:25.772076 196829 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1123 08:41:25.784914 196829 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1123 08:41:25.899082 196829 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1123 08:41:26.009981 196829 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1123 08:41:26.025315 196829 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1123 08:41:26.039953 196829 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.9"|' /etc/containerd/config.toml"
I1123 08:41:26.049471 196829 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1123 08:41:26.059847 196829 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I1123 08:41:26.060009 196829 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I1123 08:41:26.069667 196829 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1123 08:41:26.079903 196829 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1123 08:41:26.089816 196829 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1123 08:41:26.099752 196829 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1123 08:41:26.108060 196829 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1123 08:41:26.117585 196829 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1123 08:41:26.126366 196829 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1123 08:41:26.135803 196829 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1123 08:41:26.143649 196829 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1123 08:41:26.151206 196829 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:41:26.281475 196829 ssh_runner.go:195] Run: sudo systemctl restart containerd
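The run of sed commands above rewrites /etc/containerd/config.toml in place (sandbox image, SystemdCgroup = false to match the detected cgroupfs driver, runc v2 shim, CNI conf dir) before restarting containerd. The same SystemdCgroup edit expressed with Go's regexp package, operating on an in-memory copy; the TOML fragment is illustrative:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	config := `[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
    SystemdCgroup = true
`
	// (?m) makes ^ and $ match per line, mirroring sed's line-oriented behavior;
	// the captured leading whitespace preserves indentation, like sed's \1.
	re := regexp.MustCompile(`(?m)^(\s*)SystemdCgroup = .*$`)
	patched := re.ReplaceAllString(config, "${1}SystemdCgroup = false")
	fmt.Print(patched)
	// In the real flow the result is written back to /etc/containerd/config.toml
	// and containerd is restarted, as in the log above.
}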
I1123 08:41:26.394263 196829 start.go:543] Will wait 60s for socket path /run/containerd/containerd.sock
I1123 08:41:26.394379 196829 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I1123 08:41:26.398397 196829 start.go:564] Will wait 60s for crictl version
I1123 08:41:26.398525 196829 ssh_runner.go:195] Run: which crictl
I1123 08:41:26.402050 196829 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1123 08:41:26.433447 196829 start.go:580] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: v2.1.5
RuntimeApiVersion: v1
I1123 08:41:26.433548 196829 ssh_runner.go:195] Run: containerd --version
I1123 08:41:26.456534 196829 ssh_runner.go:195] Run: containerd --version
I1123 08:41:26.486458 196829 out.go:179] * Preparing Kubernetes v1.28.0 on containerd 2.1.5 ...
I1123 08:41:26.489565 196829 cli_runner.go:164] Run: docker network inspect old-k8s-version-180638 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1123 08:41:26.507660 196829 ssh_runner.go:195] Run: grep 192.168.76.1 host.minikube.internal$ /etc/hosts
I1123 08:41:26.511689 196829 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.76.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1123 08:41:26.521591 196829 kubeadm.go:884] updating cluster {Name:old-k8s-version-180638 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-180638 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1123 08:41:26.521716 196829 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1123 08:41:26.521782 196829 ssh_runner.go:195] Run: sudo crictl images --output json
I1123 08:41:26.552790 196829 containerd.go:627] all images are preloaded for containerd runtime.
I1123 08:41:26.552815 196829 containerd.go:534] Images already preloaded, skipping extraction
I1123 08:41:26.552879 196829 ssh_runner.go:195] Run: sudo crictl images --output json
I1123 08:41:26.589503 196829 containerd.go:627] all images are preloaded for containerd runtime.
I1123 08:41:26.589526 196829 cache_images.go:86] Images are preloaded, skipping loading
I1123 08:41:26.589533 196829 kubeadm.go:935] updating node { 192.168.76.2 8443 v1.28.0 containerd true true} ...
I1123 08:41:26.589674 196829 kubeadm.go:947] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.28.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=old-k8s-version-180638 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.76.2
[Install]
config:
{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-180638 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1123 08:41:26.589739 196829 ssh_runner.go:195] Run: sudo crictl info
I1123 08:41:26.615213 196829 cni.go:84] Creating CNI manager for ""
I1123 08:41:26.615295 196829 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1123 08:41:26.615324 196829 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1123 08:41:26.615377 196829 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.76.2 APIServerPort:8443 KubernetesVersion:v1.28.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:old-k8s-version-180638 NodeName:old-k8s-version-180638 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.76.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.76.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1123 08:41:26.615549 196829 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.76.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///run/containerd/containerd.sock
name: "old-k8s-version-180638"
kubeletExtraArgs:
node-ip: 192.168.76.2
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.76.2"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.28.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
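Annotation: the rendered kubeadm.yaml above is a four-document YAML stream (InitConfiguration, ClusterConfiguration, KubeletConfiguration, KubeProxyConfiguration). A hedged Go sketch that walks such a stream and lists each document's kind, using gopkg.in/yaml.v3; the file path is an assumption for the example, and this is not how minikube itself consumes the file:

package main

import (
	"errors"
	"fmt"
	"io"
	"os"

	"gopkg.in/yaml.v3"
)

func main() {
	f, err := os.Open("kubeadm.yaml") // hypothetical local copy of the rendered config
	if err != nil {
		panic(err)
	}
	defer f.Close()

	dec := yaml.NewDecoder(f)
	for {
		var doc map[string]interface{}
		if err := dec.Decode(&doc); errors.Is(err, io.EOF) {
			break // end of the multi-document stream
		} else if err != nil {
			panic(err)
		}
		fmt.Printf("%v/%v\n", doc["apiVersion"], doc["kind"])
	}
}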
I1123 08:41:26.615640 196829 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.28.0
I1123 08:41:26.623537 196829 binaries.go:51] Found k8s binaries, skipping transfer
I1123 08:41:26.623635 196829 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1123 08:41:26.631295 196829 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (326 bytes)
I1123 08:41:26.643882 196829 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1123 08:41:26.657243 196829 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2176 bytes)
I1123 08:41:26.669640 196829 ssh_runner.go:195] Run: grep 192.168.76.2 control-plane.minikube.internal$ /etc/hosts
I1123 08:41:26.673282 196829 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.76.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
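Annotation: the bash one-liner above strips any existing control-plane.minikube.internal entry from /etc/hosts and re-appends the current IP, so repeated starts stay idempotent. Equivalent logic in Go (a sketch of the idea; minikube actually runs the shell pipeline shown):

package main

import (
	"fmt"
	"os"
	"strings"
)

// upsertHost returns the hosts content with exactly one entry mapping name to ip.
func upsertHost(hosts, ip, name string) string {
	var kept []string
	for _, line := range strings.Split(strings.TrimRight(hosts, "\n"), "\n") {
		fields := strings.Fields(line)
		// Drop any stale line that already maps this hostname.
		if len(fields) > 0 && fields[len(fields)-1] == name {
			continue
		}
		kept = append(kept, line)
	}
	kept = append(kept, ip+"\t"+name)
	return strings.Join(kept, "\n") + "\n"
}

func main() {
	data, err := os.ReadFile("/etc/hosts")
	if err != nil {
		panic(err)
	}
	fmt.Print(upsertHost(string(data), "192.168.76.2", "control-plane.minikube.internal"))
}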
I1123 08:41:26.685864 196829 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:41:26.794513 196829 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1123 08:41:26.810973 196829 certs.go:69] Setting up /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638 for IP: 192.168.76.2
I1123 08:41:26.811039 196829 certs.go:195] generating shared ca certs ...
I1123 08:41:26.811080 196829 certs.go:227] acquiring lock for ca certs: {Name:mke0fc62f41acbef5eb3e84af3a3b8f9858bd1fc Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:41:26.811250 196829 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21966-2339/.minikube/ca.key
I1123 08:41:26.811333 196829 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21966-2339/.minikube/proxy-client-ca.key
I1123 08:41:26.811355 196829 certs.go:257] generating profile certs ...
I1123 08:41:26.811440 196829 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/client.key
I1123 08:41:26.811477 196829 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/client.crt with IP's: []
I1123 08:41:26.973605 196829 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/client.crt ...
I1123 08:41:26.973639 196829 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/client.crt: {Name:mke32e0874274fa8086c901b1e6afbf9faff17cf Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:41:26.973836 196829 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/client.key ...
I1123 08:41:26.973854 196829 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/client.key: {Name:mk164b3f8143768da540cf1b000f576503ef0774 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:41:26.974478 196829 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/apiserver.key.28528907
I1123 08:41:26.974505 196829 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/apiserver.crt.28528907 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.76.2]
I1123 08:41:27.162797 196829 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/apiserver.crt.28528907 ...
I1123 08:41:27.162827 196829 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/apiserver.crt.28528907: {Name:mk89f25fc4240f5ec0b53706cf7a05d65ec41dcd Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:41:27.163533 196829 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/apiserver.key.28528907 ...
I1123 08:41:27.163550 196829 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/apiserver.key.28528907: {Name:mkceae69a15be6eedc78c0f192aa68e5077c2c60 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:41:27.164156 196829 certs.go:382] copying /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/apiserver.crt.28528907 -> /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/apiserver.crt
I1123 08:41:27.164252 196829 certs.go:386] copying /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/apiserver.key.28528907 -> /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/apiserver.key
I1123 08:41:27.164317 196829 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/proxy-client.key
I1123 08:41:27.164337 196829 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/proxy-client.crt with IP's: []
I1123 08:41:27.589335 196829 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/proxy-client.crt ...
I1123 08:41:27.589366 196829 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/proxy-client.crt: {Name:mk5e88fa47e7c5af72b6e967a38cd87e0cc58d20 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:41:27.590109 196829 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/proxy-client.key ...
I1123 08:41:27.590126 196829 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/proxy-client.key: {Name:mka6f06ef565fc329562ab2f39faf7c67e598a55 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
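Annotation: each "generating signed profile cert" step above boils down to crypto/x509 calls. A minimal self-signed sketch of the standard-library core (minikube's real helpers in crypto.go sign with the profile CA rather than self-signing; the SAN IPs mirror the apiserver cert list in the log):

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"math/big"
	"net"
	"os"
	"time"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: "minikube-user"},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(24 * time.Hour),
		// SANs comparable to the apiserver cert's IP list above.
		IPAddresses: []net.IP{net.ParseIP("10.96.0.1"), net.ParseIP("192.168.76.2")},
	}
	// Self-signed here (template doubles as parent); minikube signs with its CA.
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		panic(err)
	}
	pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: der})
}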
I1123 08:41:27.590847 196829 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-2339/.minikube/certs/4151.pem (1338 bytes)
W1123 08:41:27.590897 196829 certs.go:480] ignoring /home/jenkins/minikube-integration/21966-2339/.minikube/certs/4151_empty.pem, impossibly tiny 0 bytes
I1123 08:41:27.590910 196829 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-2339/.minikube/certs/ca-key.pem (1679 bytes)
I1123 08:41:27.590954 196829 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-2339/.minikube/certs/ca.pem (1078 bytes)
I1123 08:41:27.590984 196829 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-2339/.minikube/certs/cert.pem (1123 bytes)
I1123 08:41:27.591012 196829 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-2339/.minikube/certs/key.pem (1675 bytes)
I1123 08:41:27.591064 196829 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-2339/.minikube/files/etc/ssl/certs/41512.pem (1708 bytes)
I1123 08:41:27.591653 196829 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-2339/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1123 08:41:27.611397 196829 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-2339/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1123 08:41:27.628655 196829 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-2339/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1123 08:41:27.646428 196829 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-2339/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I1123 08:41:27.663648 196829 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1432 bytes)
I1123 08:41:27.680373 196829 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I1123 08:41:27.697528 196829 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1123 08:41:27.718625 196829 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-2339/.minikube/profiles/old-k8s-version-180638/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I1123 08:41:27.735969 196829 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-2339/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1123 08:41:27.753670 196829 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-2339/.minikube/certs/4151.pem --> /usr/share/ca-certificates/4151.pem (1338 bytes)
I1123 08:41:27.772203 196829 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-2339/.minikube/files/etc/ssl/certs/41512.pem --> /usr/share/ca-certificates/41512.pem (1708 bytes)
I1123 08:41:27.790388 196829 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1123 08:41:27.803782 196829 ssh_runner.go:195] Run: openssl version
I1123 08:41:27.810231 196829 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/4151.pem && ln -fs /usr/share/ca-certificates/4151.pem /etc/ssl/certs/4151.pem"
I1123 08:41:27.818398 196829 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/4151.pem
I1123 08:41:27.822235 196829 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 23 08:02 /usr/share/ca-certificates/4151.pem
I1123 08:41:27.822298 196829 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/4151.pem
I1123 08:41:27.864039 196829 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/4151.pem /etc/ssl/certs/51391683.0"
I1123 08:41:27.872287 196829 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/41512.pem && ln -fs /usr/share/ca-certificates/41512.pem /etc/ssl/certs/41512.pem"
I1123 08:41:27.880642 196829 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/41512.pem
I1123 08:41:27.884373 196829 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 23 08:02 /usr/share/ca-certificates/41512.pem
I1123 08:41:27.884446 196829 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/41512.pem
I1123 08:41:27.925706 196829 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/41512.pem /etc/ssl/certs/3ec20f2e.0"
I1123 08:41:27.933986 196829 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1123 08:41:27.942212 196829 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1123 08:41:27.945912 196829 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 23 07:56 /usr/share/ca-certificates/minikubeCA.pem
I1123 08:41:27.945995 196829 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1123 08:41:27.987134 196829 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
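Annotation: the openssl x509 -hash calls above compute the subject hash that OpenSSL's certificate-directory lookup expects; the <hash>.0 symlinks (51391683.0, 3ec20f2e.0, b5213941.0 in this run) are what make each CA trusted system-wide. A sketch of the same two steps from Go via os/exec, assuming openssl is on PATH as it is inside the node container:

package main

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
)

func trustCert(pemPath string) error {
	// Ask openssl for the subject hash, exactly as the log does.
	out, err := exec.Command("openssl", "x509", "-hash", "-noout", "-in", pemPath).Output()
	if err != nil {
		return err
	}
	hash := strings.TrimSpace(string(out))
	link := fmt.Sprintf("/etc/ssl/certs/%s.0", hash)
	// Recreate the symlink idempotently, like `ln -fs`.
	os.Remove(link)
	return os.Symlink(pemPath, link)
}

func main() {
	if err := trustCert("/usr/share/ca-certificates/minikubeCA.pem"); err != nil {
		panic(err)
	}
}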
I1123 08:41:27.995374 196829 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1123 08:41:27.999559 196829 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
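Annotation: the exit-status-1 stat above is how minikube distinguishes a first start from a restart. In Go the same probe is an os.Stat plus an errors.Is check; a sketch of the idea (minikube's actual code shells out over SSH as shown):

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	const cert = "/var/lib/minikube/certs/apiserver-kubelet-client.crt"
	if _, err := os.Stat(cert); errors.Is(err, fs.ErrNotExist) {
		fmt.Println("cert missing: likely first start, kubeadm init will create it")
	} else if err != nil {
		fmt.Println("unexpected stat error:", err)
	} else {
		fmt.Println("cert present: treating this as a restart")
	}
}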
I1123 08:41:27.999640 196829 kubeadm.go:401] StartCluster: {Name:old-k8s-version-180638 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-180638 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1123 08:41:27.999724 196829 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I1123 08:41:27.999901 196829 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I1123 08:41:28.030022 196829 cri.go:89] found id: ""
I1123 08:41:28.030090 196829 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1123 08:41:28.038618 196829 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1123 08:41:28.046519 196829 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I1123 08:41:28.046606 196829 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1123 08:41:28.054666 196829 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1123 08:41:28.054688 196829 kubeadm.go:158] found existing configuration files:
I1123 08:41:28.054763 196829 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1123 08:41:28.062722 196829 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1123 08:41:28.062824 196829 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1123 08:41:28.070543 196829 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1123 08:41:28.078377 196829 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1123 08:41:28.078469 196829 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1123 08:41:28.085999 196829 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1123 08:41:28.093970 196829 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1123 08:41:28.094044 196829 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1123 08:41:28.101534 196829 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1123 08:41:28.109634 196829 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1123 08:41:28.109755 196829 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1123 08:41:28.117144 196829 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.28.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1123 08:41:28.212901 196829 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1084-aws\n", err: exit status 1
I1123 08:41:28.307897 196829 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1123 08:41:46.723355 196829 kubeadm.go:319] [init] Using Kubernetes version: v1.28.0
I1123 08:41:46.723418 196829 kubeadm.go:319] [preflight] Running pre-flight checks
I1123 08:41:46.723506 196829 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I1123 08:41:46.723561 196829 kubeadm.go:319] KERNEL_VERSION: 5.15.0-1084-aws
I1123 08:41:46.723595 196829 kubeadm.go:319] OS: Linux
I1123 08:41:46.723640 196829 kubeadm.go:319] CGROUPS_CPU: enabled
I1123 08:41:46.723688 196829 kubeadm.go:319] CGROUPS_CPUACCT: enabled
I1123 08:41:46.723735 196829 kubeadm.go:319] CGROUPS_CPUSET: enabled
I1123 08:41:46.723783 196829 kubeadm.go:319] CGROUPS_DEVICES: enabled
I1123 08:41:46.723830 196829 kubeadm.go:319] CGROUPS_FREEZER: enabled
I1123 08:41:46.723879 196829 kubeadm.go:319] CGROUPS_MEMORY: enabled
I1123 08:41:46.723925 196829 kubeadm.go:319] CGROUPS_PIDS: enabled
I1123 08:41:46.723972 196829 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I1123 08:41:46.724018 196829 kubeadm.go:319] CGROUPS_BLKIO: enabled
I1123 08:41:46.724090 196829 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1123 08:41:46.724184 196829 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1123 08:41:46.724277 196829 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I1123 08:41:46.724339 196829 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1123 08:41:46.730394 196829 out.go:252] - Generating certificates and keys ...
I1123 08:41:46.730493 196829 kubeadm.go:319] [certs] Using existing ca certificate authority
I1123 08:41:46.730559 196829 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1123 08:41:46.730625 196829 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I1123 08:41:46.730681 196829 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I1123 08:41:46.730740 196829 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1123 08:41:46.730789 196829 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1123 08:41:46.730843 196829 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1123 08:41:46.730979 196829 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [localhost old-k8s-version-180638] and IPs [192.168.76.2 127.0.0.1 ::1]
I1123 08:41:46.731033 196829 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1123 08:41:46.731156 196829 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [localhost old-k8s-version-180638] and IPs [192.168.76.2 127.0.0.1 ::1]
I1123 08:41:46.731221 196829 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1123 08:41:46.731283 196829 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1123 08:41:46.731327 196829 kubeadm.go:319] [certs] Generating "sa" key and public key
I1123 08:41:46.731382 196829 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1123 08:41:46.731432 196829 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1123 08:41:46.731487 196829 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1123 08:41:46.731552 196829 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1123 08:41:46.731606 196829 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1123 08:41:46.731687 196829 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1123 08:41:46.732404 196829 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1123 08:41:46.735499 196829 out.go:252] - Booting up control plane ...
I1123 08:41:46.735693 196829 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1123 08:41:46.735790 196829 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1123 08:41:46.735869 196829 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1123 08:41:46.735991 196829 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1123 08:41:46.736083 196829 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1123 08:41:46.736124 196829 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1123 08:41:46.736298 196829 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
I1123 08:41:46.736379 196829 kubeadm.go:319] [apiclient] All control plane components are healthy after 7.016975 seconds
I1123 08:41:46.736508 196829 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1123 08:41:46.736649 196829 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1123 08:41:46.736716 196829 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1123 08:41:46.737049 196829 kubeadm.go:319] [mark-control-plane] Marking the node old-k8s-version-180638 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1123 08:41:46.737114 196829 kubeadm.go:319] [bootstrap-token] Using token: 89uxh1.yt288j2wm2p51h2c
I1123 08:41:46.740440 196829 out.go:252] - Configuring RBAC rules ...
I1123 08:41:46.740562 196829 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1123 08:41:46.740658 196829 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1123 08:41:46.740805 196829 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1123 08:41:46.740950 196829 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1123 08:41:46.741070 196829 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1123 08:41:46.741162 196829 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1123 08:41:46.741276 196829 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1123 08:41:46.741318 196829 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1123 08:41:46.741363 196829 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1123 08:41:46.741369 196829 kubeadm.go:319]
I1123 08:41:46.741466 196829 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1123 08:41:46.741471 196829 kubeadm.go:319]
I1123 08:41:46.741547 196829 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1123 08:41:46.741551 196829 kubeadm.go:319]
I1123 08:41:46.741575 196829 kubeadm.go:319] mkdir -p $HOME/.kube
I1123 08:41:46.741639 196829 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1123 08:41:46.741693 196829 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1123 08:41:46.741696 196829 kubeadm.go:319]
I1123 08:41:46.741757 196829 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1123 08:41:46.741761 196829 kubeadm.go:319]
I1123 08:41:46.741808 196829 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1123 08:41:46.741811 196829 kubeadm.go:319]
I1123 08:41:46.741868 196829 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1123 08:41:46.741944 196829 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1123 08:41:46.742020 196829 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1123 08:41:46.742024 196829 kubeadm.go:319]
I1123 08:41:46.742111 196829 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1123 08:41:46.742188 196829 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1123 08:41:46.742192 196829 kubeadm.go:319]
I1123 08:41:46.742277 196829 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token 89uxh1.yt288j2wm2p51h2c \
I1123 08:41:46.742380 196829 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:4f35f48b47910e0f0424b1b0ace7d03cfc1e6ef5b162b679e98eef4f3a64a5a5 \
I1123 08:41:46.742400 196829 kubeadm.go:319] --control-plane
I1123 08:41:46.742404 196829 kubeadm.go:319]
I1123 08:41:46.742493 196829 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1123 08:41:46.742497 196829 kubeadm.go:319]
I1123 08:41:46.742578 196829 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token 89uxh1.yt288j2wm2p51h2c \
I1123 08:41:46.742696 196829 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:4f35f48b47910e0f0424b1b0ace7d03cfc1e6ef5b162b679e98eef4f3a64a5a5
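Annotation: the --discovery-token-ca-cert-hash printed above is a sha256 over the DER-encoded SubjectPublicKeyInfo of the cluster CA certificate, which lets joining nodes pin the CA without prior TLS trust. A sketch recomputing it from this run's ca.crt path:

package main

import (
	"crypto/sha256"
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
)

func main() {
	data, err := os.ReadFile("/var/lib/minikube/certs/ca.crt")
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		panic("no PEM block in ca.crt")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}
	// kubeadm hashes the DER-encoded SubjectPublicKeyInfo, not the whole cert.
	spki, err := x509.MarshalPKIXPublicKey(cert.PublicKey)
	if err != nil {
		panic(err)
	}
	fmt.Printf("sha256:%x\n", sha256.Sum256(spki))
}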
I1123 08:41:46.742705 196829 cni.go:84] Creating CNI manager for ""
I1123 08:41:46.742712 196829 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1123 08:41:46.747905 196829 out.go:179] * Configuring CNI (Container Networking Interface) ...
I1123 08:41:46.750796 196829 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I1123 08:41:46.761561 196829 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.28.0/kubectl ...
I1123 08:41:46.761582 196829 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I1123 08:41:46.780526 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I1123 08:41:47.782764 196829 ssh_runner.go:235] Completed: sudo /var/lib/minikube/binaries/v1.28.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml: (1.002206277s)
I1123 08:41:47.782810 196829 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1123 08:41:47.782925 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:47.783012 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes old-k8s-version-180638 minikube.k8s.io/updated_at=2025_11_23T08_41_47_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=3e219827a5f064cf736992b79e59864301ece66e minikube.k8s.io/name=old-k8s-version-180638 minikube.k8s.io/primary=true
I1123 08:41:47.996747 196829 ops.go:34] apiserver oom_adj: -16
I1123 08:41:47.996865 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:48.497263 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:48.997587 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:49.497238 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:49.996982 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:50.497817 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:50.996983 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:51.497681 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:51.997616 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:52.497659 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:52.997821 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:53.497324 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:53.997887 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:54.496981 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:54.996975 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:55.496982 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:55.997716 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:56.497689 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:56.997844 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:57.497606 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:57.997246 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:58.497272 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:58.997225 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:59.497615 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:41:59.996938 196829 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:42:00.373202 196829 kubeadm.go:1114] duration metric: took 12.590316137s to wait for elevateKubeSystemPrivileges
I1123 08:42:00.373235 196829 kubeadm.go:403] duration metric: took 32.37359943s to StartCluster
I1123 08:42:00.373254 196829 settings.go:142] acquiring lock: {Name:mkfb77243b31dfe604b438e7da3f1bce2ba7b5a6 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:42:00.373329 196829 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21966-2339/kubeconfig
I1123 08:42:00.374576 196829 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-2339/kubeconfig: {Name:mka042f83263da2d190b70c2277735bf705fab5c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:42:00.374865 196829 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1123 08:42:00.375126 196829 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1123 08:42:00.375440 196829 config.go:182] Loaded profile config "old-k8s-version-180638": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1123 08:42:00.375497 196829 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1123 08:42:00.375560 196829 addons.go:70] Setting storage-provisioner=true in profile "old-k8s-version-180638"
I1123 08:42:00.375575 196829 addons.go:239] Setting addon storage-provisioner=true in "old-k8s-version-180638"
I1123 08:42:00.375597 196829 host.go:66] Checking if "old-k8s-version-180638" exists ...
I1123 08:42:00.375813 196829 addons.go:70] Setting default-storageclass=true in profile "old-k8s-version-180638"
I1123 08:42:00.375848 196829 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "old-k8s-version-180638"
I1123 08:42:00.376308 196829 cli_runner.go:164] Run: docker container inspect old-k8s-version-180638 --format={{.State.Status}}
I1123 08:42:00.376539 196829 cli_runner.go:164] Run: docker container inspect old-k8s-version-180638 --format={{.State.Status}}
I1123 08:42:00.379011 196829 out.go:179] * Verifying Kubernetes components...
I1123 08:42:00.382111 196829 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:42:00.428496 196829 addons.go:239] Setting addon default-storageclass=true in "old-k8s-version-180638"
I1123 08:42:00.428566 196829 host.go:66] Checking if "old-k8s-version-180638" exists ...
I1123 08:42:00.429356 196829 cli_runner.go:164] Run: docker container inspect old-k8s-version-180638 --format={{.State.Status}}
I1123 08:42:00.444047 196829 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1123 08:42:00.448509 196829 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1123 08:42:00.448558 196829 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1123 08:42:00.448647 196829 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-180638
I1123 08:42:00.472475 196829 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1123 08:42:00.472504 196829 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1123 08:42:00.472636 196829 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-180638
I1123 08:42:00.490205 196829 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33053 SSHKeyPath:/home/jenkins/minikube-integration/21966-2339/.minikube/machines/old-k8s-version-180638/id_rsa Username:docker}
I1123 08:42:00.514193 196829 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33053 SSHKeyPath:/home/jenkins/minikube-integration/21966-2339/.minikube/machines/old-k8s-version-180638/id_rsa Username:docker}
I1123 08:42:00.878161 196829 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.76.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I1123 08:42:00.878301 196829 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1123 08:42:00.916437 196829 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1123 08:42:01.023971 196829 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1123 08:42:01.723716 196829 start.go:977] {"host.minikube.internal": 192.168.76.1} host record injected into CoreDNS's ConfigMap
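Annotation: the sed pipeline a few lines up injects a hosts plugin block above CoreDNS's forward directive (and a log directive after errors), so in-cluster pods can resolve host.minikube.internal. Reconstructed from the sed expressions in that command, the affected Corefile region ends up looking roughly like:

    errors
    log
    hosts {
       192.168.76.1 host.minikube.internal
       fallthrough
    }
    forward . /etc/resolv.conf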
I1123 08:42:01.726193 196829 node_ready.go:35] waiting up to 6m0s for node "old-k8s-version-180638" to be "Ready" ...
I1123 08:42:02.171067 196829 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (1.147020479s)
I1123 08:42:02.174415 196829 out.go:179] * Enabled addons: default-storageclass, storage-provisioner
I1123 08:42:02.177439 196829 addons.go:530] duration metric: took 1.801906906s for enable addons: enabled=[default-storageclass storage-provisioner]
I1123 08:42:02.232613 196829 kapi.go:214] "coredns" deployment in "kube-system" namespace and "old-k8s-version-180638" context rescaled to 1 replicas
W1123 08:42:03.730244 196829 node_ready.go:57] node "old-k8s-version-180638" has "Ready":"False" status (will retry)
W1123 08:42:06.235867 196829 node_ready.go:57] node "old-k8s-version-180638" has "Ready":"False" status (will retry)
W1123 08:42:08.729375 196829 node_ready.go:57] node "old-k8s-version-180638" has "Ready":"False" status (will retry)
W1123 08:42:10.729575 196829 node_ready.go:57] node "old-k8s-version-180638" has "Ready":"False" status (will retry)
W1123 08:42:12.729904 196829 node_ready.go:57] node "old-k8s-version-180638" has "Ready":"False" status (will retry)
I1123 08:42:13.730112 196829 node_ready.go:49] node "old-k8s-version-180638" is "Ready"
I1123 08:42:13.730141 196829 node_ready.go:38] duration metric: took 12.003828725s for node "old-k8s-version-180638" to be "Ready" ...
I1123 08:42:13.730157 196829 api_server.go:52] waiting for apiserver process to appear ...
I1123 08:42:13.730215 196829 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1123 08:42:13.742876 196829 api_server.go:72] duration metric: took 13.367936978s to wait for apiserver process to appear ...
I1123 08:42:13.742904 196829 api_server.go:88] waiting for apiserver healthz status ...
I1123 08:42:13.742928 196829 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1123 08:42:13.752538 196829 api_server.go:279] https://192.168.76.2:8443/healthz returned 200:
ok
I1123 08:42:13.753958 196829 api_server.go:141] control plane version: v1.28.0
I1123 08:42:13.753984 196829 api_server.go:131] duration metric: took 11.072911ms to wait for apiserver health ...
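Annotation: the healthz wait above is a plain HTTPS GET against the apiserver endpoint. A sketch of the probe; certificate verification is skipped here for brevity, which is an assumption of the example (real code should load /var/lib/minikube/certs/ca.crt into a RootCAs pool):

package main

import (
	"crypto/tls"
	"fmt"
	"io"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{
		Timeout: 5 * time.Second,
		// Sketch-only shortcut: trust any server cert.
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}
	resp, err := client.Get("https://192.168.76.2:8443/healthz")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(body)) // a healthy apiserver returns: 200 ok
}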
I1123 08:42:13.753994 196829 system_pods.go:43] waiting for kube-system pods to appear ...
I1123 08:42:13.757334 196829 system_pods.go:59] 8 kube-system pods found
I1123 08:42:13.757377 196829 system_pods.go:61] "coredns-5dd5756b68-q4lbv" [9a14996d-e910-4a4f-a6f6-f2d8565a4b9c] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1123 08:42:13.757384 196829 system_pods.go:61] "etcd-old-k8s-version-180638" [d7e82a35-eda7-493b-8f80-319fff10e0a8] Running
I1123 08:42:13.757390 196829 system_pods.go:61] "kindnet-mrfgl" [53d90f3f-687b-45a0-a344-321a75f38a20] Running
I1123 08:42:13.757394 196829 system_pods.go:61] "kube-apiserver-old-k8s-version-180638" [6d727a9f-96a5-47f1-8676-3463c38e31e8] Running
I1123 08:42:13.757398 196829 system_pods.go:61] "kube-controller-manager-old-k8s-version-180638" [92875b86-8bd3-4b30-acdd-2c65db14c97e] Running
I1123 08:42:13.757402 196829 system_pods.go:61] "kube-proxy-dk6g5" [27bc489f-26f8-4848-9df2-6530dcad7423] Running
I1123 08:42:13.757449 196829 system_pods.go:61] "kube-scheduler-old-k8s-version-180638" [76e55a3f-6b02-43c4-ae79-01300e9dd2c6] Running
I1123 08:42:13.757461 196829 system_pods.go:61] "storage-provisioner" [fa923b06-d896-468f-8e82-51b4e9df88dc] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1123 08:42:13.757470 196829 system_pods.go:74] duration metric: took 3.469421ms to wait for pod list to return data ...
I1123 08:42:13.757483 196829 default_sa.go:34] waiting for default service account to be created ...
I1123 08:42:13.759772 196829 default_sa.go:45] found service account: "default"
I1123 08:42:13.759795 196829 default_sa.go:55] duration metric: took 2.306419ms for default service account to be created ...
I1123 08:42:13.759805 196829 system_pods.go:116] waiting for k8s-apps to be running ...
I1123 08:42:13.764346 196829 system_pods.go:86] 8 kube-system pods found
I1123 08:42:13.764381 196829 system_pods.go:89] "coredns-5dd5756b68-q4lbv" [9a14996d-e910-4a4f-a6f6-f2d8565a4b9c] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1123 08:42:13.764387 196829 system_pods.go:89] "etcd-old-k8s-version-180638" [d7e82a35-eda7-493b-8f80-319fff10e0a8] Running
I1123 08:42:13.764393 196829 system_pods.go:89] "kindnet-mrfgl" [53d90f3f-687b-45a0-a344-321a75f38a20] Running
I1123 08:42:13.764398 196829 system_pods.go:89] "kube-apiserver-old-k8s-version-180638" [6d727a9f-96a5-47f1-8676-3463c38e31e8] Running
I1123 08:42:13.764402 196829 system_pods.go:89] "kube-controller-manager-old-k8s-version-180638" [92875b86-8bd3-4b30-acdd-2c65db14c97e] Running
I1123 08:42:13.764426 196829 system_pods.go:89] "kube-proxy-dk6g5" [27bc489f-26f8-4848-9df2-6530dcad7423] Running
I1123 08:42:13.764438 196829 system_pods.go:89] "kube-scheduler-old-k8s-version-180638" [76e55a3f-6b02-43c4-ae79-01300e9dd2c6] Running
I1123 08:42:13.764445 196829 system_pods.go:89] "storage-provisioner" [fa923b06-d896-468f-8e82-51b4e9df88dc] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1123 08:42:13.764468 196829 retry.go:31] will retry after 231.795609ms: missing components: kube-dns
I1123 08:42:14.002188 196829 system_pods.go:86] 8 kube-system pods found
I1123 08:42:14.002226 196829 system_pods.go:89] "coredns-5dd5756b68-q4lbv" [9a14996d-e910-4a4f-a6f6-f2d8565a4b9c] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1123 08:42:14.002234 196829 system_pods.go:89] "etcd-old-k8s-version-180638" [d7e82a35-eda7-493b-8f80-319fff10e0a8] Running
I1123 08:42:14.002241 196829 system_pods.go:89] "kindnet-mrfgl" [53d90f3f-687b-45a0-a344-321a75f38a20] Running
I1123 08:42:14.002290 196829 system_pods.go:89] "kube-apiserver-old-k8s-version-180638" [6d727a9f-96a5-47f1-8676-3463c38e31e8] Running
I1123 08:42:14.002297 196829 system_pods.go:89] "kube-controller-manager-old-k8s-version-180638" [92875b86-8bd3-4b30-acdd-2c65db14c97e] Running
I1123 08:42:14.002309 196829 system_pods.go:89] "kube-proxy-dk6g5" [27bc489f-26f8-4848-9df2-6530dcad7423] Running
I1123 08:42:14.002313 196829 system_pods.go:89] "kube-scheduler-old-k8s-version-180638" [76e55a3f-6b02-43c4-ae79-01300e9dd2c6] Running
I1123 08:42:14.002319 196829 system_pods.go:89] "storage-provisioner" [fa923b06-d896-468f-8e82-51b4e9df88dc] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1123 08:42:14.002358 196829 retry.go:31] will retry after 309.541133ms: missing components: kube-dns
I1123 08:42:14.316329 196829 system_pods.go:86] 8 kube-system pods found
I1123 08:42:14.316371 196829 system_pods.go:89] "coredns-5dd5756b68-q4lbv" [9a14996d-e910-4a4f-a6f6-f2d8565a4b9c] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1123 08:42:14.316378 196829 system_pods.go:89] "etcd-old-k8s-version-180638" [d7e82a35-eda7-493b-8f80-319fff10e0a8] Running
I1123 08:42:14.316410 196829 system_pods.go:89] "kindnet-mrfgl" [53d90f3f-687b-45a0-a344-321a75f38a20] Running
I1123 08:42:14.316416 196829 system_pods.go:89] "kube-apiserver-old-k8s-version-180638" [6d727a9f-96a5-47f1-8676-3463c38e31e8] Running
I1123 08:42:14.316420 196829 system_pods.go:89] "kube-controller-manager-old-k8s-version-180638" [92875b86-8bd3-4b30-acdd-2c65db14c97e] Running
I1123 08:42:14.316425 196829 system_pods.go:89] "kube-proxy-dk6g5" [27bc489f-26f8-4848-9df2-6530dcad7423] Running
I1123 08:42:14.316453 196829 system_pods.go:89] "kube-scheduler-old-k8s-version-180638" [76e55a3f-6b02-43c4-ae79-01300e9dd2c6] Running
I1123 08:42:14.316462 196829 system_pods.go:89] "storage-provisioner" [fa923b06-d896-468f-8e82-51b4e9df88dc] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1123 08:42:14.316487 196829 retry.go:31] will retry after 469.87728ms: missing components: kube-dns
I1123 08:42:14.791058 196829 system_pods.go:86] 8 kube-system pods found
I1123 08:42:14.791093 196829 system_pods.go:89] "coredns-5dd5756b68-q4lbv" [9a14996d-e910-4a4f-a6f6-f2d8565a4b9c] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1123 08:42:14.791100 196829 system_pods.go:89] "etcd-old-k8s-version-180638" [d7e82a35-eda7-493b-8f80-319fff10e0a8] Running
I1123 08:42:14.791106 196829 system_pods.go:89] "kindnet-mrfgl" [53d90f3f-687b-45a0-a344-321a75f38a20] Running
I1123 08:42:14.791110 196829 system_pods.go:89] "kube-apiserver-old-k8s-version-180638" [6d727a9f-96a5-47f1-8676-3463c38e31e8] Running
I1123 08:42:14.791115 196829 system_pods.go:89] "kube-controller-manager-old-k8s-version-180638" [92875b86-8bd3-4b30-acdd-2c65db14c97e] Running
I1123 08:42:14.791119 196829 system_pods.go:89] "kube-proxy-dk6g5" [27bc489f-26f8-4848-9df2-6530dcad7423] Running
I1123 08:42:14.791123 196829 system_pods.go:89] "kube-scheduler-old-k8s-version-180638" [76e55a3f-6b02-43c4-ae79-01300e9dd2c6] Running
I1123 08:42:14.791129 196829 system_pods.go:89] "storage-provisioner" [fa923b06-d896-468f-8e82-51b4e9df88dc] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1123 08:42:14.791144 196829 retry.go:31] will retry after 367.579223ms: missing components: kube-dns
I1123 08:42:15.163345 196829 system_pods.go:86] 8 kube-system pods found
I1123 08:42:15.163377 196829 system_pods.go:89] "coredns-5dd5756b68-q4lbv" [9a14996d-e910-4a4f-a6f6-f2d8565a4b9c] Running
I1123 08:42:15.163384 196829 system_pods.go:89] "etcd-old-k8s-version-180638" [d7e82a35-eda7-493b-8f80-319fff10e0a8] Running
I1123 08:42:15.163388 196829 system_pods.go:89] "kindnet-mrfgl" [53d90f3f-687b-45a0-a344-321a75f38a20] Running
I1123 08:42:15.163393 196829 system_pods.go:89] "kube-apiserver-old-k8s-version-180638" [6d727a9f-96a5-47f1-8676-3463c38e31e8] Running
I1123 08:42:15.163398 196829 system_pods.go:89] "kube-controller-manager-old-k8s-version-180638" [92875b86-8bd3-4b30-acdd-2c65db14c97e] Running
I1123 08:42:15.163401 196829 system_pods.go:89] "kube-proxy-dk6g5" [27bc489f-26f8-4848-9df2-6530dcad7423] Running
I1123 08:42:15.163405 196829 system_pods.go:89] "kube-scheduler-old-k8s-version-180638" [76e55a3f-6b02-43c4-ae79-01300e9dd2c6] Running
I1123 08:42:15.163409 196829 system_pods.go:89] "storage-provisioner" [fa923b06-d896-468f-8e82-51b4e9df88dc] Running
I1123 08:42:15.163417 196829 system_pods.go:126] duration metric: took 1.403606184s to wait for k8s-apps to be running ...
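Annotation: the retry.go lines above poll with short, growing waits (roughly 232ms, 310ms, 470ms) until kube-dns reports Running. The underlying pattern is an ordinary poll loop with increasing sleeps; a generic sketch, not minikube's retry package:

package main

import (
	"errors"
	"fmt"
	"time"
)

// pollUntil retries check with roughly exponential backoff until it
// succeeds or the deadline passes.
func pollUntil(timeout time.Duration, check func() error) error {
	deadline := time.Now().Add(timeout)
	wait := 200 * time.Millisecond
	for {
		err := check()
		if err == nil {
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("timed out: last error: %w", err)
		}
		time.Sleep(wait)
		if wait < 2*time.Second {
			wait = wait * 3 / 2 // grow ~1.5x per attempt, like the intervals above
		}
	}
}

func main() {
	attempts := 0
	err := pollUntil(5*time.Second, func() error {
		attempts++
		if attempts < 4 {
			return errors.New("missing components: kube-dns")
		}
		return nil
	})
	fmt.Println("result:", err, "after", attempts, "attempts")
}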
I1123 08:42:15.163424 196829 system_svc.go:44] waiting for kubelet service to be running ....
I1123 08:42:15.163481 196829 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1123 08:42:15.176644 196829 system_svc.go:56] duration metric: took 13.210368ms WaitForService to wait for kubelet
I1123 08:42:15.176674 196829 kubeadm.go:587] duration metric: took 14.80173902s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1123 08:42:15.176693 196829 node_conditions.go:102] verifying NodePressure condition ...
I1123 08:42:15.179781 196829 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
I1123 08:42:15.179818 196829 node_conditions.go:123] node cpu capacity is 2
I1123 08:42:15.179832 196829 node_conditions.go:105] duration metric: took 3.134393ms to run NodePressure ...
I1123 08:42:15.179843 196829 start.go:242] waiting for startup goroutines ...
I1123 08:42:15.179851 196829 start.go:247] waiting for cluster config update ...
I1123 08:42:15.179867 196829 start.go:256] writing updated cluster config ...
I1123 08:42:15.180158 196829 ssh_runner.go:195] Run: rm -f paused
I1123 08:42:15.184124 196829 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1123 08:42:15.188984 196829 pod_ready.go:83] waiting for pod "coredns-5dd5756b68-q4lbv" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:42:15.197388 196829 pod_ready.go:94] pod "coredns-5dd5756b68-q4lbv" is "Ready"
I1123 08:42:15.197483 196829 pod_ready.go:86] duration metric: took 8.468594ms for pod "coredns-5dd5756b68-q4lbv" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:42:15.200541 196829 pod_ready.go:83] waiting for pod "etcd-old-k8s-version-180638" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:42:15.205348 196829 pod_ready.go:94] pod "etcd-old-k8s-version-180638" is "Ready"
I1123 08:42:15.205396 196829 pod_ready.go:86] duration metric: took 4.809714ms for pod "etcd-old-k8s-version-180638" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:42:15.208274 196829 pod_ready.go:83] waiting for pod "kube-apiserver-old-k8s-version-180638" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:42:15.213022 196829 pod_ready.go:94] pod "kube-apiserver-old-k8s-version-180638" is "Ready"
I1123 08:42:15.213049 196829 pod_ready.go:86] duration metric: took 4.746468ms for pod "kube-apiserver-old-k8s-version-180638" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:42:15.216062 196829 pod_ready.go:83] waiting for pod "kube-controller-manager-old-k8s-version-180638" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:42:15.588621 196829 pod_ready.go:94] pod "kube-controller-manager-old-k8s-version-180638" is "Ready"
I1123 08:42:15.588649 196829 pod_ready.go:86] duration metric: took 372.560174ms for pod "kube-controller-manager-old-k8s-version-180638" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:42:15.789577 196829 pod_ready.go:83] waiting for pod "kube-proxy-dk6g5" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:42:16.188996 196829 pod_ready.go:94] pod "kube-proxy-dk6g5" is "Ready"
I1123 08:42:16.189025 196829 pod_ready.go:86] duration metric: took 399.418985ms for pod "kube-proxy-dk6g5" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:42:16.388950 196829 pod_ready.go:83] waiting for pod "kube-scheduler-old-k8s-version-180638" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:42:16.788322 196829 pod_ready.go:94] pod "kube-scheduler-old-k8s-version-180638" is "Ready"
I1123 08:42:16.788348 196829 pod_ready.go:86] duration metric: took 399.371796ms for pod "kube-scheduler-old-k8s-version-180638" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:42:16.788362 196829 pod_ready.go:40] duration metric: took 1.604205013s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
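The "extra waiting" pass above walks one label selector per control-plane component and blocks until every matching kube-system pod reports the PodReady condition, treating a selector that matches no pods as satisfied ("Ready or be gone"). A rough client-go sketch of that polling shape, assuming a pre-built clientset; the helper names are ours, not minikube's:

    package readiness

    import (
        "context"
        "time"

        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // waitPodsReady polls the namespace until, for every selector, all
    // matching pods are Ready. A selector matching zero pods counts as
    // done, mirroring the "Ready or be gone" wording in the log.
    func waitPodsReady(ctx context.Context, c kubernetes.Interface, ns string, selectors []string) error {
        for _, sel := range selectors {
            for {
                pods, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{LabelSelector: sel})
                if err != nil {
                    return err
                }
                if allReady(pods.Items) {
                    break
                }
                select {
                case <-ctx.Done():
                    return ctx.Err()
                case <-time.After(500 * time.Millisecond):
                }
            }
        }
        return nil
    }

    func allReady(pods []corev1.Pod) bool {
        for _, p := range pods {
            ready := false
            for _, cond := range p.Status.Conditions {
                if cond.Type == corev1.PodReady && cond.Status == corev1.ConditionTrue {
                    ready = true
                    break
                }
            }
            if !ready {
                return false
            }
        }
        return true
    }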
I1123 08:42:16.845637 196829 start.go:625] kubectl: 1.33.2, cluster: 1.28.0 (minor skew: 5)
I1123 08:42:16.848524 196829 out.go:203]
W1123 08:42:16.851133 196829 out.go:285] ! /usr/local/bin/kubectl is version 1.33.2, which may have incompatibilities with Kubernetes 1.28.0.
I1123 08:42:16.854166 196829 out.go:179] - Want kubectl v1.28.0? Try 'minikube kubectl -- get pods -A'
I1123 08:42:16.857768 196829 out.go:179] * Done! kubectl is now configured to use "old-k8s-version-180638" cluster and "default" namespace by default
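The warning above comes from minikube comparing kubectl's minor version (1.33) with the cluster's (1.28); upstream Kubernetes only supports kubectl within one minor version of the apiserver, so a skew of 5 gets flagged. A stdlib-only sketch of that comparison (the parsing helpers are ours):

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    // minorSkew returns |clientMinor - serverMinor| for versions like "1.33.2".
    func minorSkew(client, server string) (int, error) {
        cm, err := minor(client)
        if err != nil {
            return 0, err
        }
        sm, err := minor(server)
        if err != nil {
            return 0, err
        }
        if cm > sm {
            return cm - sm, nil
        }
        return sm - cm, nil
    }

    func minor(v string) (int, error) {
        parts := strings.Split(strings.TrimPrefix(v, "v"), ".")
        if len(parts) < 2 {
            return 0, fmt.Errorf("malformed version %q", v)
        }
        return strconv.Atoi(parts[1])
    }

    func main() {
        skew, _ := minorSkew("1.33.2", "1.28.0")
        fmt.Println("minor skew:", skew) // prints 5, matching the log above
    }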
==> container status <==
CONTAINER      IMAGE          CREATED         STATE    NAME                     ATTEMPT  POD ID         POD                                             NAMESPACE
91bc48b43ecc6  1611cd07b61d5  9 seconds ago   Running  busybox                  0        e4ae249cb52e3  busybox                                         default
d28eb2e2ce196  ba04bb24b9575  15 seconds ago  Running  storage-provisioner      0        a34410332e173  storage-provisioner                             kube-system
7c2ec14edc41a  97e04611ad434  15 seconds ago  Running  coredns                  0        c2b32ac0a3158  coredns-5dd5756b68-q4lbv                        kube-system
75439fed83684  b1a8c6f707935  26 seconds ago  Running  kindnet-cni              0        4f646733919cf  kindnet-mrfgl                                   kube-system
a92786aea3fde  940f54a5bcae9  29 seconds ago  Running  kube-proxy               0        304e17d801222  kube-proxy-dk6g5                                kube-system
dd592fa780598  9cdd6470f48c8  50 seconds ago  Running  etcd                     0        6c8aefe95a6ce  etcd-old-k8s-version-180638                     kube-system
9b79849edeb76  00543d2fe5d71  50 seconds ago  Running  kube-apiserver           0        53e8e5479de81  kube-apiserver-old-k8s-version-180638           kube-system
3a3a4da63be8b  46cc66ccc7c19  50 seconds ago  Running  kube-controller-manager  0        79bfc44a51fa1  kube-controller-manager-old-k8s-version-180638  kube-system
81034c6fa713b  762dce4090c5f  50 seconds ago  Running  kube-scheduler           0        13b0850cbdf71  kube-scheduler-old-k8s-version-180638           kube-system
==> containerd <==
Nov 23 08:42:14 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:14.182847695Z" level=info msg="CreateContainer within sandbox \"c2b32ac0a3158e5b8e88a60e8ec54f99f67326e1aba5a91b8ead5c4893516fa1\" for &ContainerMetadata{Name:coredns,Attempt:0,} returns container id \"7c2ec14edc41a4bad3c997ddbc366a57e75740ffe5d4d804776570d3bbf4089a\""
Nov 23 08:42:14 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:14.185890280Z" level=info msg="StartContainer for \"7c2ec14edc41a4bad3c997ddbc366a57e75740ffe5d4d804776570d3bbf4089a\""
Nov 23 08:42:14 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:14.188348487Z" level=info msg="connecting to shim 7c2ec14edc41a4bad3c997ddbc366a57e75740ffe5d4d804776570d3bbf4089a" address="unix:///run/containerd/s/23e8ed77d6d2545cc040cf10e94b9aa1307cea730c4e678c5b1ba5d216eb3aae" protocol=ttrpc version=3
Nov 23 08:42:14 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:14.191389545Z" level=info msg="CreateContainer within sandbox \"a34410332e1739898fe28b96e52dd9c87f97e3c9bb7b1ffd7f9865c04fcab2a8\" for &ContainerMetadata{Name:storage-provisioner,Attempt:0,} returns container id \"d28eb2e2ce19649f4947ef6afbf30d211d2dfa34551b90f0c10c58fdb65b63cd\""
Nov 23 08:42:14 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:14.193542238Z" level=info msg="StartContainer for \"d28eb2e2ce19649f4947ef6afbf30d211d2dfa34551b90f0c10c58fdb65b63cd\""
Nov 23 08:42:14 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:14.194431932Z" level=info msg="connecting to shim d28eb2e2ce19649f4947ef6afbf30d211d2dfa34551b90f0c10c58fdb65b63cd" address="unix:///run/containerd/s/21496efb2be254d32b19cec40d0f9ba01ff31efa61fb387b7a12652ab6551c66" protocol=ttrpc version=3
Nov 23 08:42:14 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:14.282934097Z" level=info msg="StartContainer for \"7c2ec14edc41a4bad3c997ddbc366a57e75740ffe5d4d804776570d3bbf4089a\" returns successfully"
Nov 23 08:42:14 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:14.297466583Z" level=info msg="StartContainer for \"d28eb2e2ce19649f4947ef6afbf30d211d2dfa34551b90f0c10c58fdb65b63cd\" returns successfully"
Nov 23 08:42:17 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:17.425584929Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:54457203-a4b0-4bfe-b7e6-9804ec70353f,Namespace:default,Attempt:0,}"
Nov 23 08:42:17 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:17.487893445Z" level=info msg="connecting to shim e4ae249cb52e36b4d0a2f9b31e40d2ac6f561a86c7f5020966174ef8dddb28bd" address="unix:///run/containerd/s/48de018ba0d9ba174f3818878e132ec8a301b930403195fb51f42bfd7ba5e6a1" namespace=k8s.io protocol=ttrpc version=3
Nov 23 08:42:17 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:17.543159907Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:54457203-a4b0-4bfe-b7e6-9804ec70353f,Namespace:default,Attempt:0,} returns sandbox id \"e4ae249cb52e36b4d0a2f9b31e40d2ac6f561a86c7f5020966174ef8dddb28bd\""
Nov 23 08:42:17 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:17.545984305Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\""
Nov 23 08:42:19 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:19.686274001Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 23 08:42:19 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:19.688431814Z" level=info msg="stop pulling image gcr.io/k8s-minikube/busybox:1.28.4-glibc: active requests=0, bytes read=1937188"
Nov 23 08:42:19 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:19.690941320Z" level=info msg="ImageCreate event name:\"sha256:1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 23 08:42:19 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:19.694195944Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 23 08:42:19 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:19.694856179Z" level=info msg="Pulled image \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" with image id \"sha256:1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c\", repo tag \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\", repo digest \"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\", size \"1935750\" in 2.148586355s"
Nov 23 08:42:19 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:19.694995922Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" returns image reference \"sha256:1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c\""
Nov 23 08:42:19 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:19.698429970Z" level=info msg="CreateContainer within sandbox \"e4ae249cb52e36b4d0a2f9b31e40d2ac6f561a86c7f5020966174ef8dddb28bd\" for container &ContainerMetadata{Name:busybox,Attempt:0,}"
Nov 23 08:42:19 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:19.711352120Z" level=info msg="Container 91bc48b43ecc67ffc1bc3f7fdbc911d26a4116b41c47fa062edb6c0dda1555ed: CDI devices from CRI Config.CDIDevices: []"
Nov 23 08:42:19 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:19.724026742Z" level=info msg="CreateContainer within sandbox \"e4ae249cb52e36b4d0a2f9b31e40d2ac6f561a86c7f5020966174ef8dddb28bd\" for &ContainerMetadata{Name:busybox,Attempt:0,} returns container id \"91bc48b43ecc67ffc1bc3f7fdbc911d26a4116b41c47fa062edb6c0dda1555ed\""
Nov 23 08:42:19 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:19.724804821Z" level=info msg="StartContainer for \"91bc48b43ecc67ffc1bc3f7fdbc911d26a4116b41c47fa062edb6c0dda1555ed\""
Nov 23 08:42:19 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:19.725703310Z" level=info msg="connecting to shim 91bc48b43ecc67ffc1bc3f7fdbc911d26a4116b41c47fa062edb6c0dda1555ed" address="unix:///run/containerd/s/48de018ba0d9ba174f3818878e132ec8a301b930403195fb51f42bfd7ba5e6a1" protocol=ttrpc version=3
Nov 23 08:42:19 old-k8s-version-180638 containerd[758]: time="2025-11-23T08:42:19.798131338Z" level=info msg="StartContainer for \"91bc48b43ecc67ffc1bc3f7fdbc911d26a4116b41c47fa062edb6c0dda1555ed\" returns successfully"
Nov 23 08:42:26 old-k8s-version-180638 containerd[758]: E1123 08:42:26.310347 758 websocket.go:100] "Unhandled Error" err="unable to upgrade websocket connection: websocket server finished before becoming ready" logger="UnhandledError"
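The containerd entries above trace the CRI flow for the busybox pod: RunPodSandbox, then PullImage, CreateContainer, and StartContainer against the same sandbox. Roughly the same pull-and-create steps can be driven directly with containerd's Go client over the same socket; a sketch assuming the "k8s.io" namespace used by the CRI plugin (not how the kubelet actually calls containerd, which goes through the CRI gRPC API):

    package main

    import (
        "context"
        "log"

        "github.com/containerd/containerd"
        "github.com/containerd/containerd/namespaces"
        "github.com/containerd/containerd/oci"
    )

    func main() {
        client, err := containerd.New("/run/containerd/containerd.sock")
        if err != nil {
            log.Fatal(err)
        }
        defer client.Close()

        // Kubernetes-managed images and containers live in the "k8s.io" namespace.
        ctx := namespaces.WithNamespace(context.Background(), "k8s.io")

        image, err := client.Pull(ctx, "gcr.io/k8s-minikube/busybox:1.28.4-glibc", containerd.WithPullUnpack)
        if err != nil {
            log.Fatal(err)
        }

        // Create (but do not start) a container from the pulled image.
        _, err = client.NewContainer(ctx, "busybox-demo",
            containerd.WithImage(image),
            containerd.WithNewSnapshot("busybox-demo-snap", image),
            containerd.WithNewSpec(oci.WithImageConfig(image)),
        )
        if err != nil {
            log.Fatal(err)
        }
    }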
==> coredns [7c2ec14edc41a4bad3c997ddbc366a57e75740ffe5d4d804776570d3bbf4089a] <==
.:53
[INFO] plugin/reload: Running configuration SHA512 = b7aacdf6a6aa730aafe4d018cac9b7b5ecfb346cba84a99f64521f87aef8b4958639c1cf97967716465791d05bd38f372615327b7cb1d93c850bae532744d54d
CoreDNS-1.10.1
linux/arm64, go1.20, 055b2c3
[INFO] 127.0.0.1:55800 - 59523 "HINFO IN 7767641017076382384.181717569997239392. udp 56 false 512" NXDOMAIN qr,rd,ra 56 0.011046589s
==> describe nodes <==
Name: old-k8s-version-180638
Roles: control-plane
Labels: beta.kubernetes.io/arch=arm64
beta.kubernetes.io/os=linux
kubernetes.io/arch=arm64
kubernetes.io/hostname=old-k8s-version-180638
kubernetes.io/os=linux
minikube.k8s.io/commit=3e219827a5f064cf736992b79e59864301ece66e
minikube.k8s.io/name=old-k8s-version-180638
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_11_23T08_41_47_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Sun, 23 Nov 2025 08:41:43 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: old-k8s-version-180638
AcquireTime: <unset>
RenewTime: Sun, 23 Nov 2025 08:42:27 +0000
Conditions:
Type            Status  LastHeartbeatTime                LastTransitionTime               Reason                      Message
----            ------  -----------------                ------------------               ------                      -------
MemoryPressure  False   Sun, 23 Nov 2025 08:42:17 +0000  Sun, 23 Nov 2025 08:41:40 +0000  KubeletHasSufficientMemory  kubelet has sufficient memory available
DiskPressure    False   Sun, 23 Nov 2025 08:42:17 +0000  Sun, 23 Nov 2025 08:41:40 +0000  KubeletHasNoDiskPressure    kubelet has no disk pressure
PIDPressure     False   Sun, 23 Nov 2025 08:42:17 +0000  Sun, 23 Nov 2025 08:41:40 +0000  KubeletHasSufficientPID     kubelet has sufficient PID available
Ready           True    Sun, 23 Nov 2025 08:42:17 +0000  Sun, 23 Nov 2025 08:42:13 +0000  KubeletReady                kubelet is posting ready status
Addresses:
InternalIP: 192.168.76.2
Hostname: old-k8s-version-180638
Capacity:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022296Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022296Ki
pods: 110
System Info:
Machine ID: 7283ea1857f18f20a875c29069214c9d
System UUID: 66eb206b-bbaa-475d-8a79-ca34c9a5fe12
Boot ID: 728df74d-5f50-461c-8d62-9d80cc778630
Kernel Version: 5.15.0-1084-aws
OS Image: Debian GNU/Linux 12 (bookworm)
Operating System: linux
Architecture: arm64
Container Runtime Version: containerd://2.1.5
Kubelet Version: v1.28.0
Kube-Proxy Version: v1.28.0
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (9 in total)
Namespace    Name                                            CPU Requests  CPU Limits  Memory Requests  Memory Limits  Age
---------    ----                                            ------------  ----------  ---------------  -------------  ---
default      busybox                                         0 (0%)        0 (0%)      0 (0%)           0 (0%)         12s
kube-system  coredns-5dd5756b68-q4lbv                        100m (5%)     0 (0%)      70Mi (0%)        170Mi (2%)     29s
kube-system  etcd-old-k8s-version-180638                     100m (5%)     0 (0%)      100Mi (1%)       0 (0%)         42s
kube-system  kindnet-mrfgl                                   100m (5%)     100m (5%)   50Mi (0%)        50Mi (0%)      30s
kube-system  kube-apiserver-old-k8s-version-180638           250m (12%)    0 (0%)      0 (0%)           0 (0%)         42s
kube-system  kube-controller-manager-old-k8s-version-180638  200m (10%)    0 (0%)      0 (0%)           0 (0%)         42s
kube-system  kube-proxy-dk6g5                                0 (0%)        0 (0%)      0 (0%)           0 (0%)         30s
kube-system  kube-scheduler-old-k8s-version-180638           100m (5%)     0 (0%)      0 (0%)           0 (0%)         44s
kube-system  storage-provisioner                             0 (0%)        0 (0%)      0 (0%)           0 (0%)         27s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource           Requests    Limits
--------           --------    ------
cpu                850m (42%)  100m (5%)
memory             220Mi (2%)  220Mi (2%)
ephemeral-storage  0 (0%)      0 (0%)
hugepages-1Gi      0 (0%)      0 (0%)
hugepages-2Mi      0 (0%)      0 (0%)
hugepages-32Mi     0 (0%)      0 (0%)
hugepages-64Ki     0 (0%)      0 (0%)
Events:
Type    Reason                   Age  From             Message
----    ------                   ---  ----             -------
Normal  Starting                 28s  kube-proxy
Normal  Starting                 43s  kubelet          Starting kubelet.
Normal  NodeHasSufficientMemory  43s  kubelet          Node old-k8s-version-180638 status is now: NodeHasSufficientMemory
Normal  NodeHasNoDiskPressure    43s  kubelet          Node old-k8s-version-180638 status is now: NodeHasNoDiskPressure
Normal  NodeHasSufficientPID     43s  kubelet          Node old-k8s-version-180638 status is now: NodeHasSufficientPID
Normal  NodeAllocatableEnforced  42s  kubelet          Updated Node Allocatable limit across pods
Normal  RegisteredNode           30s  node-controller  Node old-k8s-version-180638 event: Registered Node old-k8s-version-180638 in Controller
Normal  NodeReady                16s  kubelet          Node old-k8s-version-180638 status is now: NodeReady
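Everything `describe nodes` reports here (conditions, capacity, allocatable) comes straight off the Node object, so the same Ready check is easy to script. A short client-go sketch (clientset construction omitted; the helper name is ours):

    package nodecheck

    import (
        "context"
        "fmt"

        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // nodeReady prints the node's Ready condition and allocatable CPU,
    // the same fields summarized in the describe output above.
    func nodeReady(ctx context.Context, c kubernetes.Interface, name string) error {
        node, err := c.CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{})
        if err != nil {
            return err
        }
        for _, cond := range node.Status.Conditions {
            if cond.Type == corev1.NodeReady {
                fmt.Printf("Ready=%s reason=%s\n", cond.Status, cond.Reason)
            }
        }
        cpu := node.Status.Allocatable[corev1.ResourceCPU]
        fmt.Println("allocatable cpu:", cpu.String())
        return nil
    }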
==> dmesg <==
[Nov23 07:17] ACPI: SRAT not present
[ +0.000000] ACPI: SRAT not present
[ +0.000000] SPI driver altr_a10sr has no spi_device_id for altr,a10sr
[ +0.015154] device-mapper: core: CONFIG_IMA_DISABLE_HTABLE is disabled. Duplicate IMA measurements will not be recorded in the IMA log.
[ +0.511595] systemd[1]: Configuration file /run/systemd/system/netplan-ovs-cleanup.service is marked world-inaccessible. This has no effect as configuration data is accessible via APIs without restrictions. Proceeding anyway.
[ +0.034200] systemd[1]: /lib/systemd/system/snapd.service:23: Unknown key name 'RestartMode' in section 'Service', ignoring.
[ +0.753844] ena 0000:00:05.0: LLQ is not supported Fallback to host mode policy.
[ +6.833249] kauditd_printk_skb: 36 callbacks suppressed
[Nov23 08:37] overlayfs: failed to resolve '/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/22/fs': -2
==> etcd [dd592fa780598a368949db2030306613299c5b0608cf477fbac364062431cf64] <==
{"level":"info","ts":"2025-11-23T08:41:39.991972Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 switched to configuration voters=(16896983918768216326)"}
{"level":"info","ts":"2025-11-23T08:41:39.99787Z","caller":"membership/cluster.go:421","msg":"added member","cluster-id":"6f20f2c4b2fb5f8a","local-member-id":"ea7e25599daad906","added-peer-id":"ea7e25599daad906","added-peer-peer-urls":["https://192.168.76.2:2380"]}
{"level":"info","ts":"2025-11-23T08:41:40.001646Z","caller":"embed/etcd.go:726","msg":"starting with client TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/server.crt, key = /var/lib/minikube/certs/etcd/server.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
{"level":"info","ts":"2025-11-23T08:41:40.001836Z","caller":"embed/etcd.go:597","msg":"serving peer traffic","address":"192.168.76.2:2380"}
{"level":"info","ts":"2025-11-23T08:41:40.002083Z","caller":"embed/etcd.go:569","msg":"cmux::serve","address":"192.168.76.2:2380"}
{"level":"info","ts":"2025-11-23T08:41:40.005961Z","caller":"embed/etcd.go:278","msg":"now serving peer/client/metrics","local-member-id":"ea7e25599daad906","initial-advertise-peer-urls":["https://192.168.76.2:2380"],"listen-peer-urls":["https://192.168.76.2:2380"],"advertise-client-urls":["https://192.168.76.2:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.76.2:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
{"level":"info","ts":"2025-11-23T08:41:40.00623Z","caller":"embed/etcd.go:855","msg":"serving metrics","address":"http://127.0.0.1:2381"}
{"level":"info","ts":"2025-11-23T08:41:40.257458Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 is starting a new election at term 1"}
{"level":"info","ts":"2025-11-23T08:41:40.25751Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 became pre-candidate at term 1"}
{"level":"info","ts":"2025-11-23T08:41:40.257539Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 received MsgPreVoteResp from ea7e25599daad906 at term 1"}
{"level":"info","ts":"2025-11-23T08:41:40.257552Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 became candidate at term 2"}
{"level":"info","ts":"2025-11-23T08:41:40.257563Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 received MsgVoteResp from ea7e25599daad906 at term 2"}
{"level":"info","ts":"2025-11-23T08:41:40.257574Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 became leader at term 2"}
{"level":"info","ts":"2025-11-23T08:41:40.257585Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: ea7e25599daad906 elected leader ea7e25599daad906 at term 2"}
{"level":"info","ts":"2025-11-23T08:41:40.268883Z","caller":"etcdserver/server.go:2062","msg":"published local member to cluster through raft","local-member-id":"ea7e25599daad906","local-member-attributes":"{Name:old-k8s-version-180638 ClientURLs:[https://192.168.76.2:2379]}","request-path":"/0/members/ea7e25599daad906/attributes","cluster-id":"6f20f2c4b2fb5f8a","publish-timeout":"7s"}
{"level":"info","ts":"2025-11-23T08:41:40.268928Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-23T08:41:40.269997Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2025-11-23T08:41:40.270072Z","caller":"etcdserver/server.go:2571","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-23T08:41:40.279065Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-23T08:41:40.280196Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.76.2:2379"}
{"level":"info","ts":"2025-11-23T08:41:40.28074Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2025-11-23T08:41:40.280878Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2025-11-23T08:41:40.285959Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"6f20f2c4b2fb5f8a","local-member-id":"ea7e25599daad906","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-23T08:41:40.2918Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-23T08:41:40.291928Z","caller":"etcdserver/server.go:2595","msg":"cluster version is updated","cluster-version":"3.5"}
==> kernel <==
08:42:29 up 1:24, 0 user, load average: 2.87, 3.94, 3.11
Linux old-k8s-version-180638 5.15.0-1084-aws #91~20.04.1-Ubuntu SMP Fri May 2 07:00:04 UTC 2025 aarch64 GNU/Linux
PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"
==> kindnet [75439fed83684fc39ca1dda64cef2644f6e3027bddbd15dff08e7923652250de] <==
I1123 08:42:03.267849 1 main.go:109] connected to apiserver: https://10.96.0.1:443
I1123 08:42:03.357718 1 main.go:139] hostIP = 192.168.76.2
podIP = 192.168.76.2
I1123 08:42:03.358291 1 main.go:148] setting mtu 1500 for CNI
I1123 08:42:03.358311 1 main.go:178] kindnetd IP family: "ipv4"
I1123 08:42:03.358351 1 main.go:182] noMask IPv4 subnets: [10.244.0.0/16]
time="2025-11-23T08:42:03Z" level=info msg="Created plugin 10-kube-network-policies (kindnetd, handles RunPodSandbox,RemovePodSandbox)"
I1123 08:42:03.558800 1 controller.go:377] "Starting controller" name="kube-network-policies"
I1123 08:42:03.558877 1 controller.go:381] "Waiting for informer caches to sync"
I1123 08:42:03.558907 1 shared_informer.go:350] "Waiting for caches to sync" controller="kube-network-policies"
I1123 08:42:03.559938 1 controller.go:390] nri plugin exited: failed to connect to NRI service: dial unix /var/run/nri/nri.sock: connect: no such file or directory
I1123 08:42:03.759127 1 shared_informer.go:357] "Caches are synced" controller="kube-network-policies"
I1123 08:42:03.759212 1 metrics.go:72] Registering metrics
I1123 08:42:03.759307 1 controller.go:711] "Syncing nftables rules"
I1123 08:42:13.563082 1 main.go:297] Handling node with IPs: map[192.168.76.2:{}]
I1123 08:42:13.563142 1 main.go:301] handling current node
I1123 08:42:23.558928 1 main.go:297] Handling node with IPs: map[192.168.76.2:{}]
I1123 08:42:23.558968 1 main.go:301] handling current node
==> kube-apiserver [9b79849edeb76ebe3d1f35f60331849eb478148607f83d7e7cc04f6a89d49cef] <==
I1123 08:41:43.257160 1 controller.go:624] quota admission added evaluator for: leases.coordination.k8s.io
I1123 08:41:43.264540 1 cache.go:39] Caches are synced for APIServiceRegistrationController controller
I1123 08:41:43.264801 1 shared_informer.go:318] Caches are synced for crd-autoregister
I1123 08:41:43.264982 1 aggregator.go:166] initial CRD sync complete...
I1123 08:41:43.265064 1 autoregister_controller.go:141] Starting autoregister controller
I1123 08:41:43.265148 1 cache.go:32] Waiting for caches to sync for autoregister controller
I1123 08:41:43.265235 1 cache.go:39] Caches are synced for autoregister controller
I1123 08:41:43.265825 1 apf_controller.go:377] Running API Priority and Fairness config worker
I1123 08:41:43.266130 1 apf_controller.go:380] Running API Priority and Fairness periodic rebalancing process
I1123 08:41:43.266262 1 shared_informer.go:318] Caches are synced for node_authorizer
I1123 08:41:44.072890 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I1123 08:41:44.080525 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I1123 08:41:44.080644 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I1123 08:41:44.750567 1 controller.go:624] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1123 08:41:44.801229 1 controller.go:624] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1123 08:41:44.903834 1 alloc.go:330] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W1123 08:41:44.910704 1 lease.go:263] Resetting endpoints for master service "kubernetes" to [192.168.76.2]
I1123 08:41:44.911792 1 controller.go:624] quota admission added evaluator for: endpoints
I1123 08:41:44.916638 1 controller.go:624] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1123 08:41:45.103928 1 controller.go:624] quota admission added evaluator for: serviceaccounts
I1123 08:41:46.604806 1 controller.go:624] quota admission added evaluator for: deployments.apps
I1123 08:41:46.621587 1 alloc.go:330] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I1123 08:41:46.632358 1 controller.go:624] quota admission added evaluator for: daemonsets.apps
I1123 08:41:59.838596 1 controller.go:624] quota admission added evaluator for: controllerrevisions.apps
I1123 08:41:59.989364 1 controller.go:624] quota admission added evaluator for: replicasets.apps
==> kube-controller-manager [3a3a4da63be8b591cb08202b3fb1a9b242f87a54811f462f72de264b9c1b565d] <==
I1123 08:41:59.234069 1 shared_informer.go:318] Caches are synced for cronjob
I1123 08:41:59.238552 1 shared_informer.go:318] Caches are synced for disruption
I1123 08:41:59.287773 1 shared_informer.go:318] Caches are synced for resource quota
I1123 08:41:59.635666 1 shared_informer.go:318] Caches are synced for garbage collector
I1123 08:41:59.635720 1 garbagecollector.go:166] "All resource monitors have synced. Proceeding to collect garbage"
I1123 08:41:59.643988 1 shared_informer.go:318] Caches are synced for garbage collector
I1123 08:41:59.852836 1 event.go:307] "Event occurred" object="kube-system/kindnet" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kindnet-mrfgl"
I1123 08:41:59.861554 1 event.go:307] "Event occurred" object="kube-system/kube-proxy" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-dk6g5"
I1123 08:41:59.994988 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set coredns-5dd5756b68 to 2"
I1123 08:42:00.250351 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-q4lbv"
I1123 08:42:00.296011 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-j889m"
I1123 08:42:00.327262 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="332.952552ms"
I1123 08:42:00.350757 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="23.440283ms"
I1123 08:42:00.350887 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="77.187µs"
I1123 08:42:01.812294 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set coredns-5dd5756b68 to 1 from 2"
I1123 08:42:01.846504 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: coredns-5dd5756b68-j889m"
I1123 08:42:01.870499 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="58.780918ms"
I1123 08:42:01.879884 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="9.337438ms"
I1123 08:42:01.882600 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="46.828µs"
I1123 08:42:13.666896 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="126.5µs"
I1123 08:42:13.681803 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="75.242µs"
I1123 08:42:14.082595 1 node_lifecycle_controller.go:1048] "Controller detected that some Nodes are Ready. Exiting master disruption mode"
I1123 08:42:14.904458 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="120.896µs"
I1123 08:42:14.939466 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="17.592228ms"
I1123 08:42:14.940407 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="44.694µs"
==> kube-proxy [a92786aea3fde1301dec08d36ed3b9e913c480310fa0d744d9a1cf2c70d26621] <==
I1123 08:42:00.964044 1 server_others.go:69] "Using iptables proxy"
I1123 08:42:01.013279 1 node.go:141] Successfully retrieved node IP: 192.168.76.2
I1123 08:42:01.120318 1 server.go:632] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I1123 08:42:01.122189 1 server_others.go:152] "Using iptables Proxier"
I1123 08:42:01.122231 1 server_others.go:421] "Detect-local-mode set to ClusterCIDR, but no cluster CIDR for family" ipFamily="IPv6"
I1123 08:42:01.122239 1 server_others.go:438] "Defaulting to no-op detect-local"
I1123 08:42:01.122283 1 proxier.go:251] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
I1123 08:42:01.122555 1 server.go:846] "Version info" version="v1.28.0"
I1123 08:42:01.122986 1 server.go:848] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1123 08:42:01.123668 1 config.go:188] "Starting service config controller"
I1123 08:42:01.123741 1 shared_informer.go:311] Waiting for caches to sync for service config
I1123 08:42:01.123783 1 config.go:97] "Starting endpoint slice config controller"
I1123 08:42:01.123795 1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
I1123 08:42:01.124686 1 config.go:315] "Starting node config controller"
I1123 08:42:01.124705 1 shared_informer.go:311] Waiting for caches to sync for node config
I1123 08:42:01.224043 1 shared_informer.go:318] Caches are synced for endpoint slice config
I1123 08:42:01.224109 1 shared_informer.go:318] Caches are synced for service config
I1123 08:42:01.225482 1 shared_informer.go:318] Caches are synced for node config
==> kube-scheduler [81034c6fa713b6148ba16d2f50c50ea8e020311ed53ed7d84f6606a76362fc4f] <==
W1123 08:41:43.659650 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E1123 08:41:43.659675 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
W1123 08:41:43.666035 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E1123 08:41:43.666076 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
W1123 08:41:43.666131 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E1123 08:41:43.666152 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
W1123 08:41:43.666282 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E1123 08:41:43.666305 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
W1123 08:41:43.666372 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E1123 08:41:43.666389 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
W1123 08:41:43.666441 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E1123 08:41:43.666456 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
W1123 08:41:43.666515 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E1123 08:41:43.666530 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
W1123 08:41:43.666580 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E1123 08:41:43.666596 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
W1123 08:41:43.666648 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E1123 08:41:43.666663 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
W1123 08:41:43.666710 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E1123 08:41:43.666725 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
W1123 08:41:44.500880 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E1123 08:41:44.500926 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
W1123 08:41:44.538573 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E1123 08:41:44.538617 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
I1123 08:41:45.053879 1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Nov 23 08:41:59 old-k8s-version-180638 kubelet[1561]: I1123 08:41:59.155079 1561 kuberuntime_manager.go:1463] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/24"
Nov 23 08:41:59 old-k8s-version-180638 kubelet[1561]: I1123 08:41:59.155695 1561 kubelet_network.go:61] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24"
Nov 23 08:41:59 old-k8s-version-180638 kubelet[1561]: I1123 08:41:59.867270 1561 topology_manager.go:215] "Topology Admit Handler" podUID="53d90f3f-687b-45a0-a344-321a75f38a20" podNamespace="kube-system" podName="kindnet-mrfgl"
Nov 23 08:41:59 old-k8s-version-180638 kubelet[1561]: I1123 08:41:59.882839 1561 topology_manager.go:215] "Topology Admit Handler" podUID="27bc489f-26f8-4848-9df2-6530dcad7423" podNamespace="kube-system" podName="kube-proxy-dk6g5"
Nov 23 08:41:59 old-k8s-version-180638 kubelet[1561]: I1123 08:41:59.909264 1561 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/53d90f3f-687b-45a0-a344-321a75f38a20-xtables-lock\") pod \"kindnet-mrfgl\" (UID: \"53d90f3f-687b-45a0-a344-321a75f38a20\") " pod="kube-system/kindnet-mrfgl"
Nov 23 08:41:59 old-k8s-version-180638 kubelet[1561]: I1123 08:41:59.909324 1561 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k598w\" (UniqueName: \"kubernetes.io/projected/53d90f3f-687b-45a0-a344-321a75f38a20-kube-api-access-k598w\") pod \"kindnet-mrfgl\" (UID: \"53d90f3f-687b-45a0-a344-321a75f38a20\") " pod="kube-system/kindnet-mrfgl"
Nov 23 08:41:59 old-k8s-version-180638 kubelet[1561]: I1123 08:41:59.909349 1561 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/27bc489f-26f8-4848-9df2-6530dcad7423-kube-proxy\") pod \"kube-proxy-dk6g5\" (UID: \"27bc489f-26f8-4848-9df2-6530dcad7423\") " pod="kube-system/kube-proxy-dk6g5"
Nov 23 08:41:59 old-k8s-version-180638 kubelet[1561]: I1123 08:41:59.909373 1561 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/27bc489f-26f8-4848-9df2-6530dcad7423-xtables-lock\") pod \"kube-proxy-dk6g5\" (UID: \"27bc489f-26f8-4848-9df2-6530dcad7423\") " pod="kube-system/kube-proxy-dk6g5"
Nov 23 08:41:59 old-k8s-version-180638 kubelet[1561]: I1123 08:41:59.909397 1561 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/27bc489f-26f8-4848-9df2-6530dcad7423-lib-modules\") pod \"kube-proxy-dk6g5\" (UID: \"27bc489f-26f8-4848-9df2-6530dcad7423\") " pod="kube-system/kube-proxy-dk6g5"
Nov 23 08:41:59 old-k8s-version-180638 kubelet[1561]: I1123 08:41:59.909438 1561 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-cfg\" (UniqueName: \"kubernetes.io/host-path/53d90f3f-687b-45a0-a344-321a75f38a20-cni-cfg\") pod \"kindnet-mrfgl\" (UID: \"53d90f3f-687b-45a0-a344-321a75f38a20\") " pod="kube-system/kindnet-mrfgl"
Nov 23 08:41:59 old-k8s-version-180638 kubelet[1561]: I1123 08:41:59.909462 1561 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/53d90f3f-687b-45a0-a344-321a75f38a20-lib-modules\") pod \"kindnet-mrfgl\" (UID: \"53d90f3f-687b-45a0-a344-321a75f38a20\") " pod="kube-system/kindnet-mrfgl"
Nov 23 08:41:59 old-k8s-version-180638 kubelet[1561]: I1123 08:41:59.909488 1561 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-djzpr\" (UniqueName: \"kubernetes.io/projected/27bc489f-26f8-4848-9df2-6530dcad7423-kube-api-access-djzpr\") pod \"kube-proxy-dk6g5\" (UID: \"27bc489f-26f8-4848-9df2-6530dcad7423\") " pod="kube-system/kube-proxy-dk6g5"
Nov 23 08:42:00 old-k8s-version-180638 kubelet[1561]: I1123 08:42:00.902067 1561 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kube-proxy-dk6g5" podStartSLOduration=1.902024352 podCreationTimestamp="2025-11-23 08:41:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 08:42:00.901629237 +0000 UTC m=+14.326420754" watchObservedRunningTime="2025-11-23 08:42:00.902024352 +0000 UTC m=+14.326815869"
Nov 23 08:42:06 old-k8s-version-180638 kubelet[1561]: I1123 08:42:06.732067 1561 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kindnet-mrfgl" podStartSLOduration=5.495030012 podCreationTimestamp="2025-11-23 08:41:59 +0000 UTC" firstStartedPulling="2025-11-23 08:42:00.776285773 +0000 UTC m=+14.201077291" lastFinishedPulling="2025-11-23 08:42:03.013276696 +0000 UTC m=+16.438068214" observedRunningTime="2025-11-23 08:42:03.871246056 +0000 UTC m=+17.296037582" watchObservedRunningTime="2025-11-23 08:42:06.732020935 +0000 UTC m=+20.156812461"
Nov 23 08:42:13 old-k8s-version-180638 kubelet[1561]: I1123 08:42:13.622916 1561 kubelet_node_status.go:493] "Fast updating node status as it just became ready"
Nov 23 08:42:13 old-k8s-version-180638 kubelet[1561]: I1123 08:42:13.659700 1561 topology_manager.go:215] "Topology Admit Handler" podUID="9a14996d-e910-4a4f-a6f6-f2d8565a4b9c" podNamespace="kube-system" podName="coredns-5dd5756b68-q4lbv"
Nov 23 08:42:13 old-k8s-version-180638 kubelet[1561]: I1123 08:42:13.662325 1561 topology_manager.go:215] "Topology Admit Handler" podUID="fa923b06-d896-468f-8e82-51b4e9df88dc" podNamespace="kube-system" podName="storage-provisioner"
Nov 23 08:42:13 old-k8s-version-180638 kubelet[1561]: I1123 08:42:13.844230 1561 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cbqlk\" (UniqueName: \"kubernetes.io/projected/9a14996d-e910-4a4f-a6f6-f2d8565a4b9c-kube-api-access-cbqlk\") pod \"coredns-5dd5756b68-q4lbv\" (UID: \"9a14996d-e910-4a4f-a6f6-f2d8565a4b9c\") " pod="kube-system/coredns-5dd5756b68-q4lbv"
Nov 23 08:42:13 old-k8s-version-180638 kubelet[1561]: I1123 08:42:13.844293 1561 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wsph6\" (UniqueName: \"kubernetes.io/projected/fa923b06-d896-468f-8e82-51b4e9df88dc-kube-api-access-wsph6\") pod \"storage-provisioner\" (UID: \"fa923b06-d896-468f-8e82-51b4e9df88dc\") " pod="kube-system/storage-provisioner"
Nov 23 08:42:13 old-k8s-version-180638 kubelet[1561]: I1123 08:42:13.844319 1561 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9a14996d-e910-4a4f-a6f6-f2d8565a4b9c-config-volume\") pod \"coredns-5dd5756b68-q4lbv\" (UID: \"9a14996d-e910-4a4f-a6f6-f2d8565a4b9c\") " pod="kube-system/coredns-5dd5756b68-q4lbv"
Nov 23 08:42:13 old-k8s-version-180638 kubelet[1561]: I1123 08:42:13.844356 1561 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/fa923b06-d896-468f-8e82-51b4e9df88dc-tmp\") pod \"storage-provisioner\" (UID: \"fa923b06-d896-468f-8e82-51b4e9df88dc\") " pod="kube-system/storage-provisioner"
Nov 23 08:42:14 old-k8s-version-180638 kubelet[1561]: I1123 08:42:14.902124 1561 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/coredns-5dd5756b68-q4lbv" podStartSLOduration=14.902081859 podCreationTimestamp="2025-11-23 08:42:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 08:42:14.90156744 +0000 UTC m=+28.326358958" watchObservedRunningTime="2025-11-23 08:42:14.902081859 +0000 UTC m=+28.326873377"
Nov 23 08:42:17 old-k8s-version-180638 kubelet[1561]: I1123 08:42:17.115903 1561 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=15.115773659 podCreationTimestamp="2025-11-23 08:42:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 08:42:14.969166121 +0000 UTC m=+28.393957647" watchObservedRunningTime="2025-11-23 08:42:17.115773659 +0000 UTC m=+30.540565177"
Nov 23 08:42:17 old-k8s-version-180638 kubelet[1561]: I1123 08:42:17.116261 1561 topology_manager.go:215] "Topology Admit Handler" podUID="54457203-a4b0-4bfe-b7e6-9804ec70353f" podNamespace="default" podName="busybox"
Nov 23 08:42:17 old-k8s-version-180638 kubelet[1561]: I1123 08:42:17.162701 1561 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5rd6c\" (UniqueName: \"kubernetes.io/projected/54457203-a4b0-4bfe-b7e6-9804ec70353f-kube-api-access-5rd6c\") pod \"busybox\" (UID: \"54457203-a4b0-4bfe-b7e6-9804ec70353f\") " pod="default/busybox"
==> storage-provisioner [d28eb2e2ce19649f4947ef6afbf30d211d2dfa34551b90f0c10c58fdb65b63cd] <==
I1123 08:42:14.310989 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1123 08:42:14.328878 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1123 08:42:14.329183 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I1123 08:42:14.342255 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I1123 08:42:14.342845 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"46657fa0-d0c5-44e7-b4c5-6303b10aff5f", APIVersion:"v1", ResourceVersion:"414", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' old-k8s-version-180638_72561ecd-5ccf-4007-bbe6-862fc9539cb1 became leader
I1123 08:42:14.342920 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_old-k8s-version-180638_72561ecd-5ccf-4007-bbe6-862fc9539cb1!
I1123 08:42:14.443997 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_old-k8s-version-180638_72561ecd-5ccf-4007-bbe6-862fc9539cb1!
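The storage-provisioner lines are standard client-go leader election: acquire the kube-system/k8s.io-minikube-hostpath lock, then start the controller. The provisioner here locks an Endpoints object; newer client-go prefers a Lease, but the acquire-then-start shape is the same. A minimal sketch (identity and callbacks are ours):

    package election

    import (
        "context"
        "log"
        "time"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/leaderelection"
        "k8s.io/client-go/tools/leaderelection/resourcelock"
    )

    // runElected blocks, runs start() only while holding the lease, and
    // logs if leadership is lost -- mirroring the provisioner log above.
    func runElected(ctx context.Context, c kubernetes.Interface, id string, start func(context.Context)) {
        lock := &resourcelock.LeaseLock{
            LeaseMeta:  metav1.ObjectMeta{Name: "k8s.io-minikube-hostpath", Namespace: "kube-system"},
            Client:     c.CoordinationV1(),
            LockConfig: resourcelock.ResourceLockConfig{Identity: id},
        }
        leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
            Lock:          lock,
            LeaseDuration: 15 * time.Second,
            RenewDeadline: 10 * time.Second,
            RetryPeriod:   2 * time.Second,
            Callbacks: leaderelection.LeaderCallbacks{
                OnStartedLeading: start,
                OnStoppedLeading: func() { log.Println("lost lease") },
            },
        })
    }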
-- /stdout --
helpers_test.go:262: (dbg) Run: out/minikube-linux-arm64 status --format={{.APIServer}} -p old-k8s-version-180638 -n old-k8s-version-180638
helpers_test.go:269: (dbg) Run: kubectl --context old-k8s-version-180638 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:293: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: end of post-mortem logs <<<
helpers_test.go:294: ---------------------/post-mortem---------------------------------
--- FAIL: TestStartStop/group/old-k8s-version/serial/DeployApp (14.04s)