=== RUN TestStartStop/group/old-k8s-version/serial/DeployApp
start_stop_delete_test.go:194: (dbg) Run: kubectl --context old-k8s-version-264160 create -f testdata/busybox.yaml
start_stop_delete_test.go:194: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:352: "busybox" [2af6deb4-937f-4b9b-9de6-995e75a080b8] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
helpers_test.go:352: "busybox" [2af6deb4-937f-4b9b-9de6-995e75a080b8] Running
E1119 22:36:48.008664 4144 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/addons-030214/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:194: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: integration-test=busybox healthy within 9.003496923s
start_stop_delete_test.go:194: (dbg) Run: kubectl --context old-k8s-version-264160 exec busybox -- /bin/sh -c "ulimit -n"
start_stop_delete_test.go:194: 'ulimit -n' returned 1024, expected 1048576
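For reference, the assertion that fails above simply runs 'ulimit -n' inside the busybox pod and compares the reported soft open-files limit against the expected value of 1048576. A minimal, self-contained sketch of that check (a hypothetical illustration, not the actual start_stop_delete_test.go code; it assumes the same profile and pod names shown in the log) could look like this in Go:

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	// Run `ulimit -n` inside the busybox pod, exactly as shown in the log above.
	out, err := exec.Command("kubectl", "--context", "old-k8s-version-264160",
		"exec", "busybox", "--", "/bin/sh", "-c", "ulimit -n").CombinedOutput()
	if err != nil {
		fmt.Println("kubectl exec failed:", err)
		return
	}
	// Compare the reported soft open-files limit with the value the test expects (1048576).
	got := strings.TrimSpace(string(out))
	if got != "1048576" {
		fmt.Printf("'ulimit -n' returned %s, expected 1048576\n", got)
	}
}

Run against this cluster, such a check would report the same mismatch logged at start_stop_delete_test.go:194.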
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: docker inspect <======
helpers_test.go:239: (dbg) Run: docker inspect old-k8s-version-264160
helpers_test.go:243: (dbg) docker inspect old-k8s-version-264160:
-- stdout --
[
{
"Id": "49717cdd4541256c61f8dce96738708ef0a5263ed6216dabb995ea611616d37a",
"Created": "2025-11-19T22:35:36.829393211Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 205037,
"ExitCode": 0,
"Error": "",
"StartedAt": "2025-11-19T22:35:36.889026709Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:161ae512ea03f95c595a46a20f1dbd1d1e737c6a82df3ed673e089531af665da",
"ResolvConfPath": "/var/lib/docker/containers/49717cdd4541256c61f8dce96738708ef0a5263ed6216dabb995ea611616d37a/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/49717cdd4541256c61f8dce96738708ef0a5263ed6216dabb995ea611616d37a/hostname",
"HostsPath": "/var/lib/docker/containers/49717cdd4541256c61f8dce96738708ef0a5263ed6216dabb995ea611616d37a/hosts",
"LogPath": "/var/lib/docker/containers/49717cdd4541256c61f8dce96738708ef0a5263ed6216dabb995ea611616d37a/49717cdd4541256c61f8dce96738708ef0a5263ed6216dabb995ea611616d37a-json.log",
"Name": "/old-k8s-version-264160",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"old-k8s-version-264160:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {}
},
"NetworkMode": "old-k8s-version-264160",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 3221225472,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 6442450944,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"ID": "49717cdd4541256c61f8dce96738708ef0a5263ed6216dabb995ea611616d37a",
"LowerDir": "/var/lib/docker/overlay2/feff7a4e723e18389dcb4a6f7e089bff4aeb566c5b553ed60b078e825f1fd0a8-init/diff:/var/lib/docker/overlay2/b6ebc9601ea0ae08484f263713f3358dd93f7748ebfafbd9155229908dee9606/diff",
"MergedDir": "/var/lib/docker/overlay2/feff7a4e723e18389dcb4a6f7e089bff4aeb566c5b553ed60b078e825f1fd0a8/merged",
"UpperDir": "/var/lib/docker/overlay2/feff7a4e723e18389dcb4a6f7e089bff4aeb566c5b553ed60b078e825f1fd0a8/diff",
"WorkDir": "/var/lib/docker/overlay2/feff7a4e723e18389dcb4a6f7e089bff4aeb566c5b553ed60b078e825f1fd0a8/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "old-k8s-version-264160",
"Source": "/var/lib/docker/volumes/old-k8s-version-264160/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "old-k8s-version-264160",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "old-k8s-version-264160",
"name.minikube.sigs.k8s.io": "old-k8s-version-264160",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "1c6d7c0f5ea4187c0bdb74e6f6190f3c956a222d61984cbd94ed19e45025d4c9",
"SandboxKey": "/var/run/docker/netns/1c6d7c0f5ea4",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33054"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33055"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33058"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33056"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33057"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"old-k8s-version-264160": {
"IPAMConfig": {
"IPv4Address": "192.168.76.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "52:a5:ad:7a:8b:5a",
"DriverOpts": null,
"GwPriority": 0,
"NetworkID": "b720c74a0dc38658463082bcb93730b420d57f391d495ecb21d74f5ad35b4f21",
"EndpointID": "4800aba7ded95ed95a56ef1ad4bf1b238d330afe47c91b66c43c80a2794b655c",
"Gateway": "192.168.76.1",
"IPAddress": "192.168.76.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"old-k8s-version-264160",
"49717cdd4541"
]
}
}
}
}
]
-- /stdout --
helpers_test.go:247: (dbg) Run: out/minikube-linux-arm64 status --format={{.Host}} -p old-k8s-version-264160 -n old-k8s-version-264160
helpers_test.go:252: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-arm64 -p old-k8s-version-264160 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-arm64 -p old-k8s-version-264160 logs -n 25: (1.208323284s)
helpers_test.go:260: TestStartStop/group/old-k8s-version/serial/DeployApp logs:
-- stdout --
==> Audit <==
┌─────────┬─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬──────────────────────────┬─────────┬─────────┬─────────────────────┬────────────
─────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼──────────────────────────┼─────────┼─────────┼─────────────────────┼────────────
─────────┤
│ ssh │ -p cilium-156590 sudo cat /etc/docker/daemon.json │ cilium-156590 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ │
│ ssh │ -p cilium-156590 sudo docker system info │ cilium-156590 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ │
│ ssh │ -p cilium-156590 sudo systemctl status cri-docker --all --full --no-pager │ cilium-156590 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ │
│ ssh │ -p cilium-156590 sudo systemctl cat cri-docker --no-pager │ cilium-156590 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ │
│ ssh │ -p cilium-156590 sudo cat /etc/systemd/system/cri-docker.service.d/10-cni.conf │ cilium-156590 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ │
│ ssh │ -p cilium-156590 sudo cat /usr/lib/systemd/system/cri-docker.service │ cilium-156590 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ │
│ ssh │ -p cilium-156590 sudo cri-dockerd --version │ cilium-156590 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ │
│ ssh │ -p cilium-156590 sudo systemctl status containerd --all --full --no-pager │ cilium-156590 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ │
│ ssh │ -p cilium-156590 sudo systemctl cat containerd --no-pager │ cilium-156590 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ │
│ ssh │ -p cilium-156590 sudo cat /lib/systemd/system/containerd.service │ cilium-156590 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ │
│ ssh │ -p cilium-156590 sudo cat /etc/containerd/config.toml │ cilium-156590 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ │
│ ssh │ -p cilium-156590 sudo containerd config dump │ cilium-156590 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ │
│ ssh │ -p cilium-156590 sudo systemctl status crio --all --full --no-pager │ cilium-156590 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ │
│ ssh │ -p cilium-156590 sudo systemctl cat crio --no-pager │ cilium-156590 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ │
│ ssh │ -p cilium-156590 sudo find /etc/crio -type f -exec sh -c 'echo {}; cat {}' \; │ cilium-156590 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ │
│ ssh │ -p cilium-156590 sudo crio config │ cilium-156590 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ │
│ delete │ -p cilium-156590 │ cilium-156590 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ 19 Nov 25 22:34 UTC │
│ start │ -p cert-expiration-750367 --memory=3072 --cert-expiration=3m --driver=docker --container-runtime=containerd │ cert-expiration-750367 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ 19 Nov 25 22:34 UTC │
│ ssh │ force-systemd-env-388402 ssh cat /etc/containerd/config.toml │ force-systemd-env-388402 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ 19 Nov 25 22:34 UTC │
│ delete │ -p force-systemd-env-388402 │ force-systemd-env-388402 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ 19 Nov 25 22:34 UTC │
│ start │ -p cert-options-815306 --memory=3072 --apiserver-ips=127.0.0.1 --apiserver-ips=192.168.15.15 --apiserver-names=localhost --apiserver-names=www.google.com --apiserver-port=8555 --driver=docker --container-runtime=containerd │ cert-options-815306 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ 19 Nov 25 22:35 UTC │
│ ssh │ cert-options-815306 ssh openssl x509 -text -noout -in /var/lib/minikube/certs/apiserver.crt │ cert-options-815306 │ jenkins │ v1.37.0 │ 19 Nov 25 22:35 UTC │ 19 Nov 25 22:35 UTC │
│ ssh │ -p cert-options-815306 -- sudo cat /etc/kubernetes/admin.conf │ cert-options-815306 │ jenkins │ v1.37.0 │ 19 Nov 25 22:35 UTC │ 19 Nov 25 22:35 UTC │
│ delete │ -p cert-options-815306 │ cert-options-815306 │ jenkins │ v1.37.0 │ 19 Nov 25 22:35 UTC │ 19 Nov 25 22:35 UTC │
│ start │ -p old-k8s-version-264160 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.28.0 │ old-k8s-version-264160 │ jenkins │ v1.37.0 │ 19 Nov 25 22:35 UTC │ 19 Nov 25 22:36 UTC │
└─────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────────────────┴─────────┴─────────┴─────────────────────┴────────────
─────────┘
==> Last Start <==
Log file created at: 2025/11/19 22:35:30
Running on machine: ip-172-31-24-2
Binary: Built with gc go1.24.6 for linux/arm64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1119 22:35:30.257107 204649 out.go:360] Setting OutFile to fd 1 ...
I1119 22:35:30.257270 204649 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1119 22:35:30.257288 204649 out.go:374] Setting ErrFile to fd 2...
I1119 22:35:30.257293 204649 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1119 22:35:30.257586 204649 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21918-2347/.minikube/bin
I1119 22:35:30.258032 204649 out.go:368] Setting JSON to false
I1119 22:35:30.259057 204649 start.go:133] hostinfo: {"hostname":"ip-172-31-24-2","uptime":4651,"bootTime":1763587079,"procs":189,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1084-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"6d436adf-771e-4269-b9a3-c25fd4fca4f5"}
I1119 22:35:30.259135 204649 start.go:143] virtualization:
I1119 22:35:30.265034 204649 out.go:179] * [old-k8s-version-264160] minikube v1.37.0 on Ubuntu 20.04 (arm64)
I1119 22:35:30.268600 204649 out.go:179] - MINIKUBE_LOCATION=21918
I1119 22:35:30.268654 204649 notify.go:221] Checking for updates...
I1119 22:35:30.275244 204649 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1119 22:35:30.278424 204649 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21918-2347/kubeconfig
I1119 22:35:30.281805 204649 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21918-2347/.minikube
I1119 22:35:30.285044 204649 out.go:179] - MINIKUBE_BIN=out/minikube-linux-arm64
I1119 22:35:30.288125 204649 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1119 22:35:30.291809 204649 config.go:182] Loaded profile config "cert-expiration-750367": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1119 22:35:30.291938 204649 driver.go:422] Setting default libvirt URI to qemu:///system
I1119 22:35:30.328984 204649 docker.go:124] docker version: linux-28.1.1:Docker Engine - Community
I1119 22:35:30.329118 204649 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1119 22:35:30.391514 204649 info.go:266] docker info: {ID:J4M5:W6MX:GOX4:4LAQ:VI7E:VJNF:J3OP:OPBH:GF7G:PPY4:WQWD:7N4L Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:36 OomKillDisable:true NGoroutines:52 SystemTime:2025-11-19 22:35:30.382377652 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214831104 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-24-2 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I1119 22:35:30.391618 204649 docker.go:319] overlay module found
I1119 22:35:30.394904 204649 out.go:179] * Using the docker driver based on user configuration
I1119 22:35:30.397906 204649 start.go:309] selected driver: docker
I1119 22:35:30.397928 204649 start.go:930] validating driver "docker" against <nil>
I1119 22:35:30.397942 204649 start.go:941] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1119 22:35:30.398744 204649 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1119 22:35:30.457338 204649 info.go:266] docker info: {ID:J4M5:W6MX:GOX4:4LAQ:VI7E:VJNF:J3OP:OPBH:GF7G:PPY4:WQWD:7N4L Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:36 OomKillDisable:true NGoroutines:52 SystemTime:2025-11-19 22:35:30.447544183 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214831104 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-24-2 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I1119 22:35:30.457505 204649 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I1119 22:35:30.457734 204649 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1119 22:35:30.460603 204649 out.go:179] * Using Docker driver with root privileges
I1119 22:35:30.463555 204649 cni.go:84] Creating CNI manager for ""
I1119 22:35:30.463623 204649 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1119 22:35:30.463636 204649 start_flags.go:336] Found "CNI" CNI - setting NetworkPlugin=cni
I1119 22:35:30.463716 204649 start.go:353] cluster config:
{Name:old-k8s-version-264160 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-264160 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1119 22:35:30.466849 204649 out.go:179] * Starting "old-k8s-version-264160" primary control-plane node in "old-k8s-version-264160" cluster
I1119 22:35:30.469744 204649 cache.go:134] Beginning downloading kic base image for docker with containerd
I1119 22:35:30.472743 204649 out.go:179] * Pulling base image v0.0.48-1763561786-21918 ...
I1119 22:35:30.475730 204649 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1119 22:35:30.475797 204649 preload.go:203] Found local preload: /home/jenkins/minikube-integration/21918-2347/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-arm64.tar.lz4
I1119 22:35:30.475812 204649 cache.go:65] Caching tarball of preloaded images
I1119 22:35:30.475815 204649 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865 in local docker daemon
I1119 22:35:30.475897 204649 preload.go:238] Found /home/jenkins/minikube-integration/21918-2347/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-arm64.tar.lz4 in cache, skipping download
I1119 22:35:30.475907 204649 cache.go:68] Finished verifying existence of preloaded tar for v1.28.0 on containerd
I1119 22:35:30.476103 204649 profile.go:143] Saving config to /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/config.json ...
I1119 22:35:30.476142 204649 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/config.json: {Name:mka3956cf816ce3f0dc4b41766ded046d7e239b7 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:35:30.495142 204649 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865 in local docker daemon, skipping pull
I1119 22:35:30.495164 204649 cache.go:158] gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865 exists in daemon, skipping load
I1119 22:35:30.495178 204649 cache.go:243] Successfully downloaded all kic artifacts
I1119 22:35:30.495202 204649 start.go:360] acquireMachinesLock for old-k8s-version-264160: {Name:mkb1d6d80392c055072776fe42d903323b85b557 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1119 22:35:30.495313 204649 start.go:364] duration metric: took 84.916µs to acquireMachinesLock for "old-k8s-version-264160"
I1119 22:35:30.495346 204649 start.go:93] Provisioning new machine with config: &{Name:old-k8s-version-264160 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-264160 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1119 22:35:30.495417 204649 start.go:125] createHost starting for "" (driver="docker")
I1119 22:35:30.498755 204649 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I1119 22:35:30.499000 204649 start.go:159] libmachine.API.Create for "old-k8s-version-264160" (driver="docker")
I1119 22:35:30.499040 204649 client.go:173] LocalClient.Create starting
I1119 22:35:30.499112 204649 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21918-2347/.minikube/certs/ca.pem
I1119 22:35:30.499148 204649 main.go:143] libmachine: Decoding PEM data...
I1119 22:35:30.499166 204649 main.go:143] libmachine: Parsing certificate...
I1119 22:35:30.499221 204649 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21918-2347/.minikube/certs/cert.pem
I1119 22:35:30.499243 204649 main.go:143] libmachine: Decoding PEM data...
I1119 22:35:30.499252 204649 main.go:143] libmachine: Parsing certificate...
I1119 22:35:30.499620 204649 cli_runner.go:164] Run: docker network inspect old-k8s-version-264160 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1119 22:35:30.514882 204649 cli_runner.go:211] docker network inspect old-k8s-version-264160 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1119 22:35:30.514967 204649 network_create.go:284] running [docker network inspect old-k8s-version-264160] to gather additional debugging logs...
I1119 22:35:30.514989 204649 cli_runner.go:164] Run: docker network inspect old-k8s-version-264160
W1119 22:35:30.529792 204649 cli_runner.go:211] docker network inspect old-k8s-version-264160 returned with exit code 1
I1119 22:35:30.529827 204649 network_create.go:287] error running [docker network inspect old-k8s-version-264160]: docker network inspect old-k8s-version-264160: exit status 1
stdout:
[]
stderr:
Error response from daemon: network old-k8s-version-264160 not found
I1119 22:35:30.529841 204649 network_create.go:289] output of [docker network inspect old-k8s-version-264160]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network old-k8s-version-264160 not found
** /stderr **
I1119 22:35:30.529955 204649 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1119 22:35:30.546966 204649 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-b0fa93c84379 IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:8a:8f:4f:8f:5a:a3} reservation:<nil>}
I1119 22:35:30.547286 204649 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-141c656f658f IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:62:30:08:ea:1a:b9} reservation:<nil>}
I1119 22:35:30.547626 204649 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-aae633a5ffae IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:ca:73:d8:2e:30:94} reservation:<nil>}
I1119 22:35:30.548050 204649 network.go:206] using free private subnet 192.168.76.0/24: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0x40019f9110}
I1119 22:35:30.548074 204649 network_create.go:124] attempt to create docker network old-k8s-version-264160 192.168.76.0/24 with gateway 192.168.76.1 and MTU of 1500 ...
I1119 22:35:30.548135 204649 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.76.0/24 --gateway=192.168.76.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=old-k8s-version-264160 old-k8s-version-264160
I1119 22:35:30.612059 204649 network_create.go:108] docker network old-k8s-version-264160 192.168.76.0/24 created
I1119 22:35:30.612094 204649 kic.go:121] calculated static IP "192.168.76.2" for the "old-k8s-version-264160" container
I1119 22:35:30.612164 204649 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1119 22:35:30.629392 204649 cli_runner.go:164] Run: docker volume create old-k8s-version-264160 --label name.minikube.sigs.k8s.io=old-k8s-version-264160 --label created_by.minikube.sigs.k8s.io=true
I1119 22:35:30.648884 204649 oci.go:103] Successfully created a docker volume old-k8s-version-264160
I1119 22:35:30.648982 204649 cli_runner.go:164] Run: docker run --rm --name old-k8s-version-264160-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=old-k8s-version-264160 --entrypoint /usr/bin/test -v old-k8s-version-264160:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865 -d /var/lib
I1119 22:35:31.199519 204649 oci.go:107] Successfully prepared a docker volume old-k8s-version-264160
I1119 22:35:31.199605 204649 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1119 22:35:31.199622 204649 kic.go:194] Starting extracting preloaded images to volume ...
I1119 22:35:31.199697 204649 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21918-2347/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v old-k8s-version-264160:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865 -I lz4 -xf /preloaded.tar -C /extractDir
I1119 22:35:36.761404 204649 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21918-2347/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v old-k8s-version-264160:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865 -I lz4 -xf /preloaded.tar -C /extractDir: (5.561655508s)
I1119 22:35:36.761444 204649 kic.go:203] duration metric: took 5.561818243s to extract preloaded images to volume ...
W1119 22:35:36.761577 204649 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I1119 22:35:36.761693 204649 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1119 22:35:36.815053 204649 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname old-k8s-version-264160 --name old-k8s-version-264160 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=old-k8s-version-264160 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=old-k8s-version-264160 --network old-k8s-version-264160 --ip 192.168.76.2 --volume old-k8s-version-264160:/var --security-opt apparmor=unconfined --memory=3072mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865
I1119 22:35:37.145087 204649 cli_runner.go:164] Run: docker container inspect old-k8s-version-264160 --format={{.State.Running}}
I1119 22:35:37.171282 204649 cli_runner.go:164] Run: docker container inspect old-k8s-version-264160 --format={{.State.Status}}
I1119 22:35:37.199972 204649 cli_runner.go:164] Run: docker exec old-k8s-version-264160 stat /var/lib/dpkg/alternatives/iptables
I1119 22:35:37.254683 204649 oci.go:144] the created container "old-k8s-version-264160" has a running status.
I1119 22:35:37.254726 204649 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21918-2347/.minikube/machines/old-k8s-version-264160/id_rsa...
I1119 22:35:38.063600 204649 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21918-2347/.minikube/machines/old-k8s-version-264160/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1119 22:35:38.084666 204649 cli_runner.go:164] Run: docker container inspect old-k8s-version-264160 --format={{.State.Status}}
I1119 22:35:38.103756 204649 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1119 22:35:38.103781 204649 kic_runner.go:114] Args: [docker exec --privileged old-k8s-version-264160 chown docker:docker /home/docker/.ssh/authorized_keys]
I1119 22:35:38.159199 204649 cli_runner.go:164] Run: docker container inspect old-k8s-version-264160 --format={{.State.Status}}
I1119 22:35:38.177494 204649 machine.go:94] provisionDockerMachine start ...
I1119 22:35:38.177599 204649 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-264160
I1119 22:35:38.195122 204649 main.go:143] libmachine: Using SSH client type: native
I1119 22:35:38.195453 204649 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3eefe0] 0x3f1790 <nil> [] 0s} 127.0.0.1 33054 <nil> <nil>}
I1119 22:35:38.195469 204649 main.go:143] libmachine: About to run SSH command:
hostname
I1119 22:35:38.196184 204649 main.go:143] libmachine: Error dialing TCP: ssh: handshake failed: EOF
I1119 22:35:41.337849 204649 main.go:143] libmachine: SSH cmd err, output: <nil>: old-k8s-version-264160
I1119 22:35:41.337872 204649 ubuntu.go:182] provisioning hostname "old-k8s-version-264160"
I1119 22:35:41.337936 204649 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-264160
I1119 22:35:41.356186 204649 main.go:143] libmachine: Using SSH client type: native
I1119 22:35:41.356488 204649 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3eefe0] 0x3f1790 <nil> [] 0s} 127.0.0.1 33054 <nil> <nil>}
I1119 22:35:41.356501 204649 main.go:143] libmachine: About to run SSH command:
sudo hostname old-k8s-version-264160 && echo "old-k8s-version-264160" | sudo tee /etc/hostname
I1119 22:35:41.512063 204649 main.go:143] libmachine: SSH cmd err, output: <nil>: old-k8s-version-264160
I1119 22:35:41.512155 204649 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-264160
I1119 22:35:41.531307 204649 main.go:143] libmachine: Using SSH client type: native
I1119 22:35:41.531635 204649 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3eefe0] 0x3f1790 <nil> [] 0s} 127.0.0.1 33054 <nil> <nil>}
I1119 22:35:41.531659 204649 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sold-k8s-version-264160' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 old-k8s-version-264160/g' /etc/hosts;
else
echo '127.0.1.1 old-k8s-version-264160' | sudo tee -a /etc/hosts;
fi
fi
I1119 22:35:41.674522 204649 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1119 22:35:41.674549 204649 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21918-2347/.minikube CaCertPath:/home/jenkins/minikube-integration/21918-2347/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21918-2347/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21918-2347/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21918-2347/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21918-2347/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21918-2347/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21918-2347/.minikube}
I1119 22:35:41.674570 204649 ubuntu.go:190] setting up certificates
I1119 22:35:41.674581 204649 provision.go:84] configureAuth start
I1119 22:35:41.674640 204649 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-264160
I1119 22:35:41.694614 204649 provision.go:143] copyHostCerts
I1119 22:35:41.694682 204649 exec_runner.go:144] found /home/jenkins/minikube-integration/21918-2347/.minikube/key.pem, removing ...
I1119 22:35:41.694696 204649 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21918-2347/.minikube/key.pem
I1119 22:35:41.694778 204649 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21918-2347/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21918-2347/.minikube/key.pem (1675 bytes)
I1119 22:35:41.694893 204649 exec_runner.go:144] found /home/jenkins/minikube-integration/21918-2347/.minikube/ca.pem, removing ...
I1119 22:35:41.694904 204649 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21918-2347/.minikube/ca.pem
I1119 22:35:41.694933 204649 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21918-2347/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21918-2347/.minikube/ca.pem (1082 bytes)
I1119 22:35:41.694994 204649 exec_runner.go:144] found /home/jenkins/minikube-integration/21918-2347/.minikube/cert.pem, removing ...
I1119 22:35:41.695002 204649 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21918-2347/.minikube/cert.pem
I1119 22:35:41.695027 204649 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21918-2347/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21918-2347/.minikube/cert.pem (1123 bytes)
I1119 22:35:41.695078 204649 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21918-2347/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21918-2347/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21918-2347/.minikube/certs/ca-key.pem org=jenkins.old-k8s-version-264160 san=[127.0.0.1 192.168.76.2 localhost minikube old-k8s-version-264160]
I1119 22:35:41.985138 204649 provision.go:177] copyRemoteCerts
I1119 22:35:41.985210 204649 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1119 22:35:41.985253 204649 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-264160
I1119 22:35:42.011744 204649 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33054 SSHKeyPath:/home/jenkins/minikube-integration/21918-2347/.minikube/machines/old-k8s-version-264160/id_rsa Username:docker}
I1119 22:35:42.120462 204649 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-2347/.minikube/machines/server.pem --> /etc/docker/server.pem (1233 bytes)
I1119 22:35:42.153941 204649 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-2347/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I1119 22:35:42.177275 204649 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-2347/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1119 22:35:42.199768 204649 provision.go:87] duration metric: took 525.161639ms to configureAuth
I1119 22:35:42.199797 204649 ubuntu.go:206] setting minikube options for container-runtime
I1119 22:35:42.199999 204649 config.go:182] Loaded profile config "old-k8s-version-264160": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1119 22:35:42.200014 204649 machine.go:97] duration metric: took 4.022496163s to provisionDockerMachine
I1119 22:35:42.200022 204649 client.go:176] duration metric: took 11.700970491s to LocalClient.Create
I1119 22:35:42.200036 204649 start.go:167] duration metric: took 11.70103788s to libmachine.API.Create "old-k8s-version-264160"
I1119 22:35:42.200044 204649 start.go:293] postStartSetup for "old-k8s-version-264160" (driver="docker")
I1119 22:35:42.200053 204649 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1119 22:35:42.200107 204649 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1119 22:35:42.200153 204649 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-264160
I1119 22:35:42.221138 204649 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33054 SSHKeyPath:/home/jenkins/minikube-integration/21918-2347/.minikube/machines/old-k8s-version-264160/id_rsa Username:docker}
I1119 22:35:42.326805 204649 ssh_runner.go:195] Run: cat /etc/os-release
I1119 22:35:42.330396 204649 main.go:143] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1119 22:35:42.330426 204649 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1119 22:35:42.330439 204649 filesync.go:126] Scanning /home/jenkins/minikube-integration/21918-2347/.minikube/addons for local assets ...
I1119 22:35:42.330497 204649 filesync.go:126] Scanning /home/jenkins/minikube-integration/21918-2347/.minikube/files for local assets ...
I1119 22:35:42.330585 204649 filesync.go:149] local asset: /home/jenkins/minikube-integration/21918-2347/.minikube/files/etc/ssl/certs/41442.pem -> 41442.pem in /etc/ssl/certs
I1119 22:35:42.330694 204649 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1119 22:35:42.338569 204649 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-2347/.minikube/files/etc/ssl/certs/41442.pem --> /etc/ssl/certs/41442.pem (1708 bytes)
I1119 22:35:42.358341 204649 start.go:296] duration metric: took 158.281623ms for postStartSetup
I1119 22:35:42.358732 204649 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-264160
I1119 22:35:42.376951 204649 profile.go:143] Saving config to /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/config.json ...
I1119 22:35:42.377417 204649 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1119 22:35:42.377467 204649 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-264160
I1119 22:35:42.395134 204649 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33054 SSHKeyPath:/home/jenkins/minikube-integration/21918-2347/.minikube/machines/old-k8s-version-264160/id_rsa Username:docker}
I1119 22:35:42.495341 204649 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1119 22:35:42.499972 204649 start.go:128] duration metric: took 12.004539402s to createHost
I1119 22:35:42.500036 204649 start.go:83] releasing machines lock for "old-k8s-version-264160", held for 12.004707247s
I1119 22:35:42.500112 204649 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-264160
I1119 22:35:42.517291 204649 ssh_runner.go:195] Run: cat /version.json
I1119 22:35:42.517425 204649 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-264160
I1119 22:35:42.517727 204649 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1119 22:35:42.517817 204649 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-264160
I1119 22:35:42.538882 204649 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33054 SSHKeyPath:/home/jenkins/minikube-integration/21918-2347/.minikube/machines/old-k8s-version-264160/id_rsa Username:docker}
I1119 22:35:42.547918 204649 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33054 SSHKeyPath:/home/jenkins/minikube-integration/21918-2347/.minikube/machines/old-k8s-version-264160/id_rsa Username:docker}
I1119 22:35:42.646164 204649 ssh_runner.go:195] Run: systemctl --version
I1119 22:35:42.733875 204649 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1119 22:35:42.738275 204649 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1119 22:35:42.738377 204649 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1119 22:35:42.768357 204649 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/10-crio-bridge.conflist.disabled] bridge cni config(s)
I1119 22:35:42.768382 204649 start.go:496] detecting cgroup driver to use...
I1119 22:35:42.768416 204649 detect.go:187] detected "cgroupfs" cgroup driver on host os
I1119 22:35:42.768467 204649 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1119 22:35:42.786112 204649 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1119 22:35:42.799389 204649 docker.go:218] disabling cri-docker service (if available) ...
I1119 22:35:42.799458 204649 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1119 22:35:42.817550 204649 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1119 22:35:42.837250 204649 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1119 22:35:42.954428 204649 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1119 22:35:43.089677 204649 docker.go:234] disabling docker service ...
I1119 22:35:43.089796 204649 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1119 22:35:43.119196 204649 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1119 22:35:43.133883 204649 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1119 22:35:43.271748 204649 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1119 22:35:43.403111 204649 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1119 22:35:43.416605 204649 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1119 22:35:43.431762 204649 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.9"|' /etc/containerd/config.toml"
I1119 22:35:43.441044 204649 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1119 22:35:43.450280 204649 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I1119 22:35:43.450355 204649 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I1119 22:35:43.460541 204649 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1119 22:35:43.469380 204649 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1119 22:35:43.478023 204649 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1119 22:35:43.486801 204649 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1119 22:35:43.495927 204649 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1119 22:35:43.505431 204649 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1119 22:35:43.514750 204649 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1119 22:35:43.524906 204649 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1119 22:35:43.533562 204649 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1119 22:35:43.541294 204649 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1119 22:35:43.666061 204649 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1119 22:35:43.801836 204649 start.go:543] Will wait 60s for socket path /run/containerd/containerd.sock
I1119 22:35:43.801996 204649 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I1119 22:35:43.807154 204649 start.go:564] Will wait 60s for crictl version
I1119 22:35:43.807283 204649 ssh_runner.go:195] Run: which crictl
I1119 22:35:43.810929 204649 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1119 22:35:43.840804 204649 start.go:580] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: v2.1.5
RuntimeApiVersion: v1
I1119 22:35:43.840924 204649 ssh_runner.go:195] Run: containerd --version
I1119 22:35:43.863403 204649 ssh_runner.go:195] Run: containerd --version
I1119 22:35:43.892718 204649 out.go:179] * Preparing Kubernetes v1.28.0 on containerd 2.1.5 ...
I1119 22:35:43.895641 204649 cli_runner.go:164] Run: docker network inspect old-k8s-version-264160 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1119 22:35:43.912965 204649 ssh_runner.go:195] Run: grep 192.168.76.1 host.minikube.internal$ /etc/hosts
I1119 22:35:43.916790 204649 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.76.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1119 22:35:43.926772 204649 kubeadm.go:884] updating cluster {Name:old-k8s-version-264160 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-264160 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1119 22:35:43.926887 204649 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1119 22:35:43.926949 204649 ssh_runner.go:195] Run: sudo crictl images --output json
I1119 22:35:43.959370 204649 containerd.go:627] all images are preloaded for containerd runtime.
I1119 22:35:43.959391 204649 containerd.go:534] Images already preloaded, skipping extraction
I1119 22:35:43.959451 204649 ssh_runner.go:195] Run: sudo crictl images --output json
I1119 22:35:43.989251 204649 containerd.go:627] all images are preloaded for containerd runtime.
I1119 22:35:43.989276 204649 cache_images.go:86] Images are preloaded, skipping loading
I1119 22:35:43.989284 204649 kubeadm.go:935] updating node { 192.168.76.2 8443 v1.28.0 containerd true true} ...
I1119 22:35:43.989377 204649 kubeadm.go:947] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.28.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=old-k8s-version-264160 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.76.2
[Install]
config:
{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-264160 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1119 22:35:43.989454 204649 ssh_runner.go:195] Run: sudo crictl info
I1119 22:35:44.018509 204649 cni.go:84] Creating CNI manager for ""
I1119 22:35:44.018532 204649 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1119 22:35:44.018554 204649 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1119 22:35:44.018590 204649 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.76.2 APIServerPort:8443 KubernetesVersion:v1.28.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:old-k8s-version-264160 NodeName:old-k8s-version-264160 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.76.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.76.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt
StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1119 22:35:44.018720 204649 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.76.2
  bindPort: 8443
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
nodeRegistration:
  criSocket: unix:///run/containerd/containerd.sock
  name: "old-k8s-version-264160"
  kubeletExtraArgs:
    node-ip: 192.168.76.2
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
  certSANs: ["127.0.0.1", "localhost", "192.168.76.2"]
  extraArgs:
    enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
  extraArgs:
    allocate-node-cidrs: "true"
    leader-elect: "false"
scheduler:
  extraArgs:
    leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd
    extraArgs:
      proxy-refresh-interval: "70000"
kubernetesVersion: v1.28.0
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  x509:
    clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
  maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
  tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
  tcpCloseWaitTimeout: 0s
I1119 22:35:44.018791 204649 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.28.0
I1119 22:35:44.027774 204649 binaries.go:51] Found k8s binaries, skipping transfer
I1119 22:35:44.027843 204649 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1119 22:35:44.035977 204649 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (326 bytes)
I1119 22:35:44.049828 204649 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1119 22:35:44.063834 204649 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2176 bytes)
I1119 22:35:44.078459 204649 ssh_runner.go:195] Run: grep 192.168.76.2 control-plane.minikube.internal$ /etc/hosts
I1119 22:35:44.082544 204649 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.76.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1119 22:35:44.093549 204649 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1119 22:35:44.218127 204649 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1119 22:35:44.238847 204649 certs.go:69] Setting up /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160 for IP: 192.168.76.2
I1119 22:35:44.238867 204649 certs.go:195] generating shared ca certs ...
I1119 22:35:44.238885 204649 certs.go:227] acquiring lock for ca certs: {Name:mk76285c445bf14c1e73dedba3201c9181209ff4 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:35:44.239062 204649 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21918-2347/.minikube/ca.key
I1119 22:35:44.239112 204649 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21918-2347/.minikube/proxy-client-ca.key
I1119 22:35:44.239124 204649 certs.go:257] generating profile certs ...
I1119 22:35:44.239186 204649 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/client.key
I1119 22:35:44.239203 204649 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/client.crt with IP's: []
I1119 22:35:44.811737 204649 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/client.crt ...
I1119 22:35:44.811764 204649 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/client.crt: {Name:mk14e11ecda6c7214508a5ade0f9ee915e780f3e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:35:44.811951 204649 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/client.key ...
I1119 22:35:44.811960 204649 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/client.key: {Name:mk0adfc8036cdd3c163e4cffd5e262cb5308dfe9 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:35:44.812038 204649 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/apiserver.key.955d0b5b
I1119 22:35:44.812063 204649 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/apiserver.crt.955d0b5b with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.76.2]
I1119 22:35:45.101024 204649 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/apiserver.crt.955d0b5b ...
I1119 22:35:45.101056 204649 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/apiserver.crt.955d0b5b: {Name:mk5142ac1d579327ae160e83fc7f68b0f3557595 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:35:45.101255 204649 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/apiserver.key.955d0b5b ...
I1119 22:35:45.101267 204649 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/apiserver.key.955d0b5b: {Name:mkc12bee6747eface51cd5e77da3f942ad5e5618 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:35:45.101361 204649 certs.go:382] copying /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/apiserver.crt.955d0b5b -> /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/apiserver.crt
I1119 22:35:45.101462 204649 certs.go:386] copying /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/apiserver.key.955d0b5b -> /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/apiserver.key
I1119 22:35:45.101522 204649 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/proxy-client.key
I1119 22:35:45.101539 204649 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/proxy-client.crt with IP's: []
I1119 22:35:45.832941 204649 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/proxy-client.crt ...
I1119 22:35:45.832971 204649 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/proxy-client.crt: {Name:mk306cbc09a8a4cdf49bd23a7f735885d2e6d6d8 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:35:45.833166 204649 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/proxy-client.key ...
I1119 22:35:45.833185 204649 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/proxy-client.key: {Name:mk51455941ef13941a00f8719c0c4a50b2eaa3aa Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:35:45.833395 204649 certs.go:484] found cert: /home/jenkins/minikube-integration/21918-2347/.minikube/certs/4144.pem (1338 bytes)
W1119 22:35:45.833433 204649 certs.go:480] ignoring /home/jenkins/minikube-integration/21918-2347/.minikube/certs/4144_empty.pem, impossibly tiny 0 bytes
I1119 22:35:45.833442 204649 certs.go:484] found cert: /home/jenkins/minikube-integration/21918-2347/.minikube/certs/ca-key.pem (1675 bytes)
I1119 22:35:45.833468 204649 certs.go:484] found cert: /home/jenkins/minikube-integration/21918-2347/.minikube/certs/ca.pem (1082 bytes)
I1119 22:35:45.833497 204649 certs.go:484] found cert: /home/jenkins/minikube-integration/21918-2347/.minikube/certs/cert.pem (1123 bytes)
I1119 22:35:45.833529 204649 certs.go:484] found cert: /home/jenkins/minikube-integration/21918-2347/.minikube/certs/key.pem (1675 bytes)
I1119 22:35:45.833577 204649 certs.go:484] found cert: /home/jenkins/minikube-integration/21918-2347/.minikube/files/etc/ssl/certs/41442.pem (1708 bytes)
I1119 22:35:45.834165 204649 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-2347/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1119 22:35:45.856349 204649 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-2347/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1671 bytes)
I1119 22:35:45.877913 204649 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-2347/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1119 22:35:45.896516 204649 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-2347/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I1119 22:35:45.914586 204649 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1432 bytes)
I1119 22:35:45.933361 204649 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I1119 22:35:45.951038 204649 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1119 22:35:45.973047 204649 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I1119 22:35:45.994027 204649 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-2347/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1119 22:35:46.025730 204649 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-2347/.minikube/certs/4144.pem --> /usr/share/ca-certificates/4144.pem (1338 bytes)
I1119 22:35:46.045750 204649 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-2347/.minikube/files/etc/ssl/certs/41442.pem --> /usr/share/ca-certificates/41442.pem (1708 bytes)
I1119 22:35:46.073629 204649 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1119 22:35:46.087614 204649 ssh_runner.go:195] Run: openssl version
I1119 22:35:46.094872 204649 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1119 22:35:46.103931 204649 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1119 22:35:46.108400 204649 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 19 21:49 /usr/share/ca-certificates/minikubeCA.pem
I1119 22:35:46.108519 204649 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1119 22:35:46.165543 204649 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1119 22:35:46.174470 204649 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/4144.pem && ln -fs /usr/share/ca-certificates/4144.pem /etc/ssl/certs/4144.pem"
I1119 22:35:46.182680 204649 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/4144.pem
I1119 22:35:46.186577 204649 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 19 21:56 /usr/share/ca-certificates/4144.pem
I1119 22:35:46.186637 204649 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/4144.pem
I1119 22:35:46.228043 204649 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/4144.pem /etc/ssl/certs/51391683.0"
I1119 22:35:46.236269 204649 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/41442.pem && ln -fs /usr/share/ca-certificates/41442.pem /etc/ssl/certs/41442.pem"
I1119 22:35:46.244687 204649 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/41442.pem
I1119 22:35:46.248576 204649 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 19 21:56 /usr/share/ca-certificates/41442.pem
I1119 22:35:46.248696 204649 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/41442.pem
I1119 22:35:46.290804 204649 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/41442.pem /etc/ssl/certs/3ec20f2e.0"
I1119 22:35:46.299091 204649 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1119 22:35:46.302689 204649 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1119 22:35:46.302790 204649 kubeadm.go:401] StartCluster: {Name:old-k8s-version-264160 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-264160 Namespace:default APIServerHAVIP: APIServerName:minikube
CA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false Custo
mQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1119 22:35:46.302872 204649 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I1119 22:35:46.302930 204649 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I1119 22:35:46.341874 204649 cri.go:89] found id: ""
I1119 22:35:46.341955 204649 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1119 22:35:46.349861 204649 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1119 22:35:46.358624 204649 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I1119 22:35:46.358700 204649 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1119 22:35:46.366859 204649 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1119 22:35:46.366882 204649 kubeadm.go:158] found existing configuration files:
I1119 22:35:46.366956 204649 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1119 22:35:46.375053 204649 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1119 22:35:46.375118 204649 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1119 22:35:46.382569 204649 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1119 22:35:46.390549 204649 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1119 22:35:46.390660 204649 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1119 22:35:46.398378 204649 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1119 22:35:46.406002 204649 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1119 22:35:46.406127 204649 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1119 22:35:46.414558 204649 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1119 22:35:46.422462 204649 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1119 22:35:46.422528 204649 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1119 22:35:46.430234 204649 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.28.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1119 22:35:46.480821 204649 kubeadm.go:319] [init] Using Kubernetes version: v1.28.0
I1119 22:35:46.480973 204649 kubeadm.go:319] [preflight] Running pre-flight checks
I1119 22:35:46.518306 204649 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I1119 22:35:46.518408 204649 kubeadm.go:319] KERNEL_VERSION: 5.15.0-1084-aws
I1119 22:35:46.518469 204649 kubeadm.go:319] OS: Linux
I1119 22:35:46.518555 204649 kubeadm.go:319] CGROUPS_CPU: enabled
I1119 22:35:46.518627 204649 kubeadm.go:319] CGROUPS_CPUACCT: enabled
I1119 22:35:46.518704 204649 kubeadm.go:319] CGROUPS_CPUSET: enabled
I1119 22:35:46.518775 204649 kubeadm.go:319] CGROUPS_DEVICES: enabled
I1119 22:35:46.518848 204649 kubeadm.go:319] CGROUPS_FREEZER: enabled
I1119 22:35:46.518928 204649 kubeadm.go:319] CGROUPS_MEMORY: enabled
I1119 22:35:46.518993 204649 kubeadm.go:319] CGROUPS_PIDS: enabled
I1119 22:35:46.519065 204649 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I1119 22:35:46.519136 204649 kubeadm.go:319] CGROUPS_BLKIO: enabled
I1119 22:35:46.603387 204649 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1119 22:35:46.603532 204649 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1119 22:35:46.603659 204649 kubeadm.go:319] [preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
I1119 22:35:46.748614 204649 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1119 22:35:46.754520 204649 out.go:252] - Generating certificates and keys ...
I1119 22:35:46.754636 204649 kubeadm.go:319] [certs] Using existing ca certificate authority
I1119 22:35:46.754728 204649 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1119 22:35:47.362621 204649 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I1119 22:35:47.861152 204649 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I1119 22:35:48.578567 204649 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1119 22:35:48.709308 204649 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1119 22:35:49.572586 204649 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1119 22:35:49.572742 204649 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [localhost old-k8s-version-264160] and IPs [192.168.76.2 127.0.0.1 ::1]
I1119 22:35:50.286968 204649 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1119 22:35:50.287350 204649 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [localhost old-k8s-version-264160] and IPs [192.168.76.2 127.0.0.1 ::1]
I1119 22:35:50.729163 204649 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1119 22:35:51.087355 204649 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1119 22:35:51.301494 204649 kubeadm.go:319] [certs] Generating "sa" key and public key
I1119 22:35:51.301799 204649 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1119 22:35:52.439151 204649 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1119 22:35:52.767854 204649 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1119 22:35:53.170174 204649 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1119 22:35:53.873745 204649 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1119 22:35:53.874592 204649 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1119 22:35:53.877867 204649 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1119 22:35:53.883494 204649 out.go:252] - Booting up control plane ...
I1119 22:35:53.883605 204649 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1119 22:35:53.883687 204649 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1119 22:35:53.883756 204649 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1119 22:35:53.900950 204649 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1119 22:35:53.901278 204649 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1119 22:35:53.901523 204649 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1119 22:35:54.050697 204649 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
I1119 22:36:04.052724 204649 kubeadm.go:319] [apiclient] All control plane components are healthy after 10.003761 seconds
I1119 22:36:04.052869 204649 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1119 22:36:04.072130 204649 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1119 22:36:04.605781 204649 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1119 22:36:04.606002 204649 kubeadm.go:319] [mark-control-plane] Marking the node old-k8s-version-264160 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1119 22:36:05.122165 204649 kubeadm.go:319] [bootstrap-token] Using token: t3hgjm.t27pk8uf8r4mqrko
I1119 22:36:05.125207 204649 out.go:252] - Configuring RBAC rules ...
I1119 22:36:05.125347 204649 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1119 22:36:05.138372 204649 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1119 22:36:05.149292 204649 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1119 22:36:05.153962 204649 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1119 22:36:05.159111 204649 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1119 22:36:05.163924 204649 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1119 22:36:05.183969 204649 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1119 22:36:05.490668 204649 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1119 22:36:05.544743 204649 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1119 22:36:05.545712 204649 kubeadm.go:319]
I1119 22:36:05.545794 204649 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1119 22:36:05.545800 204649 kubeadm.go:319]
I1119 22:36:05.545881 204649 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1119 22:36:05.545886 204649 kubeadm.go:319]
I1119 22:36:05.545912 204649 kubeadm.go:319] mkdir -p $HOME/.kube
I1119 22:36:05.545975 204649 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1119 22:36:05.546029 204649 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1119 22:36:05.546036 204649 kubeadm.go:319]
I1119 22:36:05.546092 204649 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1119 22:36:05.546097 204649 kubeadm.go:319]
I1119 22:36:05.546192 204649 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1119 22:36:05.546198 204649 kubeadm.go:319]
I1119 22:36:05.546252 204649 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1119 22:36:05.546330 204649 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1119 22:36:05.546401 204649 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1119 22:36:05.546405 204649 kubeadm.go:319]
I1119 22:36:05.546493 204649 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1119 22:36:05.546572 204649 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1119 22:36:05.546577 204649 kubeadm.go:319]
I1119 22:36:05.546665 204649 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token t3hgjm.t27pk8uf8r4mqrko \
I1119 22:36:05.546773 204649 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:f3dc8233c963d7fa33b7a72da6102de3e0dbc1bf6e99b77f8426922389e565f9 \
I1119 22:36:05.546794 204649 kubeadm.go:319] --control-plane
I1119 22:36:05.546798 204649 kubeadm.go:319]
I1119 22:36:05.546886 204649 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1119 22:36:05.546890 204649 kubeadm.go:319]
I1119 22:36:05.546975 204649 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token t3hgjm.t27pk8uf8r4mqrko \
I1119 22:36:05.547080 204649 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:f3dc8233c963d7fa33b7a72da6102de3e0dbc1bf6e99b77f8426922389e565f9
I1119 22:36:05.551148 204649 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1084-aws\n", err: exit status 1
I1119 22:36:05.551265 204649 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1119 22:36:05.551281 204649 cni.go:84] Creating CNI manager for ""
I1119 22:36:05.551288 204649 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1119 22:36:05.554507 204649 out.go:179] * Configuring CNI (Container Networking Interface) ...
I1119 22:36:05.557507 204649 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I1119 22:36:05.576310 204649 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.28.0/kubectl ...
I1119 22:36:05.576331 204649 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I1119 22:36:05.593718 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I1119 22:36:06.658889 204649 ssh_runner.go:235] Completed: sudo /var/lib/minikube/binaries/v1.28.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml: (1.065138821s)
I1119 22:36:06.658975 204649 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1119 22:36:06.659094 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:06.659175 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes old-k8s-version-264160 minikube.k8s.io/updated_at=2025_11_19T22_36_06_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=08454a179ffa60c8ae500105aac58654b5cdef58 minikube.k8s.io/name=old-k8s-version-264160 minikube.k8s.io/primary=true
I1119 22:36:06.818009 204649 ops.go:34] apiserver oom_adj: -16
I1119 22:36:06.818101 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:07.318669 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:07.818290 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:08.318653 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:08.818829 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:09.318705 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:09.818670 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:10.318656 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:10.818343 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:11.318742 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:11.818660 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:12.318643 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:12.818204 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:13.318233 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:13.818478 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:14.318102 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:14.818178 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:15.318224 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:15.818601 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:16.319007 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:16.818836 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:17.318883 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:17.818083 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:18.005461 204649 kubeadm.go:1114] duration metric: took 11.346407343s to wait for elevateKubeSystemPrivileges
I1119 22:36:18.005498 204649 kubeadm.go:403] duration metric: took 31.702712181s to StartCluster
I1119 22:36:18.005516 204649 settings.go:142] acquiring lock: {Name:mk5c8f7d46662d574c7e53cf7b09709855a1e14f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:36:18.005603 204649 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21918-2347/kubeconfig
I1119 22:36:18.006647 204649 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21918-2347/kubeconfig: {Name:mk670f88d9cb1be22f05f7db4ddcfb97af791e42 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:36:18.006944 204649 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1119 22:36:18.006951 204649 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1119 22:36:18.007274 204649 config.go:182] Loaded profile config "old-k8s-version-264160": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1119 22:36:18.007313 204649 addons.go:512] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1119 22:36:18.007401 204649 addons.go:70] Setting storage-provisioner=true in profile "old-k8s-version-264160"
I1119 22:36:18.007419 204649 addons.go:239] Setting addon storage-provisioner=true in "old-k8s-version-264160"
I1119 22:36:18.007444 204649 host.go:66] Checking if "old-k8s-version-264160" exists ...
I1119 22:36:18.007919 204649 cli_runner.go:164] Run: docker container inspect old-k8s-version-264160 --format={{.State.Status}}
I1119 22:36:18.008446 204649 addons.go:70] Setting default-storageclass=true in profile "old-k8s-version-264160"
I1119 22:36:18.008469 204649 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "old-k8s-version-264160"
I1119 22:36:18.008780 204649 cli_runner.go:164] Run: docker container inspect old-k8s-version-264160 --format={{.State.Status}}
I1119 22:36:18.011866 204649 out.go:179] * Verifying Kubernetes components...
I1119 22:36:18.014838 204649 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1119 22:36:18.055880 204649 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1119 22:36:18.056763 204649 addons.go:239] Setting addon default-storageclass=true in "old-k8s-version-264160"
I1119 22:36:18.056800 204649 host.go:66] Checking if "old-k8s-version-264160" exists ...
I1119 22:36:18.057242 204649 cli_runner.go:164] Run: docker container inspect old-k8s-version-264160 --format={{.State.Status}}
I1119 22:36:18.059443 204649 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1119 22:36:18.059467 204649 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1119 22:36:18.059527 204649 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-264160
I1119 22:36:18.093613 204649 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1119 22:36:18.093726 204649 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33054 SSHKeyPath:/home/jenkins/minikube-integration/21918-2347/.minikube/machines/old-k8s-version-264160/id_rsa Username:docker}
I1119 22:36:18.095300 204649 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1119 22:36:18.095428 204649 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-264160
I1119 22:36:18.135800 204649 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33054 SSHKeyPath:/home/jenkins/minikube-integration/21918-2347/.minikube/machines/old-k8s-version-264160/id_rsa Username:docker}
I1119 22:36:18.357324 204649 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.76.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I1119 22:36:18.357451 204649 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1119 22:36:18.439741 204649 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1119 22:36:18.443940 204649 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1119 22:36:19.165631 204649 node_ready.go:35] waiting up to 6m0s for node "old-k8s-version-264160" to be "Ready" ...
I1119 22:36:19.165952 204649 start.go:977] {"host.minikube.internal": 192.168.76.1} host record injected into CoreDNS's ConfigMap
I1119 22:36:19.668262 204649 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (1.228448448s)
I1119 22:36:19.668305 204649 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: (1.224346607s)
I1119 22:36:19.682930 204649 kapi.go:214] "coredns" deployment in "kube-system" namespace and "old-k8s-version-264160" context rescaled to 1 replicas
I1119 22:36:19.691208 204649 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
I1119 22:36:19.694506 204649 addons.go:515] duration metric: took 1.687167131s for enable addons: enabled=[storage-provisioner default-storageclass]
W1119 22:36:21.170389 204649 node_ready.go:57] node "old-k8s-version-264160" has "Ready":"False" status (will retry)
W1119 22:36:23.669181 204649 node_ready.go:57] node "old-k8s-version-264160" has "Ready":"False" status (will retry)
W1119 22:36:26.169468 204649 node_ready.go:57] node "old-k8s-version-264160" has "Ready":"False" status (will retry)
W1119 22:36:28.668771 204649 node_ready.go:57] node "old-k8s-version-264160" has "Ready":"False" status (will retry)
W1119 22:36:30.669387 204649 node_ready.go:57] node "old-k8s-version-264160" has "Ready":"False" status (will retry)
I1119 22:36:31.179436 204649 node_ready.go:49] node "old-k8s-version-264160" is "Ready"
I1119 22:36:31.179462 204649 node_ready.go:38] duration metric: took 12.013798629s for node "old-k8s-version-264160" to be "Ready" ...
I1119 22:36:31.179475 204649 api_server.go:52] waiting for apiserver process to appear ...
I1119 22:36:31.179538 204649 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1119 22:36:31.199071 204649 api_server.go:72] duration metric: took 13.192088991s to wait for apiserver process to appear ...
I1119 22:36:31.199094 204649 api_server.go:88] waiting for apiserver healthz status ...
I1119 22:36:31.199116 204649 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1119 22:36:31.209770 204649 api_server.go:279] https://192.168.76.2:8443/healthz returned 200:
ok
I1119 22:36:31.211739 204649 api_server.go:141] control plane version: v1.28.0
I1119 22:36:31.211767 204649 api_server.go:131] duration metric: took 12.666386ms to wait for apiserver health ...
I1119 22:36:31.211777 204649 system_pods.go:43] waiting for kube-system pods to appear ...
I1119 22:36:31.216012 204649 system_pods.go:59] 8 kube-system pods found
I1119 22:36:31.216054 204649 system_pods.go:61] "coredns-5dd5756b68-vz7zx" [7e7645ad-49a9-4f0c-89cc-128538e4d95c] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1119 22:36:31.216062 204649 system_pods.go:61] "etcd-old-k8s-version-264160" [1bd42d38-2921-483d-b656-d1f12178141b] Running
I1119 22:36:31.216068 204649 system_pods.go:61] "kindnet-m9nqq" [2f9f6fbb-c725-49fd-ba3a-c84a7640aac2] Running
I1119 22:36:31.216073 204649 system_pods.go:61] "kube-apiserver-old-k8s-version-264160" [454724a2-4fd6-4dc1-9cc1-a4b60944a9df] Running
I1119 22:36:31.216084 204649 system_pods.go:61] "kube-controller-manager-old-k8s-version-264160" [a5ad5849-09a1-43bd-861a-8c92712b0a14] Running
I1119 22:36:31.216088 204649 system_pods.go:61] "kube-proxy-zzmnr" [3ee1645f-fba5-4206-bb83-70d298a4c5ac] Running
I1119 22:36:31.216100 204649 system_pods.go:61] "kube-scheduler-old-k8s-version-264160" [fbad20e1-7729-4503-b929-bc32986a00e8] Running
I1119 22:36:31.216106 204649 system_pods.go:61] "storage-provisioner" [8e2dda77-5a6d-4796-926b-5a06158f8cdf] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1119 22:36:31.216112 204649 system_pods.go:74] duration metric: took 4.329001ms to wait for pod list to return data ...
I1119 22:36:31.216127 204649 default_sa.go:34] waiting for default service account to be created ...
I1119 22:36:31.219246 204649 default_sa.go:45] found service account: "default"
I1119 22:36:31.219283 204649 default_sa.go:55] duration metric: took 3.150461ms for default service account to be created ...
I1119 22:36:31.219293 204649 system_pods.go:116] waiting for k8s-apps to be running ...
I1119 22:36:31.226730 204649 system_pods.go:86] 8 kube-system pods found
I1119 22:36:31.226780 204649 system_pods.go:89] "coredns-5dd5756b68-vz7zx" [7e7645ad-49a9-4f0c-89cc-128538e4d95c] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1119 22:36:31.226788 204649 system_pods.go:89] "etcd-old-k8s-version-264160" [1bd42d38-2921-483d-b656-d1f12178141b] Running
I1119 22:36:31.226795 204649 system_pods.go:89] "kindnet-m9nqq" [2f9f6fbb-c725-49fd-ba3a-c84a7640aac2] Running
I1119 22:36:31.226801 204649 system_pods.go:89] "kube-apiserver-old-k8s-version-264160" [454724a2-4fd6-4dc1-9cc1-a4b60944a9df] Running
I1119 22:36:31.226820 204649 system_pods.go:89] "kube-controller-manager-old-k8s-version-264160" [a5ad5849-09a1-43bd-861a-8c92712b0a14] Running
I1119 22:36:31.226840 204649 system_pods.go:89] "kube-proxy-zzmnr" [3ee1645f-fba5-4206-bb83-70d298a4c5ac] Running
I1119 22:36:31.226854 204649 system_pods.go:89] "kube-scheduler-old-k8s-version-264160" [fbad20e1-7729-4503-b929-bc32986a00e8] Running
I1119 22:36:31.226880 204649 system_pods.go:89] "storage-provisioner" [8e2dda77-5a6d-4796-926b-5a06158f8cdf] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1119 22:36:31.226914 204649 retry.go:31] will retry after 302.789316ms: missing components: kube-dns
I1119 22:36:31.534752 204649 system_pods.go:86] 8 kube-system pods found
I1119 22:36:31.534798 204649 system_pods.go:89] "coredns-5dd5756b68-vz7zx" [7e7645ad-49a9-4f0c-89cc-128538e4d95c] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1119 22:36:31.534805 204649 system_pods.go:89] "etcd-old-k8s-version-264160" [1bd42d38-2921-483d-b656-d1f12178141b] Running
I1119 22:36:31.534811 204649 system_pods.go:89] "kindnet-m9nqq" [2f9f6fbb-c725-49fd-ba3a-c84a7640aac2] Running
I1119 22:36:31.534815 204649 system_pods.go:89] "kube-apiserver-old-k8s-version-264160" [454724a2-4fd6-4dc1-9cc1-a4b60944a9df] Running
I1119 22:36:31.534821 204649 system_pods.go:89] "kube-controller-manager-old-k8s-version-264160" [a5ad5849-09a1-43bd-861a-8c92712b0a14] Running
I1119 22:36:31.534825 204649 system_pods.go:89] "kube-proxy-zzmnr" [3ee1645f-fba5-4206-bb83-70d298a4c5ac] Running
I1119 22:36:31.534829 204649 system_pods.go:89] "kube-scheduler-old-k8s-version-264160" [fbad20e1-7729-4503-b929-bc32986a00e8] Running
I1119 22:36:31.534838 204649 system_pods.go:89] "storage-provisioner" [8e2dda77-5a6d-4796-926b-5a06158f8cdf] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1119 22:36:31.534852 204649 retry.go:31] will retry after 260.752212ms: missing components: kube-dns
I1119 22:36:31.802433 204649 system_pods.go:86] 8 kube-system pods found
I1119 22:36:31.802477 204649 system_pods.go:89] "coredns-5dd5756b68-vz7zx" [7e7645ad-49a9-4f0c-89cc-128538e4d95c] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1119 22:36:31.802484 204649 system_pods.go:89] "etcd-old-k8s-version-264160" [1bd42d38-2921-483d-b656-d1f12178141b] Running
I1119 22:36:31.802492 204649 system_pods.go:89] "kindnet-m9nqq" [2f9f6fbb-c725-49fd-ba3a-c84a7640aac2] Running
I1119 22:36:31.802496 204649 system_pods.go:89] "kube-apiserver-old-k8s-version-264160" [454724a2-4fd6-4dc1-9cc1-a4b60944a9df] Running
I1119 22:36:31.802502 204649 system_pods.go:89] "kube-controller-manager-old-k8s-version-264160" [a5ad5849-09a1-43bd-861a-8c92712b0a14] Running
I1119 22:36:31.802506 204649 system_pods.go:89] "kube-proxy-zzmnr" [3ee1645f-fba5-4206-bb83-70d298a4c5ac] Running
I1119 22:36:31.802510 204649 system_pods.go:89] "kube-scheduler-old-k8s-version-264160" [fbad20e1-7729-4503-b929-bc32986a00e8] Running
I1119 22:36:31.802517 204649 system_pods.go:89] "storage-provisioner" [8e2dda77-5a6d-4796-926b-5a06158f8cdf] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1119 22:36:31.802540 204649 retry.go:31] will retry after 341.00697ms: missing components: kube-dns
I1119 22:36:32.148247 204649 system_pods.go:86] 8 kube-system pods found
I1119 22:36:32.148281 204649 system_pods.go:89] "coredns-5dd5756b68-vz7zx" [7e7645ad-49a9-4f0c-89cc-128538e4d95c] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1119 22:36:32.148298 204649 system_pods.go:89] "etcd-old-k8s-version-264160" [1bd42d38-2921-483d-b656-d1f12178141b] Running
I1119 22:36:32.148304 204649 system_pods.go:89] "kindnet-m9nqq" [2f9f6fbb-c725-49fd-ba3a-c84a7640aac2] Running
I1119 22:36:32.148309 204649 system_pods.go:89] "kube-apiserver-old-k8s-version-264160" [454724a2-4fd6-4dc1-9cc1-a4b60944a9df] Running
I1119 22:36:32.148314 204649 system_pods.go:89] "kube-controller-manager-old-k8s-version-264160" [a5ad5849-09a1-43bd-861a-8c92712b0a14] Running
I1119 22:36:32.148320 204649 system_pods.go:89] "kube-proxy-zzmnr" [3ee1645f-fba5-4206-bb83-70d298a4c5ac] Running
I1119 22:36:32.148329 204649 system_pods.go:89] "kube-scheduler-old-k8s-version-264160" [fbad20e1-7729-4503-b929-bc32986a00e8] Running
I1119 22:36:32.148333 204649 system_pods.go:89] "storage-provisioner" [8e2dda77-5a6d-4796-926b-5a06158f8cdf] Running
I1119 22:36:32.148348 204649 system_pods.go:126] duration metric: took 929.047421ms to wait for k8s-apps to be running ...
I1119 22:36:32.148356 204649 system_svc.go:44] waiting for kubelet service to be running ....
I1119 22:36:32.148423 204649 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1119 22:36:32.175720 204649 system_svc.go:56] duration metric: took 27.353086ms WaitForService to wait for kubelet
I1119 22:36:32.175754 204649 kubeadm.go:587] duration metric: took 14.168776732s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1119 22:36:32.175782 204649 node_conditions.go:102] verifying NodePressure condition ...
I1119 22:36:32.178856 204649 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
I1119 22:36:32.178889 204649 node_conditions.go:123] node cpu capacity is 2
I1119 22:36:32.178903 204649 node_conditions.go:105] duration metric: took 3.115367ms to run NodePressure ...
I1119 22:36:32.178915 204649 start.go:242] waiting for startup goroutines ...
I1119 22:36:32.178933 204649 start.go:247] waiting for cluster config update ...
I1119 22:36:32.178949 204649 start.go:256] writing updated cluster config ...
I1119 22:36:32.179275 204649 ssh_runner.go:195] Run: rm -f paused
I1119 22:36:32.186678 204649 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1119 22:36:32.192039 204649 pod_ready.go:83] waiting for pod "coredns-5dd5756b68-vz7zx" in "kube-system" namespace to be "Ready" or be gone ...
W1119 22:36:34.198510 204649 pod_ready.go:104] pod "coredns-5dd5756b68-vz7zx" is not "Ready", error: <nil>
W1119 22:36:36.198937 204649 pod_ready.go:104] pod "coredns-5dd5756b68-vz7zx" is not "Ready", error: <nil>
W1119 22:36:38.698791 204649 pod_ready.go:104] pod "coredns-5dd5756b68-vz7zx" is not "Ready", error: <nil>
W1119 22:36:41.198015 204649 pod_ready.go:104] pod "coredns-5dd5756b68-vz7zx" is not "Ready", error: <nil>
I1119 22:36:41.698204 204649 pod_ready.go:94] pod "coredns-5dd5756b68-vz7zx" is "Ready"
I1119 22:36:41.698233 204649 pod_ready.go:86] duration metric: took 9.50616482s for pod "coredns-5dd5756b68-vz7zx" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:36:41.701276 204649 pod_ready.go:83] waiting for pod "etcd-old-k8s-version-264160" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:36:41.706418 204649 pod_ready.go:94] pod "etcd-old-k8s-version-264160" is "Ready"
I1119 22:36:41.706451 204649 pod_ready.go:86] duration metric: took 5.148925ms for pod "etcd-old-k8s-version-264160" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:36:41.709706 204649 pod_ready.go:83] waiting for pod "kube-apiserver-old-k8s-version-264160" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:36:41.715470 204649 pod_ready.go:94] pod "kube-apiserver-old-k8s-version-264160" is "Ready"
I1119 22:36:41.715499 204649 pod_ready.go:86] duration metric: took 5.766499ms for pod "kube-apiserver-old-k8s-version-264160" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:36:41.718802 204649 pod_ready.go:83] waiting for pod "kube-controller-manager-old-k8s-version-264160" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:36:41.896506 204649 pod_ready.go:94] pod "kube-controller-manager-old-k8s-version-264160" is "Ready"
I1119 22:36:41.896538 204649 pod_ready.go:86] duration metric: took 177.710699ms for pod "kube-controller-manager-old-k8s-version-264160" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:36:42.096924 204649 pod_ready.go:83] waiting for pod "kube-proxy-zzmnr" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:36:42.496606 204649 pod_ready.go:94] pod "kube-proxy-zzmnr" is "Ready"
I1119 22:36:42.496635 204649 pod_ready.go:86] duration metric: took 399.679699ms for pod "kube-proxy-zzmnr" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:36:42.696640 204649 pod_ready.go:83] waiting for pod "kube-scheduler-old-k8s-version-264160" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:36:43.096504 204649 pod_ready.go:94] pod "kube-scheduler-old-k8s-version-264160" is "Ready"
I1119 22:36:43.096533 204649 pod_ready.go:86] duration metric: took 399.863388ms for pod "kube-scheduler-old-k8s-version-264160" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:36:43.096547 204649 pod_ready.go:40] duration metric: took 10.90982149s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1119 22:36:43.158402 204649 start.go:628] kubectl: 1.33.2, cluster: 1.28.0 (minor skew: 5)
I1119 22:36:43.161490 204649 out.go:203]
W1119 22:36:43.164427 204649 out.go:285] ! /usr/local/bin/kubectl is version 1.33.2, which may have incompatibilities with Kubernetes 1.28.0.
I1119 22:36:43.167321 204649 out.go:179] - Want kubectl v1.28.0? Try 'minikube kubectl -- get pods -A'
I1119 22:36:43.171088 204649 out.go:179] * Done! kubectl is now configured to use "old-k8s-version-264160" cluster and "default" namespace by default
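The start log above reports a client/server skew: the host kubectl is 1.33.2 while the cluster runs Kubernetes 1.28.0, and minikube prints a hint about its bundled kubectl. A minimal way to act on that hint against this profile is sketched below; the binary path and the -p profile flag are taken from commands elsewhere in this log, and the invocation is illustrative rather than part of the harness output.

    # query the cluster through minikube's version-matched kubectl (illustrative)
    out/minikube-linux-arm64 kubectl -p old-k8s-version-264160 -- get pods -A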
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD NAMESPACE
85ec8d942d110 1611cd07b61d5 7 seconds ago Running busybox 0 1b11528fdca0b busybox default
0b5a95c859ac3 97e04611ad434 21 seconds ago Running coredns 0 aa34d2193fc4c coredns-5dd5756b68-vz7zx kube-system
f62b743b6725e ba04bb24b9575 21 seconds ago Running storage-provisioner 0 2bededbe57122 storage-provisioner kube-system
3dc4045566ee8 b1a8c6f707935 33 seconds ago Running kindnet-cni 0 232dd2b4b80b5 kindnet-m9nqq kube-system
e5c22c9877dd1 940f54a5bcae9 35 seconds ago Running kube-proxy 0 f45778acb4883 kube-proxy-zzmnr kube-system
0aa1bd28b6073 762dce4090c5f 57 seconds ago Running kube-scheduler 0 4b7124d3d4b79 kube-scheduler-old-k8s-version-264160 kube-system
83a25278b16a7 00543d2fe5d71 57 seconds ago Running kube-apiserver 0 67f5df81322ce kube-apiserver-old-k8s-version-264160 kube-system
9ce9313d9aae4 46cc66ccc7c19 57 seconds ago Running kube-controller-manager 0 0783ca7945d35 kube-controller-manager-old-k8s-version-264160 kube-system
85f86fccea082 9cdd6470f48c8 57 seconds ago Running etcd 0 4969a45c845f9 etcd-old-k8s-version-264160 kube-system
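The container listing above is a point-in-time snapshot from the node's CRI runtime. If it needs to be re-collected while the profile is still up, one option is to run crictl over minikube ssh; the command below is an illustrative sketch that assumes crictl is available on the node image, not something the harness itself logged.

    # list all CRI containers on the node (illustrative re-collection)
    out/minikube-linux-arm64 -p old-k8s-version-264160 ssh -- sudo crictl ps -a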
==> containerd <==
Nov 19 22:36:31 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:31.649388743Z" level=info msg="CreateContainer within sandbox \"aa34d2193fc4cf037239bc48a6fac96674b060cb63b8de7320bb53007ec52479\" for container &ContainerMetadata{Name:coredns,Attempt:0,}"
Nov 19 22:36:31 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:31.651192399Z" level=info msg="connecting to shim f62b743b6725ec9ff1e91e664da6c9ce15d837afbab3608cc02fec3c9bd3d929" address="unix:///run/containerd/s/1693fe8eea8ad33a7610805dc3ed40de55c61613614162362386d2386e86ea05" protocol=ttrpc version=3
Nov 19 22:36:31 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:31.675391127Z" level=info msg="Container 0b5a95c859ac383d11c4aa9fb013d9cb4c21b0ac201d6a26cc3ec130b9027e9f: CDI devices from CRI Config.CDIDevices: []"
Nov 19 22:36:31 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:31.690874899Z" level=info msg="CreateContainer within sandbox \"aa34d2193fc4cf037239bc48a6fac96674b060cb63b8de7320bb53007ec52479\" for &ContainerMetadata{Name:coredns,Attempt:0,} returns container id \"0b5a95c859ac383d11c4aa9fb013d9cb4c21b0ac201d6a26cc3ec130b9027e9f\""
Nov 19 22:36:31 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:31.691891617Z" level=info msg="StartContainer for \"0b5a95c859ac383d11c4aa9fb013d9cb4c21b0ac201d6a26cc3ec130b9027e9f\""
Nov 19 22:36:31 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:31.692987039Z" level=info msg="connecting to shim 0b5a95c859ac383d11c4aa9fb013d9cb4c21b0ac201d6a26cc3ec130b9027e9f" address="unix:///run/containerd/s/ba4b3d499342aaf3ebd6be16fa5ad2a140167ea49a534a0a812a3977c5dcf983" protocol=ttrpc version=3
Nov 19 22:36:31 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:31.744679204Z" level=info msg="StartContainer for \"f62b743b6725ec9ff1e91e664da6c9ce15d837afbab3608cc02fec3c9bd3d929\" returns successfully"
Nov 19 22:36:31 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:31.792512880Z" level=info msg="StartContainer for \"0b5a95c859ac383d11c4aa9fb013d9cb4c21b0ac201d6a26cc3ec130b9027e9f\" returns successfully"
Nov 19 22:36:43 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:43.710209778Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:2af6deb4-937f-4b9b-9de6-995e75a080b8,Namespace:default,Attempt:0,}"
Nov 19 22:36:43 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:43.790459559Z" level=info msg="connecting to shim 1b11528fdca0ba74e5c7786578d6850eb5b37f9540b5e04e610639ce7fbd811f" address="unix:///run/containerd/s/b036a265eb01a921a8d2ed1a42211f4774df4a741b42e6007a96fa06394b6381" namespace=k8s.io protocol=ttrpc version=3
Nov 19 22:36:43 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:43.849747951Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:2af6deb4-937f-4b9b-9de6-995e75a080b8,Namespace:default,Attempt:0,} returns sandbox id \"1b11528fdca0ba74e5c7786578d6850eb5b37f9540b5e04e610639ce7fbd811f\""
Nov 19 22:36:43 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:43.854324085Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\""
Nov 19 22:36:46 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:46.047353549Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 19 22:36:46 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:46.049424358Z" level=info msg="stop pulling image gcr.io/k8s-minikube/busybox:1.28.4-glibc: active requests=0, bytes read=1937184"
Nov 19 22:36:46 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:46.052705979Z" level=info msg="ImageCreate event name:\"sha256:1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 19 22:36:46 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:46.058110078Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 19 22:36:46 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:46.059158943Z" level=info msg="Pulled image \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" with image id \"sha256:1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c\", repo tag \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\", repo digest \"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\", size \"1935750\" in 2.204520106s"
Nov 19 22:36:46 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:46.059209750Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" returns image reference \"sha256:1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c\""
Nov 19 22:36:46 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:46.063270047Z" level=info msg="CreateContainer within sandbox \"1b11528fdca0ba74e5c7786578d6850eb5b37f9540b5e04e610639ce7fbd811f\" for container &ContainerMetadata{Name:busybox,Attempt:0,}"
Nov 19 22:36:46 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:46.078717460Z" level=info msg="Container 85ec8d942d1102ad7f23f0923c0afa921c51c4b09ac0f93dc33203a257d7ca57: CDI devices from CRI Config.CDIDevices: []"
Nov 19 22:36:46 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:46.090943881Z" level=info msg="CreateContainer within sandbox \"1b11528fdca0ba74e5c7786578d6850eb5b37f9540b5e04e610639ce7fbd811f\" for &ContainerMetadata{Name:busybox,Attempt:0,} returns container id \"85ec8d942d1102ad7f23f0923c0afa921c51c4b09ac0f93dc33203a257d7ca57\""
Nov 19 22:36:46 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:46.091676862Z" level=info msg="StartContainer for \"85ec8d942d1102ad7f23f0923c0afa921c51c4b09ac0f93dc33203a257d7ca57\""
Nov 19 22:36:46 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:46.092550045Z" level=info msg="connecting to shim 85ec8d942d1102ad7f23f0923c0afa921c51c4b09ac0f93dc33203a257d7ca57" address="unix:///run/containerd/s/b036a265eb01a921a8d2ed1a42211f4774df4a741b42e6007a96fa06394b6381" protocol=ttrpc version=3
Nov 19 22:36:46 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:46.167983720Z" level=info msg="StartContainer for \"85ec8d942d1102ad7f23f0923c0afa921c51c4b09ac0f93dc33203a257d7ca57\" returns successfully"
Nov 19 22:36:52 old-k8s-version-264160 containerd[760]: E1119 22:36:52.581929 760 websocket.go:100] "Unhandled Error" err="unable to upgrade websocket connection: websocket server finished before becoming ready" logger="UnhandledError"
==> coredns [0b5a95c859ac383d11c4aa9fb013d9cb4c21b0ac201d6a26cc3ec130b9027e9f] <==
.:53
[INFO] plugin/reload: Running configuration SHA512 = b7aacdf6a6aa730aafe4d018cac9b7b5ecfb346cba84a99f64521f87aef8b4958639c1cf97967716465791d05bd38f372615327b7cb1d93c850bae532744d54d
CoreDNS-1.10.1
linux/arm64, go1.20, 055b2c3
[INFO] 127.0.0.1:51463 - 23570 "HINFO IN 6404155507127924057.1273287447177964912. udp 57 false 512" NXDOMAIN qr,rd,ra 57 0.026393207s
==> describe nodes <==
Name: old-k8s-version-264160
Roles: control-plane
Labels: beta.kubernetes.io/arch=arm64
beta.kubernetes.io/os=linux
kubernetes.io/arch=arm64
kubernetes.io/hostname=old-k8s-version-264160
kubernetes.io/os=linux
minikube.k8s.io/commit=08454a179ffa60c8ae500105aac58654b5cdef58
minikube.k8s.io/name=old-k8s-version-264160
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_11_19T22_36_06_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Wed, 19 Nov 2025 22:36:02 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: old-k8s-version-264160
AcquireTime: <unset>
RenewTime: Wed, 19 Nov 2025 22:36:46 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Wed, 19 Nov 2025 22:36:36 +0000 Wed, 19 Nov 2025 22:35:57 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Wed, 19 Nov 2025 22:36:36 +0000 Wed, 19 Nov 2025 22:35:57 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Wed, 19 Nov 2025 22:36:36 +0000 Wed, 19 Nov 2025 22:35:57 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Wed, 19 Nov 2025 22:36:36 +0000 Wed, 19 Nov 2025 22:36:31 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.76.2
Hostname: old-k8s-version-264160
Capacity:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022296Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022296Ki
pods: 110
System Info:
Machine ID: 2de5c7cc592a67801eaa2fbe691dd049
System UUID: b680c3d2-ce1c-409c-bfdc-4a24b39315bd
Boot ID: b3875353-65b3-44b7-ad72-afadd7e2486a
Kernel Version: 5.15.0-1084-aws
OS Image: Debian GNU/Linux 12 (bookworm)
Operating System: linux
Architecture: arm64
Container Runtime Version: containerd://2.1.5
Kubelet Version: v1.28.0
Kube-Proxy Version: v1.28.0
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (9 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
default busybox 0 (0%) 0 (0%) 0 (0%) 0 (0%) 10s
kube-system coredns-5dd5756b68-vz7zx 100m (5%) 0 (0%) 70Mi (0%) 170Mi (2%) 36s
kube-system etcd-old-k8s-version-264160 100m (5%) 0 (0%) 100Mi (1%) 0 (0%) 48s
kube-system kindnet-m9nqq 100m (5%) 100m (5%) 50Mi (0%) 50Mi (0%) 36s
kube-system kube-apiserver-old-k8s-version-264160 250m (12%) 0 (0%) 0 (0%) 0 (0%) 49s
kube-system kube-controller-manager-old-k8s-version-264160 200m (10%) 0 (0%) 0 (0%) 0 (0%) 48s
kube-system kube-proxy-zzmnr 0 (0%) 0 (0%) 0 (0%) 0 (0%) 36s
kube-system kube-scheduler-old-k8s-version-264160 100m (5%) 0 (0%) 0 (0%) 0 (0%) 50s
kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 34s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 850m (42%) 100m (5%)
memory 220Mi (2%) 220Mi (2%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
hugepages-32Mi 0 (0%) 0 (0%)
hugepages-64Ki 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 34s kube-proxy
Normal NodeHasSufficientMemory 58s (x8 over 58s) kubelet Node old-k8s-version-264160 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 58s (x8 over 58s) kubelet Node old-k8s-version-264160 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 58s (x7 over 58s) kubelet Node old-k8s-version-264160 status is now: NodeHasSufficientPID
Normal NodeAllocatableEnforced 58s kubelet Updated Node Allocatable limit across pods
Normal Starting 48s kubelet Starting kubelet.
Normal NodeHasSufficientMemory 48s kubelet Node old-k8s-version-264160 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 48s kubelet Node old-k8s-version-264160 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 48s kubelet Node old-k8s-version-264160 status is now: NodeHasSufficientPID
Normal NodeAllocatableEnforced 48s kubelet Updated Node Allocatable limit across pods
Normal RegisteredNode 37s node-controller Node old-k8s-version-264160 event: Registered Node old-k8s-version-264160 in Controller
Normal NodeReady 22s kubelet Node old-k8s-version-264160 status is now: NodeReady
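The node description above can be regenerated at any time with kubectl, assuming the old-k8s-version-264160 context used throughout this log is still present in the kubeconfig; the command below is illustrative and mirrors what this section shows.

    # reproduce the node summary shown above (illustrative)
    kubectl --context old-k8s-version-264160 describe node old-k8s-version-264160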
==> dmesg <==
[Nov19 21:17] ACPI: SRAT not present
[ +0.000000] ACPI: SRAT not present
[ +0.000000] SPI driver altr_a10sr has no spi_device_id for altr,a10sr
[ +0.032038] device-mapper: core: CONFIG_IMA_DISABLE_HTABLE is disabled. Duplicate IMA measurements will not be recorded in the IMA log.
[Nov19 21:18] systemd[1]: Configuration file /run/systemd/system/netplan-ovs-cleanup.service is marked world-inaccessible. This has no effect as configuration data is accessible via APIs without restrictions. Proceeding anyway.
[ +0.034282] systemd[1]: /lib/systemd/system/snapd.service:23: Unknown key name 'RestartMode' in section 'Service', ignoring.
[ +0.730183] ena 0000:00:05.0: LLQ is not supported Fallback to host mode policy.
[ +6.763794] kauditd_printk_skb: 36 callbacks suppressed
[Nov19 21:50] hrtimer: interrupt took 11278311 ns
==> etcd [85f86fccea0828d06ebe49ecd748897b5c79764ef02605e9b0dcfe4d0da55086] <==
{"level":"info","ts":"2025-11-19T22:35:56.498496Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 switched to configuration voters=(16896983918768216326)"}
{"level":"info","ts":"2025-11-19T22:35:56.498586Z","caller":"membership/cluster.go:421","msg":"added member","cluster-id":"6f20f2c4b2fb5f8a","local-member-id":"ea7e25599daad906","added-peer-id":"ea7e25599daad906","added-peer-peer-urls":["https://192.168.76.2:2380"]}
{"level":"info","ts":"2025-11-19T22:35:56.499212Z","caller":"embed/etcd.go:726","msg":"starting with client TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/server.crt, key = /var/lib/minikube/certs/etcd/server.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
{"level":"info","ts":"2025-11-19T22:35:56.499356Z","caller":"embed/etcd.go:597","msg":"serving peer traffic","address":"192.168.76.2:2380"}
{"level":"info","ts":"2025-11-19T22:35:56.49937Z","caller":"embed/etcd.go:569","msg":"cmux::serve","address":"192.168.76.2:2380"}
{"level":"info","ts":"2025-11-19T22:35:56.500029Z","caller":"embed/etcd.go:278","msg":"now serving peer/client/metrics","local-member-id":"ea7e25599daad906","initial-advertise-peer-urls":["https://192.168.76.2:2380"],"listen-peer-urls":["https://192.168.76.2:2380"],"advertise-client-urls":["https://192.168.76.2:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.76.2:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
{"level":"info","ts":"2025-11-19T22:35:56.500058Z","caller":"embed/etcd.go:855","msg":"serving metrics","address":"http://127.0.0.1:2381"}
{"level":"info","ts":"2025-11-19T22:35:57.378189Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 is starting a new election at term 1"}
{"level":"info","ts":"2025-11-19T22:35:57.378405Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 became pre-candidate at term 1"}
{"level":"info","ts":"2025-11-19T22:35:57.378513Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 received MsgPreVoteResp from ea7e25599daad906 at term 1"}
{"level":"info","ts":"2025-11-19T22:35:57.378604Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 became candidate at term 2"}
{"level":"info","ts":"2025-11-19T22:35:57.378648Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 received MsgVoteResp from ea7e25599daad906 at term 2"}
{"level":"info","ts":"2025-11-19T22:35:57.378765Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 became leader at term 2"}
{"level":"info","ts":"2025-11-19T22:35:57.378861Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: ea7e25599daad906 elected leader ea7e25599daad906 at term 2"}
{"level":"info","ts":"2025-11-19T22:35:57.380547Z","caller":"etcdserver/server.go:2571","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-19T22:35:57.381686Z","caller":"etcdserver/server.go:2062","msg":"published local member to cluster through raft","local-member-id":"ea7e25599daad906","local-member-attributes":"{Name:old-k8s-version-264160 ClientURLs:[https://192.168.76.2:2379]}","request-path":"/0/members/ea7e25599daad906/attributes","cluster-id":"6f20f2c4b2fb5f8a","publish-timeout":"7s"}
{"level":"info","ts":"2025-11-19T22:35:57.381779Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-19T22:35:57.385726Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"6f20f2c4b2fb5f8a","local-member-id":"ea7e25599daad906","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-19T22:35:57.385955Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-19T22:35:57.386049Z","caller":"etcdserver/server.go:2595","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-19T22:35:57.386901Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2025-11-19T22:35:57.38702Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-19T22:35:57.387495Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2025-11-19T22:35:57.38756Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2025-11-19T22:35:57.38824Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.76.2:2379"}
==> kernel <==
22:36:53 up 1:18, 0 user, load average: 2.22, 3.50, 2.75
Linux old-k8s-version-264160 5.15.0-1084-aws #91~20.04.1-Ubuntu SMP Fri May 2 07:00:04 UTC 2025 aarch64 GNU/Linux
PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"
==> kindnet [3dc4045566ee801891a80913f3c0d08405af235938655312d13ffdb5bece221c] <==
I1119 22:36:20.789101 1 main.go:109] connected to apiserver: https://10.96.0.1:443
I1119 22:36:20.789364 1 main.go:139] hostIP = 192.168.76.2
podIP = 192.168.76.2
I1119 22:36:20.789559 1 main.go:148] setting mtu 1500 for CNI
I1119 22:36:20.789578 1 main.go:178] kindnetd IP family: "ipv4"
I1119 22:36:20.789592 1 main.go:182] noMask IPv4 subnets: [10.244.0.0/16]
time="2025-11-19T22:36:20Z" level=info msg="Created plugin 10-kube-network-policies (kindnetd, handles RunPodSandbox,RemovePodSandbox)"
I1119 22:36:20.990706 1 controller.go:377] "Starting controller" name="kube-network-policies"
I1119 22:36:20.990731 1 controller.go:381] "Waiting for informer caches to sync"
I1119 22:36:20.990740 1 shared_informer.go:350] "Waiting for caches to sync" controller="kube-network-policies"
I1119 22:36:20.992039 1 controller.go:390] nri plugin exited: failed to connect to NRI service: dial unix /var/run/nri/nri.sock: connect: no such file or directory
I1119 22:36:21.190870 1 shared_informer.go:357] "Caches are synced" controller="kube-network-policies"
I1119 22:36:21.190976 1 metrics.go:72] Registering metrics
I1119 22:36:21.191093 1 controller.go:711] "Syncing nftables rules"
I1119 22:36:30.994216 1 main.go:297] Handling node with IPs: map[192.168.76.2:{}]
I1119 22:36:30.994256 1 main.go:301] handling current node
I1119 22:36:40.992854 1 main.go:297] Handling node with IPs: map[192.168.76.2:{}]
I1119 22:36:40.992893 1 main.go:301] handling current node
I1119 22:36:50.992386 1 main.go:297] Handling node with IPs: map[192.168.76.2:{}]
I1119 22:36:50.992422 1 main.go:301] handling current node
==> kube-apiserver [83a25278b16a7bc6a4252ba6f8c2ce8a60621e9d435c828ededf66aecfda2443] <==
I1119 22:36:02.053875 1 shared_informer.go:318] Caches are synced for cluster_authentication_trust_controller
I1119 22:36:02.055155 1 shared_informer.go:318] Caches are synced for crd-autoregister
I1119 22:36:02.055381 1 shared_informer.go:318] Caches are synced for configmaps
I1119 22:36:02.055593 1 aggregator.go:166] initial CRD sync complete...
I1119 22:36:02.055613 1 autoregister_controller.go:141] Starting autoregister controller
I1119 22:36:02.055620 1 cache.go:32] Waiting for caches to sync for autoregister controller
I1119 22:36:02.055627 1 cache.go:39] Caches are synced for autoregister controller
I1119 22:36:02.066246 1 controller.go:624] quota admission added evaluator for: namespaces
I1119 22:36:02.090717 1 shared_informer.go:318] Caches are synced for node_authorizer
I1119 22:36:02.094391 1 controller.go:624] quota admission added evaluator for: leases.coordination.k8s.io
I1119 22:36:02.747612 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I1119 22:36:02.754129 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I1119 22:36:02.754179 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I1119 22:36:03.457051 1 controller.go:624] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1119 22:36:03.510012 1 controller.go:624] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1119 22:36:03.578204 1 alloc.go:330] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W1119 22:36:03.591054 1 lease.go:263] Resetting endpoints for master service "kubernetes" to [192.168.76.2]
I1119 22:36:03.592389 1 controller.go:624] quota admission added evaluator for: endpoints
I1119 22:36:03.598109 1 controller.go:624] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1119 22:36:03.932055 1 controller.go:624] quota admission added evaluator for: serviceaccounts
I1119 22:36:05.470569 1 controller.go:624] quota admission added evaluator for: deployments.apps
I1119 22:36:05.488449 1 alloc.go:330] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I1119 22:36:05.503361 1 controller.go:624] quota admission added evaluator for: daemonsets.apps
I1119 22:36:17.195970 1 controller.go:624] quota admission added evaluator for: replicasets.apps
I1119 22:36:17.744558 1 controller.go:624] quota admission added evaluator for: controllerrevisions.apps
==> kube-controller-manager [9ce9313d9aae43100c1f669a0216b1ce028ec3fd90f9042e2780602b3b9dabcf] <==
I1119 22:36:17.007432 1 event.go:307] "Event occurred" object="kube-system/kube-scheduler-old-k8s-version-264160" fieldPath="" kind="Pod" apiVersion="v1" type="Warning" reason="NodeNotReady" message="Node is not ready"
I1119 22:36:17.007693 1 event.go:307] "Event occurred" object="kube-system/kube-controller-manager-old-k8s-version-264160" fieldPath="" kind="Pod" apiVersion="v1" type="Warning" reason="NodeNotReady" message="Node is not ready"
I1119 22:36:17.202717 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set coredns-5dd5756b68 to 2"
I1119 22:36:17.309848 1 shared_informer.go:318] Caches are synced for garbage collector
I1119 22:36:17.309885 1 garbagecollector.go:166] "All resource monitors have synced. Proceeding to collect garbage"
I1119 22:36:17.345695 1 shared_informer.go:318] Caches are synced for garbage collector
I1119 22:36:17.758353 1 event.go:307] "Event occurred" object="kube-system/kindnet" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kindnet-m9nqq"
I1119 22:36:17.771209 1 event.go:307] "Event occurred" object="kube-system/kube-proxy" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-zzmnr"
I1119 22:36:17.833691 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-vz7zx"
I1119 22:36:17.844755 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-qtkkx"
I1119 22:36:17.870437 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="668.610241ms"
I1119 22:36:17.886833 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="15.893452ms"
I1119 22:36:17.887202 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="257.726µs"
I1119 22:36:17.895692 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="94.147µs"
I1119 22:36:19.212001 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set coredns-5dd5756b68 to 1 from 2"
I1119 22:36:19.246883 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: coredns-5dd5756b68-qtkkx"
I1119 22:36:19.269962 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="59.063512ms"
I1119 22:36:19.286597 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="16.59033ms"
I1119 22:36:19.287055 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="114.554µs"
I1119 22:36:31.144412 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="61.433µs"
I1119 22:36:31.166398 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="94.171µs"
I1119 22:36:31.900825 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="227.679µs"
I1119 22:36:31.988585 1 node_lifecycle_controller.go:1048] "Controller detected that some Nodes are Ready. Exiting master disruption mode"
I1119 22:36:41.472572 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="12.52455ms"
I1119 22:36:41.472677 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="53.621µs"
==> kube-proxy [e5c22c9877dd10241d18184894e9e614c72ec9cfb5a007bdae07416884620fcb] <==
I1119 22:36:18.757438 1 server_others.go:69] "Using iptables proxy"
I1119 22:36:18.778593 1 node.go:141] Successfully retrieved node IP: 192.168.76.2
I1119 22:36:18.913376 1 server.go:632] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I1119 22:36:18.915584 1 server_others.go:152] "Using iptables Proxier"
I1119 22:36:18.915624 1 server_others.go:421] "Detect-local-mode set to ClusterCIDR, but no cluster CIDR for family" ipFamily="IPv6"
I1119 22:36:18.915633 1 server_others.go:438] "Defaulting to no-op detect-local"
I1119 22:36:18.915677 1 proxier.go:251] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
I1119 22:36:18.915959 1 server.go:846] "Version info" version="v1.28.0"
I1119 22:36:18.915974 1 server.go:848] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1119 22:36:18.920244 1 config.go:188] "Starting service config controller"
I1119 22:36:18.920284 1 shared_informer.go:311] Waiting for caches to sync for service config
I1119 22:36:18.920312 1 config.go:97] "Starting endpoint slice config controller"
I1119 22:36:18.920331 1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
I1119 22:36:18.920900 1 config.go:315] "Starting node config controller"
I1119 22:36:18.920980 1 shared_informer.go:311] Waiting for caches to sync for node config
I1119 22:36:19.021747 1 shared_informer.go:318] Caches are synced for node config
I1119 22:36:19.021777 1 shared_informer.go:318] Caches are synced for service config
I1119 22:36:19.021803 1 shared_informer.go:318] Caches are synced for endpoint slice config
==> kube-scheduler [0aa1bd28b60733799ab92c2d108b32fc31d28ba32f45f38e766395ec615ed220] <==
W1119 22:36:02.058403 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E1119 22:36:02.058916 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
W1119 22:36:02.058451 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E1119 22:36:02.058979 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
W1119 22:36:02.058497 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E1119 22:36:02.059039 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
W1119 22:36:02.058567 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E1119 22:36:02.059110 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
W1119 22:36:02.058600 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E1119 22:36:02.059179 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
W1119 22:36:02.058632 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E1119 22:36:02.059241 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
W1119 22:36:02.878544 1 reflector.go:535] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E1119 22:36:02.878578 1 reflector.go:147] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
W1119 22:36:02.913574 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E1119 22:36:02.913618 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
W1119 22:36:02.963123 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E1119 22:36:02.963158 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
W1119 22:36:03.017826 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E1119 22:36:03.018067 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
W1119 22:36:03.127020 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E1119 22:36:03.127294 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
W1119 22:36:03.201758 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E1119 22:36:03.202031 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
I1119 22:36:05.139254 1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Nov 19 22:36:16 old-k8s-version-264160 kubelet[1553]: I1119 22:36:16.880132 1553 kubelet_network.go:61] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24"
Nov 19 22:36:17 old-k8s-version-264160 kubelet[1553]: I1119 22:36:17.764724 1553 topology_manager.go:215] "Topology Admit Handler" podUID="2f9f6fbb-c725-49fd-ba3a-c84a7640aac2" podNamespace="kube-system" podName="kindnet-m9nqq"
Nov 19 22:36:17 old-k8s-version-264160 kubelet[1553]: I1119 22:36:17.781987 1553 topology_manager.go:215] "Topology Admit Handler" podUID="3ee1645f-fba5-4206-bb83-70d298a4c5ac" podNamespace="kube-system" podName="kube-proxy-zzmnr"
Nov 19 22:36:17 old-k8s-version-264160 kubelet[1553]: I1119 22:36:17.828089 1553 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/2f9f6fbb-c725-49fd-ba3a-c84a7640aac2-xtables-lock\") pod \"kindnet-m9nqq\" (UID: \"2f9f6fbb-c725-49fd-ba3a-c84a7640aac2\") " pod="kube-system/kindnet-m9nqq"
Nov 19 22:36:17 old-k8s-version-264160 kubelet[1553]: I1119 22:36:17.828147 1553 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/3ee1645f-fba5-4206-bb83-70d298a4c5ac-kube-proxy\") pod \"kube-proxy-zzmnr\" (UID: \"3ee1645f-fba5-4206-bb83-70d298a4c5ac\") " pod="kube-system/kube-proxy-zzmnr"
Nov 19 22:36:17 old-k8s-version-264160 kubelet[1553]: I1119 22:36:17.828177 1553 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kvk49\" (UniqueName: \"kubernetes.io/projected/2f9f6fbb-c725-49fd-ba3a-c84a7640aac2-kube-api-access-kvk49\") pod \"kindnet-m9nqq\" (UID: \"2f9f6fbb-c725-49fd-ba3a-c84a7640aac2\") " pod="kube-system/kindnet-m9nqq"
Nov 19 22:36:17 old-k8s-version-264160 kubelet[1553]: I1119 22:36:17.828200 1553 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/3ee1645f-fba5-4206-bb83-70d298a4c5ac-xtables-lock\") pod \"kube-proxy-zzmnr\" (UID: \"3ee1645f-fba5-4206-bb83-70d298a4c5ac\") " pod="kube-system/kube-proxy-zzmnr"
Nov 19 22:36:17 old-k8s-version-264160 kubelet[1553]: I1119 22:36:17.828223 1553 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/3ee1645f-fba5-4206-bb83-70d298a4c5ac-lib-modules\") pod \"kube-proxy-zzmnr\" (UID: \"3ee1645f-fba5-4206-bb83-70d298a4c5ac\") " pod="kube-system/kube-proxy-zzmnr"
Nov 19 22:36:17 old-k8s-version-264160 kubelet[1553]: I1119 22:36:17.828251 1553 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/2f9f6fbb-c725-49fd-ba3a-c84a7640aac2-lib-modules\") pod \"kindnet-m9nqq\" (UID: \"2f9f6fbb-c725-49fd-ba3a-c84a7640aac2\") " pod="kube-system/kindnet-m9nqq"
Nov 19 22:36:17 old-k8s-version-264160 kubelet[1553]: I1119 22:36:17.828274 1553 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-cfg\" (UniqueName: \"kubernetes.io/host-path/2f9f6fbb-c725-49fd-ba3a-c84a7640aac2-cni-cfg\") pod \"kindnet-m9nqq\" (UID: \"2f9f6fbb-c725-49fd-ba3a-c84a7640aac2\") " pod="kube-system/kindnet-m9nqq"
Nov 19 22:36:17 old-k8s-version-264160 kubelet[1553]: I1119 22:36:17.828297 1553 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fc7w4\" (UniqueName: \"kubernetes.io/projected/3ee1645f-fba5-4206-bb83-70d298a4c5ac-kube-api-access-fc7w4\") pod \"kube-proxy-zzmnr\" (UID: \"3ee1645f-fba5-4206-bb83-70d298a4c5ac\") " pod="kube-system/kube-proxy-zzmnr"
Nov 19 22:36:20 old-k8s-version-264160 kubelet[1553]: I1119 22:36:20.875429 1553 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kindnet-m9nqq" podStartSLOduration=1.9015295540000001 podCreationTimestamp="2025-11-19 22:36:17 +0000 UTC" firstStartedPulling="2025-11-19 22:36:18.551512561 +0000 UTC m=+13.118052946" lastFinishedPulling="2025-11-19 22:36:20.525369265 +0000 UTC m=+15.091909650" observedRunningTime="2025-11-19 22:36:20.875315381 +0000 UTC m=+15.441855783" watchObservedRunningTime="2025-11-19 22:36:20.875386258 +0000 UTC m=+15.441926643"
Nov 19 22:36:20 old-k8s-version-264160 kubelet[1553]: I1119 22:36:20.876203 1553 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kube-proxy-zzmnr" podStartSLOduration=3.87615718 podCreationTimestamp="2025-11-19 22:36:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-19 22:36:18.872222396 +0000 UTC m=+13.438762780" watchObservedRunningTime="2025-11-19 22:36:20.87615718 +0000 UTC m=+15.442697581"
Nov 19 22:36:31 old-k8s-version-264160 kubelet[1553]: I1119 22:36:31.092782 1553 kubelet_node_status.go:493] "Fast updating node status as it just became ready"
Nov 19 22:36:31 old-k8s-version-264160 kubelet[1553]: I1119 22:36:31.139366 1553 topology_manager.go:215] "Topology Admit Handler" podUID="7e7645ad-49a9-4f0c-89cc-128538e4d95c" podNamespace="kube-system" podName="coredns-5dd5756b68-vz7zx"
Nov 19 22:36:31 old-k8s-version-264160 kubelet[1553]: I1119 22:36:31.152446 1553 topology_manager.go:215] "Topology Admit Handler" podUID="8e2dda77-5a6d-4796-926b-5a06158f8cdf" podNamespace="kube-system" podName="storage-provisioner"
Nov 19 22:36:31 old-k8s-version-264160 kubelet[1553]: I1119 22:36:31.233967 1553 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7e7645ad-49a9-4f0c-89cc-128538e4d95c-config-volume\") pod \"coredns-5dd5756b68-vz7zx\" (UID: \"7e7645ad-49a9-4f0c-89cc-128538e4d95c\") " pod="kube-system/coredns-5dd5756b68-vz7zx"
Nov 19 22:36:31 old-k8s-version-264160 kubelet[1553]: I1119 22:36:31.234065 1553 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pkc9q\" (UniqueName: \"kubernetes.io/projected/7e7645ad-49a9-4f0c-89cc-128538e4d95c-kube-api-access-pkc9q\") pod \"coredns-5dd5756b68-vz7zx\" (UID: \"7e7645ad-49a9-4f0c-89cc-128538e4d95c\") " pod="kube-system/coredns-5dd5756b68-vz7zx"
Nov 19 22:36:31 old-k8s-version-264160 kubelet[1553]: I1119 22:36:31.234125 1553 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/8e2dda77-5a6d-4796-926b-5a06158f8cdf-tmp\") pod \"storage-provisioner\" (UID: \"8e2dda77-5a6d-4796-926b-5a06158f8cdf\") " pod="kube-system/storage-provisioner"
Nov 19 22:36:31 old-k8s-version-264160 kubelet[1553]: I1119 22:36:31.234229 1553 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4dt4z\" (UniqueName: \"kubernetes.io/projected/8e2dda77-5a6d-4796-926b-5a06158f8cdf-kube-api-access-4dt4z\") pod \"storage-provisioner\" (UID: \"8e2dda77-5a6d-4796-926b-5a06158f8cdf\") " pod="kube-system/storage-provisioner"
Nov 19 22:36:31 old-k8s-version-264160 kubelet[1553]: I1119 22:36:31.928942 1553 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/coredns-5dd5756b68-vz7zx" podStartSLOduration=14.928898879 podCreationTimestamp="2025-11-19 22:36:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-19 22:36:31.902288078 +0000 UTC m=+26.468828471" watchObservedRunningTime="2025-11-19 22:36:31.928898879 +0000 UTC m=+26.495439272"
Nov 19 22:36:31 old-k8s-version-264160 kubelet[1553]: I1119 22:36:31.929197 1553 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=12.929173877 podCreationTimestamp="2025-11-19 22:36:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-19 22:36:31.926923668 +0000 UTC m=+26.493464217" watchObservedRunningTime="2025-11-19 22:36:31.929173877 +0000 UTC m=+26.495714286"
Nov 19 22:36:43 old-k8s-version-264160 kubelet[1553]: I1119 22:36:43.392110 1553 topology_manager.go:215] "Topology Admit Handler" podUID="2af6deb4-937f-4b9b-9de6-995e75a080b8" podNamespace="default" podName="busybox"
Nov 19 22:36:43 old-k8s-version-264160 kubelet[1553]: I1119 22:36:43.523830 1553 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kb7ph\" (UniqueName: \"kubernetes.io/projected/2af6deb4-937f-4b9b-9de6-995e75a080b8-kube-api-access-kb7ph\") pod \"busybox\" (UID: \"2af6deb4-937f-4b9b-9de6-995e75a080b8\") " pod="default/busybox"
Nov 19 22:36:46 old-k8s-version-264160 kubelet[1553]: I1119 22:36:46.935525 1553 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="default/busybox" podStartSLOduration=1.727293354 podCreationTimestamp="2025-11-19 22:36:43 +0000 UTC" firstStartedPulling="2025-11-19 22:36:43.851422103 +0000 UTC m=+38.417962488" lastFinishedPulling="2025-11-19 22:36:46.059604676 +0000 UTC m=+40.626145060" observedRunningTime="2025-11-19 22:36:46.934118134 +0000 UTC m=+41.500658519" watchObservedRunningTime="2025-11-19 22:36:46.935475926 +0000 UTC m=+41.502016319"
==> storage-provisioner [f62b743b6725ec9ff1e91e664da6c9ce15d837afbab3608cc02fec3c9bd3d929] <==
I1119 22:36:31.737660 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1119 22:36:31.757257 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1119 22:36:31.757310 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I1119 22:36:31.769006 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I1119 22:36:31.771663 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_old-k8s-version-264160_88781c45-d0c6-484e-abf4-8c2df680f8d6!
I1119 22:36:31.772385 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"62b15298-f39b-43d5-9d35-ddeafad4bd4d", APIVersion:"v1", ResourceVersion:"442", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' old-k8s-version-264160_88781c45-d0c6-484e-abf4-8c2df680f8d6 became leader
I1119 22:36:31.872085 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_old-k8s-version-264160_88781c45-d0c6-484e-abf4-8c2df680f8d6!
-- /stdout --
helpers_test.go:262: (dbg) Run: out/minikube-linux-arm64 status --format={{.APIServer}} -p old-k8s-version-264160 -n old-k8s-version-264160
helpers_test.go:269: (dbg) Run: kubectl --context old-k8s-version-264160 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:293: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: end of post-mortem logs <<<
helpers_test.go:294: ---------------------/post-mortem---------------------------------
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: docker inspect <======
helpers_test.go:239: (dbg) Run: docker inspect old-k8s-version-264160
helpers_test.go:243: (dbg) docker inspect old-k8s-version-264160:
-- stdout --
[
{
"Id": "49717cdd4541256c61f8dce96738708ef0a5263ed6216dabb995ea611616d37a",
"Created": "2025-11-19T22:35:36.829393211Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 205037,
"ExitCode": 0,
"Error": "",
"StartedAt": "2025-11-19T22:35:36.889026709Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:161ae512ea03f95c595a46a20f1dbd1d1e737c6a82df3ed673e089531af665da",
"ResolvConfPath": "/var/lib/docker/containers/49717cdd4541256c61f8dce96738708ef0a5263ed6216dabb995ea611616d37a/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/49717cdd4541256c61f8dce96738708ef0a5263ed6216dabb995ea611616d37a/hostname",
"HostsPath": "/var/lib/docker/containers/49717cdd4541256c61f8dce96738708ef0a5263ed6216dabb995ea611616d37a/hosts",
"LogPath": "/var/lib/docker/containers/49717cdd4541256c61f8dce96738708ef0a5263ed6216dabb995ea611616d37a/49717cdd4541256c61f8dce96738708ef0a5263ed6216dabb995ea611616d37a-json.log",
"Name": "/old-k8s-version-264160",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"old-k8s-version-264160:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {}
},
"NetworkMode": "old-k8s-version-264160",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 3221225472,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 6442450944,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"ID": "49717cdd4541256c61f8dce96738708ef0a5263ed6216dabb995ea611616d37a",
"LowerDir": "/var/lib/docker/overlay2/feff7a4e723e18389dcb4a6f7e089bff4aeb566c5b553ed60b078e825f1fd0a8-init/diff:/var/lib/docker/overlay2/b6ebc9601ea0ae08484f263713f3358dd93f7748ebfafbd9155229908dee9606/diff",
"MergedDir": "/var/lib/docker/overlay2/feff7a4e723e18389dcb4a6f7e089bff4aeb566c5b553ed60b078e825f1fd0a8/merged",
"UpperDir": "/var/lib/docker/overlay2/feff7a4e723e18389dcb4a6f7e089bff4aeb566c5b553ed60b078e825f1fd0a8/diff",
"WorkDir": "/var/lib/docker/overlay2/feff7a4e723e18389dcb4a6f7e089bff4aeb566c5b553ed60b078e825f1fd0a8/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "volume",
"Name": "old-k8s-version-264160",
"Source": "/var/lib/docker/volumes/old-k8s-version-264160/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
},
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
}
],
"Config": {
"Hostname": "old-k8s-version-264160",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "old-k8s-version-264160",
"name.minikube.sigs.k8s.io": "old-k8s-version-264160",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "1c6d7c0f5ea4187c0bdb74e6f6190f3c956a222d61984cbd94ed19e45025d4c9",
"SandboxKey": "/var/run/docker/netns/1c6d7c0f5ea4",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33054"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33055"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33058"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33056"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33057"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"old-k8s-version-264160": {
"IPAMConfig": {
"IPv4Address": "192.168.76.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "52:a5:ad:7a:8b:5a",
"DriverOpts": null,
"GwPriority": 0,
"NetworkID": "b720c74a0dc38658463082bcb93730b420d57f391d495ecb21d74f5ad35b4f21",
"EndpointID": "4800aba7ded95ed95a56ef1ad4bf1b238d330afe47c91b66c43c80a2794b655c",
"Gateway": "192.168.76.1",
"IPAddress": "192.168.76.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"old-k8s-version-264160",
"49717cdd4541"
]
}
}
}
}
]
-- /stdout --
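The inspect dump above is the complete JSON document for the node container. When only a single field is needed, for example the host port that Docker assigned to the container's 22/tcp endpoint, a Go template passed to docker inspect avoids parsing the whole document; the provisioning steps later in this log use exactly that template before opening an SSH session. A minimal sketch, assuming the docker CLI is on PATH and the container name from this run:

package main

import (
    "fmt"
    "os/exec"
    "strings"
)

func main() {
    // Same template the minikube driver runs below to find the forwarded SSH port.
    tmpl := `{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}`
    out, err := exec.Command("docker", "container", "inspect", "-f", tmpl,
        "old-k8s-version-264160").Output()
    if err != nil {
        panic(err)
    }
    fmt.Println("ssh is published on 127.0.0.1:" + strings.TrimSpace(string(out)))
}

For this run the printed port would be 33054, matching the NetworkSettings.Ports block above; the empty HostPort entries under HostConfig.PortBindings only mean that ephemeral host ports were requested, not that nothing is published.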
helpers_test.go:247: (dbg) Run: out/minikube-linux-arm64 status --format={{.Host}} -p old-k8s-version-264160 -n old-k8s-version-264160
helpers_test.go:252: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-arm64 -p old-k8s-version-264160 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-arm64 -p old-k8s-version-264160 logs -n 25: (1.255192388s)
helpers_test.go:260: TestStartStop/group/old-k8s-version/serial/DeployApp logs:
-- stdout --
==> Audit <==
┌─────────┬─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬──────────────────────────┬─────────┬─────────┬─────────────────────┬────────────
─────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼──────────────────────────┼─────────┼─────────┼─────────────────────┼────────────
─────────┤
│ ssh │ -p cilium-156590 sudo cat /etc/docker/daemon.json │ cilium-156590 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ │
│ ssh │ -p cilium-156590 sudo docker system info │ cilium-156590 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ │
│ ssh │ -p cilium-156590 sudo systemctl status cri-docker --all --full --no-pager │ cilium-156590 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ │
│ ssh │ -p cilium-156590 sudo systemctl cat cri-docker --no-pager │ cilium-156590 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ │
│ ssh │ -p cilium-156590 sudo cat /etc/systemd/system/cri-docker.service.d/10-cni.conf │ cilium-156590 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ │
│ ssh │ -p cilium-156590 sudo cat /usr/lib/systemd/system/cri-docker.service │ cilium-156590 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ │
│ ssh │ -p cilium-156590 sudo cri-dockerd --version │ cilium-156590 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ │
│ ssh │ -p cilium-156590 sudo systemctl status containerd --all --full --no-pager │ cilium-156590 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ │
│ ssh │ -p cilium-156590 sudo systemctl cat containerd --no-pager │ cilium-156590 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ │
│ ssh │ -p cilium-156590 sudo cat /lib/systemd/system/containerd.service │ cilium-156590 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ │
│ ssh │ -p cilium-156590 sudo cat /etc/containerd/config.toml │ cilium-156590 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ │
│ ssh │ -p cilium-156590 sudo containerd config dump │ cilium-156590 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ │
│ ssh │ -p cilium-156590 sudo systemctl status crio --all --full --no-pager │ cilium-156590 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ │
│ ssh │ -p cilium-156590 sudo systemctl cat crio --no-pager │ cilium-156590 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ │
│ ssh │ -p cilium-156590 sudo find /etc/crio -type f -exec sh -c 'echo {}; cat {}' \; │ cilium-156590 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ │
│ ssh │ -p cilium-156590 sudo crio config │ cilium-156590 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ │
│ delete │ -p cilium-156590 │ cilium-156590 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ 19 Nov 25 22:34 UTC │
│ start │ -p cert-expiration-750367 --memory=3072 --cert-expiration=3m --driver=docker --container-runtime=containerd │ cert-expiration-750367 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ 19 Nov 25 22:34 UTC │
│ ssh │ force-systemd-env-388402 ssh cat /etc/containerd/config.toml │ force-systemd-env-388402 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ 19 Nov 25 22:34 UTC │
│ delete │ -p force-systemd-env-388402 │ force-systemd-env-388402 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ 19 Nov 25 22:34 UTC │
│ start │ -p cert-options-815306 --memory=3072 --apiserver-ips=127.0.0.1 --apiserver-ips=192.168.15.15 --apiserver-names=localhost --apiserver-names=www.google.com --apiserver-port=8555 --driver=docker --container-runtime=containerd │ cert-options-815306 │ jenkins │ v1.37.0 │ 19 Nov 25 22:34 UTC │ 19 Nov 25 22:35 UTC │
│ ssh │ cert-options-815306 ssh openssl x509 -text -noout -in /var/lib/minikube/certs/apiserver.crt │ cert-options-815306 │ jenkins │ v1.37.0 │ 19 Nov 25 22:35 UTC │ 19 Nov 25 22:35 UTC │
│ ssh │ -p cert-options-815306 -- sudo cat /etc/kubernetes/admin.conf │ cert-options-815306 │ jenkins │ v1.37.0 │ 19 Nov 25 22:35 UTC │ 19 Nov 25 22:35 UTC │
│ delete │ -p cert-options-815306 │ cert-options-815306 │ jenkins │ v1.37.0 │ 19 Nov 25 22:35 UTC │ 19 Nov 25 22:35 UTC │
│ start │ -p old-k8s-version-264160 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.28.0 │ old-k8s-version-264160 │ jenkins │ v1.37.0 │ 19 Nov 25 22:35 UTC │ 19 Nov 25 22:36 UTC │
└─────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────────────────┴─────────┴─────────┴─────────────────────┴────────────
─────────┘
==> Last Start <==
Log file created at: 2025/11/19 22:35:30
Running on machine: ip-172-31-24-2
Binary: Built with gc go1.24.6 for linux/arm64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1119 22:35:30.257107 204649 out.go:360] Setting OutFile to fd 1 ...
I1119 22:35:30.257270 204649 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1119 22:35:30.257288 204649 out.go:374] Setting ErrFile to fd 2...
I1119 22:35:30.257293 204649 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1119 22:35:30.257586 204649 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21918-2347/.minikube/bin
I1119 22:35:30.258032 204649 out.go:368] Setting JSON to false
I1119 22:35:30.259057 204649 start.go:133] hostinfo: {"hostname":"ip-172-31-24-2","uptime":4651,"bootTime":1763587079,"procs":189,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1084-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"6d436adf-771e-4269-b9a3-c25fd4fca4f5"}
I1119 22:35:30.259135 204649 start.go:143] virtualization:
I1119 22:35:30.265034 204649 out.go:179] * [old-k8s-version-264160] minikube v1.37.0 on Ubuntu 20.04 (arm64)
I1119 22:35:30.268600 204649 out.go:179] - MINIKUBE_LOCATION=21918
I1119 22:35:30.268654 204649 notify.go:221] Checking for updates...
I1119 22:35:30.275244 204649 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1119 22:35:30.278424 204649 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21918-2347/kubeconfig
I1119 22:35:30.281805 204649 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21918-2347/.minikube
I1119 22:35:30.285044 204649 out.go:179] - MINIKUBE_BIN=out/minikube-linux-arm64
I1119 22:35:30.288125 204649 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1119 22:35:30.291809 204649 config.go:182] Loaded profile config "cert-expiration-750367": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1119 22:35:30.291938 204649 driver.go:422] Setting default libvirt URI to qemu:///system
I1119 22:35:30.328984 204649 docker.go:124] docker version: linux-28.1.1:Docker Engine - Community
I1119 22:35:30.329118 204649 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1119 22:35:30.391514 204649 info.go:266] docker info: {ID:J4M5:W6MX:GOX4:4LAQ:VI7E:VJNF:J3OP:OPBH:GF7G:PPY4:WQWD:7N4L Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:36 OomKillDisable:true NGoroutines:52 SystemTime:2025-11-19 22:35:30.382377652 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:a
arch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214831104 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-24-2 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Pat
h:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I1119 22:35:30.391618 204649 docker.go:319] overlay module found
I1119 22:35:30.394904 204649 out.go:179] * Using the docker driver based on user configuration
I1119 22:35:30.397906 204649 start.go:309] selected driver: docker
I1119 22:35:30.397928 204649 start.go:930] validating driver "docker" against <nil>
I1119 22:35:30.397942 204649 start.go:941] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1119 22:35:30.398744 204649 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1119 22:35:30.457338 204649 info.go:266] docker info: {ID:J4M5:W6MX:GOX4:4LAQ:VI7E:VJNF:J3OP:OPBH:GF7G:PPY4:WQWD:7N4L Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:36 OomKillDisable:true NGoroutines:52 SystemTime:2025-11-19 22:35:30.447544183 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:a
arch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214831104 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-24-2 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Pat
h:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I1119 22:35:30.457505 204649 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I1119 22:35:30.457734 204649 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1119 22:35:30.460603 204649 out.go:179] * Using Docker driver with root privileges
I1119 22:35:30.463555 204649 cni.go:84] Creating CNI manager for ""
I1119 22:35:30.463623 204649 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1119 22:35:30.463636 204649 start_flags.go:336] Found "CNI" CNI - setting NetworkPlugin=cni
I1119 22:35:30.463716 204649 start.go:353] cluster config:
{Name:old-k8s-version-264160 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-264160 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local
ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSH
AuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1119 22:35:30.466849 204649 out.go:179] * Starting "old-k8s-version-264160" primary control-plane node in "old-k8s-version-264160" cluster
I1119 22:35:30.469744 204649 cache.go:134] Beginning downloading kic base image for docker with containerd
I1119 22:35:30.472743 204649 out.go:179] * Pulling base image v0.0.48-1763561786-21918 ...
I1119 22:35:30.475730 204649 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1119 22:35:30.475797 204649 preload.go:203] Found local preload: /home/jenkins/minikube-integration/21918-2347/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-arm64.tar.lz4
I1119 22:35:30.475812 204649 cache.go:65] Caching tarball of preloaded images
I1119 22:35:30.475815 204649 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865 in local docker daemon
I1119 22:35:30.475897 204649 preload.go:238] Found /home/jenkins/minikube-integration/21918-2347/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-arm64.tar.lz4 in cache, skipping download
I1119 22:35:30.475907 204649 cache.go:68] Finished verifying existence of preloaded tar for v1.28.0 on containerd
I1119 22:35:30.476103 204649 profile.go:143] Saving config to /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/config.json ...
I1119 22:35:30.476142 204649 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/config.json: {Name:mka3956cf816ce3f0dc4b41766ded046d7e239b7 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:35:30.495142 204649 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865 in local docker daemon, skipping pull
I1119 22:35:30.495164 204649 cache.go:158] gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865 exists in daemon, skipping load
I1119 22:35:30.495178 204649 cache.go:243] Successfully downloaded all kic artifacts
I1119 22:35:30.495202 204649 start.go:360] acquireMachinesLock for old-k8s-version-264160: {Name:mkb1d6d80392c055072776fe42d903323b85b557 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1119 22:35:30.495313 204649 start.go:364] duration metric: took 84.916µs to acquireMachinesLock for "old-k8s-version-264160"
I1119 22:35:30.495346 204649 start.go:93] Provisioning new machine with config: &{Name:old-k8s-version-264160 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-264160 Namespace:default APIServerHAVIP:
APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:f
alse CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1119 22:35:30.495417 204649 start.go:125] createHost starting for "" (driver="docker")
I1119 22:35:30.498755 204649 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I1119 22:35:30.499000 204649 start.go:159] libmachine.API.Create for "old-k8s-version-264160" (driver="docker")
I1119 22:35:30.499040 204649 client.go:173] LocalClient.Create starting
I1119 22:35:30.499112 204649 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21918-2347/.minikube/certs/ca.pem
I1119 22:35:30.499148 204649 main.go:143] libmachine: Decoding PEM data...
I1119 22:35:30.499166 204649 main.go:143] libmachine: Parsing certificate...
I1119 22:35:30.499221 204649 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21918-2347/.minikube/certs/cert.pem
I1119 22:35:30.499243 204649 main.go:143] libmachine: Decoding PEM data...
I1119 22:35:30.499252 204649 main.go:143] libmachine: Parsing certificate...
I1119 22:35:30.499620 204649 cli_runner.go:164] Run: docker network inspect old-k8s-version-264160 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1119 22:35:30.514882 204649 cli_runner.go:211] docker network inspect old-k8s-version-264160 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1119 22:35:30.514967 204649 network_create.go:284] running [docker network inspect old-k8s-version-264160] to gather additional debugging logs...
I1119 22:35:30.514989 204649 cli_runner.go:164] Run: docker network inspect old-k8s-version-264160
W1119 22:35:30.529792 204649 cli_runner.go:211] docker network inspect old-k8s-version-264160 returned with exit code 1
I1119 22:35:30.529827 204649 network_create.go:287] error running [docker network inspect old-k8s-version-264160]: docker network inspect old-k8s-version-264160: exit status 1
stdout:
[]
stderr:
Error response from daemon: network old-k8s-version-264160 not found
I1119 22:35:30.529841 204649 network_create.go:289] output of [docker network inspect old-k8s-version-264160]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network old-k8s-version-264160 not found
** /stderr **
I1119 22:35:30.529955 204649 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1119 22:35:30.546966 204649 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-b0fa93c84379 IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:8a:8f:4f:8f:5a:a3} reservation:<nil>}
I1119 22:35:30.547286 204649 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-141c656f658f IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:62:30:08:ea:1a:b9} reservation:<nil>}
I1119 22:35:30.547626 204649 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-aae633a5ffae IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:ca:73:d8:2e:30:94} reservation:<nil>}
I1119 22:35:30.548050 204649 network.go:206] using free private subnet 192.168.76.0/24: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0x40019f9110}
I1119 22:35:30.548074 204649 network_create.go:124] attempt to create docker network old-k8s-version-264160 192.168.76.0/24 with gateway 192.168.76.1 and MTU of 1500 ...
I1119 22:35:30.548135 204649 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.76.0/24 --gateway=192.168.76.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=old-k8s-version-264160 old-k8s-version-264160
I1119 22:35:30.612059 204649 network_create.go:108] docker network old-k8s-version-264160 192.168.76.0/24 created
I1119 22:35:30.612094 204649 kic.go:121] calculated static IP "192.168.76.2" for the "old-k8s-version-264160" container
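The lines above show the subnet selection: 192.168.49.0/24, 192.168.58.0/24 and 192.168.67.0/24 are each already attached to an existing bridge, so minikube settles on 192.168.76.0/24 with the gateway at .1 and the node pinned to .2. A rough sketch of that selection loop (hypothetical helper for illustration only; the candidate step of 9 in the third octet is inferred from the sequence in the log, and the taken set would normally come from docker network inspect):

package main

import (
    "fmt"
    "net/netip"
)

// firstFreeSubnet walks 192.168.49.0/24, 192.168.58.0/24, ... and returns the
// first candidate that is not already used by an existing docker network.
func firstFreeSubnet(taken map[string]bool) netip.Prefix {
    for octet := 49; octet <= 254; octet += 9 {
        p := netip.PrefixFrom(netip.AddrFrom4([4]byte{192, 168, byte(octet), 0}), 24)
        if !taken[p.String()] {
            return p
        }
    }
    return netip.Prefix{}
}

func main() {
    taken := map[string]bool{
        "192.168.49.0/24": true, // br-b0fa93c84379 in this run
        "192.168.58.0/24": true, // br-141c656f658f
        "192.168.67.0/24": true, // br-aae633a5ffae
    }
    free := firstFreeSubnet(taken)
    gateway := free.Addr().Next() // 192.168.76.1
    node := gateway.Next()        // 192.168.76.2, the static IP in the log
    fmt.Println(free, gateway, node)
}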
I1119 22:35:30.612164 204649 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1119 22:35:30.629392 204649 cli_runner.go:164] Run: docker volume create old-k8s-version-264160 --label name.minikube.sigs.k8s.io=old-k8s-version-264160 --label created_by.minikube.sigs.k8s.io=true
I1119 22:35:30.648884 204649 oci.go:103] Successfully created a docker volume old-k8s-version-264160
I1119 22:35:30.648982 204649 cli_runner.go:164] Run: docker run --rm --name old-k8s-version-264160-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=old-k8s-version-264160 --entrypoint /usr/bin/test -v old-k8s-version-264160:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865 -d /var/lib
I1119 22:35:31.199519 204649 oci.go:107] Successfully prepared a docker volume old-k8s-version-264160
I1119 22:35:31.199605 204649 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1119 22:35:31.199622 204649 kic.go:194] Starting extracting preloaded images to volume ...
I1119 22:35:31.199697 204649 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21918-2347/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v old-k8s-version-264160:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865 -I lz4 -xf /preloaded.tar -C /extractDir
I1119 22:35:36.761404 204649 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21918-2347/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v old-k8s-version-264160:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865 -I lz4 -xf /preloaded.tar -C /extractDir: (5.561655508s)
I1119 22:35:36.761444 204649 kic.go:203] duration metric: took 5.561818243s to extract preloaded images to volume ...
W1119 22:35:36.761577 204649 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I1119 22:35:36.761693 204649 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1119 22:35:36.815053 204649 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname old-k8s-version-264160 --name old-k8s-version-264160 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=old-k8s-version-264160 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=old-k8s-version-264160 --network old-k8s-version-264160 --ip 192.168.76.2 --volume old-k8s-version-264160:/var --security-opt apparmor=unconfined --memory=3072mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865
I1119 22:35:37.145087 204649 cli_runner.go:164] Run: docker container inspect old-k8s-version-264160 --format={{.State.Running}}
I1119 22:35:37.171282 204649 cli_runner.go:164] Run: docker container inspect old-k8s-version-264160 --format={{.State.Status}}
I1119 22:35:37.199972 204649 cli_runner.go:164] Run: docker exec old-k8s-version-264160 stat /var/lib/dpkg/alternatives/iptables
I1119 22:35:37.254683 204649 oci.go:144] the created container "old-k8s-version-264160" has a running status.
I1119 22:35:37.254726 204649 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21918-2347/.minikube/machines/old-k8s-version-264160/id_rsa...
I1119 22:35:38.063600 204649 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21918-2347/.minikube/machines/old-k8s-version-264160/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1119 22:35:38.084666 204649 cli_runner.go:164] Run: docker container inspect old-k8s-version-264160 --format={{.State.Status}}
I1119 22:35:38.103756 204649 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1119 22:35:38.103781 204649 kic_runner.go:114] Args: [docker exec --privileged old-k8s-version-264160 chown docker:docker /home/docker/.ssh/authorized_keys]
I1119 22:35:38.159199 204649 cli_runner.go:164] Run: docker container inspect old-k8s-version-264160 --format={{.State.Status}}
I1119 22:35:38.177494 204649 machine.go:94] provisionDockerMachine start ...
I1119 22:35:38.177599 204649 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-264160
I1119 22:35:38.195122 204649 main.go:143] libmachine: Using SSH client type: native
I1119 22:35:38.195453 204649 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3eefe0] 0x3f1790 <nil> [] 0s} 127.0.0.1 33054 <nil> <nil>}
I1119 22:35:38.195469 204649 main.go:143] libmachine: About to run SSH command:
hostname
I1119 22:35:38.196184 204649 main.go:143] libmachine: Error dialing TCP: ssh: handshake failed: EOF
I1119 22:35:41.337849 204649 main.go:143] libmachine: SSH cmd err, output: <nil>: old-k8s-version-264160
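provisionDockerMachine talks to the node over the forwarded port 127.0.0.1:33054 as the docker user, authenticating with the id_rsa key generated a few lines earlier; the first handshake fails with EOF simply because sshd inside the container is still coming up, and libmachine retries until the hostname command above succeeds. A minimal sketch of the same connection using golang.org/x/crypto/ssh (the key path and port are the ones from this run; this is not minikube's actual implementation):

package main

import (
    "fmt"
    "os"

    "golang.org/x/crypto/ssh"
)

func main() {
    key, err := os.ReadFile("/home/jenkins/minikube-integration/21918-2347/.minikube/machines/old-k8s-version-264160/id_rsa")
    if err != nil {
        panic(err)
    }
    signer, err := ssh.ParsePrivateKey(key)
    if err != nil {
        panic(err)
    }
    cfg := &ssh.ClientConfig{
        User:            "docker",
        Auth:            []ssh.AuthMethod{ssh.PublicKeys(signer)},
        HostKeyCallback: ssh.InsecureIgnoreHostKey(), // acceptable for a throwaway test node
    }
    client, err := ssh.Dial("tcp", "127.0.0.1:33054", cfg)
    if err != nil {
        panic(err) // retry here in practice; sshd may not be up yet (the EOF above)
    }
    defer client.Close()
    session, err := client.NewSession()
    if err != nil {
        panic(err)
    }
    defer session.Close()
    out, err := session.Output("hostname")
    if err != nil {
        panic(err)
    }
    fmt.Printf("%s", out) // old-k8s-version-264160
}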
I1119 22:35:41.337872 204649 ubuntu.go:182] provisioning hostname "old-k8s-version-264160"
I1119 22:35:41.337936 204649 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-264160
I1119 22:35:41.356186 204649 main.go:143] libmachine: Using SSH client type: native
I1119 22:35:41.356488 204649 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3eefe0] 0x3f1790 <nil> [] 0s} 127.0.0.1 33054 <nil> <nil>}
I1119 22:35:41.356501 204649 main.go:143] libmachine: About to run SSH command:
sudo hostname old-k8s-version-264160 && echo "old-k8s-version-264160" | sudo tee /etc/hostname
I1119 22:35:41.512063 204649 main.go:143] libmachine: SSH cmd err, output: <nil>: old-k8s-version-264160
I1119 22:35:41.512155 204649 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-264160
I1119 22:35:41.531307 204649 main.go:143] libmachine: Using SSH client type: native
I1119 22:35:41.531635 204649 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3eefe0] 0x3f1790 <nil> [] 0s} 127.0.0.1 33054 <nil> <nil>}
I1119 22:35:41.531659 204649 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sold-k8s-version-264160' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 old-k8s-version-264160/g' /etc/hosts;
else
echo '127.0.1.1 old-k8s-version-264160' | sudo tee -a /etc/hosts;
fi
fi
I1119 22:35:41.674522 204649 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1119 22:35:41.674549 204649 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21918-2347/.minikube CaCertPath:/home/jenkins/minikube-integration/21918-2347/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21918-2347/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21918-2347/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21918-2347/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21918-2347/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21918-2347/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21918-2347/.minikube}
I1119 22:35:41.674570 204649 ubuntu.go:190] setting up certificates
I1119 22:35:41.674581 204649 provision.go:84] configureAuth start
I1119 22:35:41.674640 204649 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-264160
I1119 22:35:41.694614 204649 provision.go:143] copyHostCerts
I1119 22:35:41.694682 204649 exec_runner.go:144] found /home/jenkins/minikube-integration/21918-2347/.minikube/key.pem, removing ...
I1119 22:35:41.694696 204649 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21918-2347/.minikube/key.pem
I1119 22:35:41.694778 204649 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21918-2347/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21918-2347/.minikube/key.pem (1675 bytes)
I1119 22:35:41.694893 204649 exec_runner.go:144] found /home/jenkins/minikube-integration/21918-2347/.minikube/ca.pem, removing ...
I1119 22:35:41.694904 204649 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21918-2347/.minikube/ca.pem
I1119 22:35:41.694933 204649 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21918-2347/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21918-2347/.minikube/ca.pem (1082 bytes)
I1119 22:35:41.694994 204649 exec_runner.go:144] found /home/jenkins/minikube-integration/21918-2347/.minikube/cert.pem, removing ...
I1119 22:35:41.695002 204649 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21918-2347/.minikube/cert.pem
I1119 22:35:41.695027 204649 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21918-2347/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21918-2347/.minikube/cert.pem (1123 bytes)
I1119 22:35:41.695078 204649 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21918-2347/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21918-2347/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21918-2347/.minikube/certs/ca-key.pem org=jenkins.old-k8s-version-264160 san=[127.0.0.1 192.168.76.2 localhost minikube old-k8s-version-264160]
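The server certificate generated here carries the SANs listed in the log line (127.0.0.1, 192.168.76.2, localhost, minikube, old-k8s-version-264160) and is signed by the profile CA with the 26280h expiry from the cluster config. A self-signed stand-in built with the standard crypto/x509 package, just to show how those SANs end up in the certificate (a sketch, not minikube's code path, which signs with ca.pem/ca-key.pem):

package main

import (
    "crypto/rand"
    "crypto/rsa"
    "crypto/x509"
    "crypto/x509/pkix"
    "encoding/pem"
    "math/big"
    "net"
    "os"
    "time"
)

func main() {
    key, err := rsa.GenerateKey(rand.Reader, 2048)
    if err != nil {
        panic(err)
    }
    tmpl := &x509.Certificate{
        SerialNumber: big.NewInt(1),
        Subject:      pkix.Name{Organization: []string{"jenkins.old-k8s-version-264160"}},
        NotBefore:    time.Now(),
        NotAfter:     time.Now().Add(26280 * time.Hour), // CertExpiration from the cluster config
        KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
        ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
        // The SAN list from the provision.go log line above.
        DNSNames:    []string{"localhost", "minikube", "old-k8s-version-264160"},
        IPAddresses: []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("192.168.76.2")},
    }
    der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
    if err != nil {
        panic(err)
    }
    if err := pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: der}); err != nil {
        panic(err)
    }
}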
I1119 22:35:41.985138 204649 provision.go:177] copyRemoteCerts
I1119 22:35:41.985210 204649 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1119 22:35:41.985253 204649 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-264160
I1119 22:35:42.011744 204649 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33054 SSHKeyPath:/home/jenkins/minikube-integration/21918-2347/.minikube/machines/old-k8s-version-264160/id_rsa Username:docker}
I1119 22:35:42.120462 204649 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-2347/.minikube/machines/server.pem --> /etc/docker/server.pem (1233 bytes)
I1119 22:35:42.153941 204649 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-2347/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I1119 22:35:42.177275 204649 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-2347/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1119 22:35:42.199768 204649 provision.go:87] duration metric: took 525.161639ms to configureAuth
I1119 22:35:42.199797 204649 ubuntu.go:206] setting minikube options for container-runtime
I1119 22:35:42.199999 204649 config.go:182] Loaded profile config "old-k8s-version-264160": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1119 22:35:42.200014 204649 machine.go:97] duration metric: took 4.022496163s to provisionDockerMachine
I1119 22:35:42.200022 204649 client.go:176] duration metric: took 11.700970491s to LocalClient.Create
I1119 22:35:42.200036 204649 start.go:167] duration metric: took 11.70103788s to libmachine.API.Create "old-k8s-version-264160"
I1119 22:35:42.200044 204649 start.go:293] postStartSetup for "old-k8s-version-264160" (driver="docker")
I1119 22:35:42.200053 204649 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1119 22:35:42.200107 204649 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1119 22:35:42.200153 204649 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-264160
I1119 22:35:42.221138 204649 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33054 SSHKeyPath:/home/jenkins/minikube-integration/21918-2347/.minikube/machines/old-k8s-version-264160/id_rsa Username:docker}
I1119 22:35:42.326805 204649 ssh_runner.go:195] Run: cat /etc/os-release
I1119 22:35:42.330396 204649 main.go:143] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1119 22:35:42.330426 204649 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1119 22:35:42.330439 204649 filesync.go:126] Scanning /home/jenkins/minikube-integration/21918-2347/.minikube/addons for local assets ...
I1119 22:35:42.330497 204649 filesync.go:126] Scanning /home/jenkins/minikube-integration/21918-2347/.minikube/files for local assets ...
I1119 22:35:42.330585 204649 filesync.go:149] local asset: /home/jenkins/minikube-integration/21918-2347/.minikube/files/etc/ssl/certs/41442.pem -> 41442.pem in /etc/ssl/certs
I1119 22:35:42.330694 204649 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1119 22:35:42.338569 204649 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-2347/.minikube/files/etc/ssl/certs/41442.pem --> /etc/ssl/certs/41442.pem (1708 bytes)
I1119 22:35:42.358341 204649 start.go:296] duration metric: took 158.281623ms for postStartSetup
I1119 22:35:42.358732 204649 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-264160
I1119 22:35:42.376951 204649 profile.go:143] Saving config to /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/config.json ...
I1119 22:35:42.377417 204649 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1119 22:35:42.377467 204649 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-264160
I1119 22:35:42.395134 204649 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33054 SSHKeyPath:/home/jenkins/minikube-integration/21918-2347/.minikube/machines/old-k8s-version-264160/id_rsa Username:docker}
I1119 22:35:42.495341 204649 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1119 22:35:42.499972 204649 start.go:128] duration metric: took 12.004539402s to createHost
I1119 22:35:42.500036 204649 start.go:83] releasing machines lock for "old-k8s-version-264160", held for 12.004707247s
I1119 22:35:42.500112 204649 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-264160
I1119 22:35:42.517291 204649 ssh_runner.go:195] Run: cat /version.json
I1119 22:35:42.517425 204649 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-264160
I1119 22:35:42.517727 204649 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1119 22:35:42.517817 204649 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-264160
I1119 22:35:42.538882 204649 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33054 SSHKeyPath:/home/jenkins/minikube-integration/21918-2347/.minikube/machines/old-k8s-version-264160/id_rsa Username:docker}
I1119 22:35:42.547918 204649 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33054 SSHKeyPath:/home/jenkins/minikube-integration/21918-2347/.minikube/machines/old-k8s-version-264160/id_rsa Username:docker}
I1119 22:35:42.646164 204649 ssh_runner.go:195] Run: systemctl --version
I1119 22:35:42.733875 204649 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1119 22:35:42.738275 204649 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1119 22:35:42.738377 204649 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1119 22:35:42.768357 204649 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/10-crio-bridge.conflist.disabled] bridge cni config(s)
I1119 22:35:42.768382 204649 start.go:496] detecting cgroup driver to use...
I1119 22:35:42.768416 204649 detect.go:187] detected "cgroupfs" cgroup driver on host os
I1119 22:35:42.768467 204649 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1119 22:35:42.786112 204649 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1119 22:35:42.799389 204649 docker.go:218] disabling cri-docker service (if available) ...
I1119 22:35:42.799458 204649 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1119 22:35:42.817550 204649 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1119 22:35:42.837250 204649 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1119 22:35:42.954428 204649 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1119 22:35:43.089677 204649 docker.go:234] disabling docker service ...
I1119 22:35:43.089796 204649 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1119 22:35:43.119196 204649 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1119 22:35:43.133883 204649 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1119 22:35:43.271748 204649 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1119 22:35:43.403111 204649 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1119 22:35:43.416605 204649 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1119 22:35:43.431762 204649 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.9"|' /etc/containerd/config.toml"
I1119 22:35:43.441044 204649 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1119 22:35:43.450280 204649 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I1119 22:35:43.450355 204649 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I1119 22:35:43.460541 204649 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1119 22:35:43.469380 204649 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1119 22:35:43.478023 204649 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1119 22:35:43.486801 204649 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1119 22:35:43.495927 204649 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1119 22:35:43.505431 204649 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1119 22:35:43.514750 204649 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1119 22:35:43.524906 204649 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1119 22:35:43.533562 204649 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1119 22:35:43.541294 204649 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1119 22:35:43.666061 204649 ssh_runner.go:195] Run: sudo systemctl restart containerd
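The sed edits at 22:35:43 rewrite /etc/containerd/config.toml before this restart: the sandbox image is pinned to registry.k8s.io/pause:3.9, restrict_oom_score_adj is forced to false, SystemdCgroup is set to false so containerd matches the cgroupfs driver detected on the host, the CNI conf_dir is set to /etc/cni/net.d, and enable_unprivileged_ports is injected directly under the CRI plugin table. After those edits the relevant section would look roughly like the fragment below; this is a sketch of the expected result rather than a dump from this run, and the exact table names differ between containerd config versions, but the log's own sed patterns target the plugins."io.containerd.grpc.v1.cri" table shown here:

[plugins."io.containerd.grpc.v1.cri"]
  enable_unprivileged_ports = true
  restrict_oom_score_adj = false
  sandbox_image = "registry.k8s.io/pause:3.9"
  [plugins."io.containerd.grpc.v1.cri".cni]
    conf_dir = "/etc/cni/net.d"
  [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
    runtime_type = "io.containerd.runc.v2"
    [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
      SystemdCgroup = false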
I1119 22:35:43.801836 204649 start.go:543] Will wait 60s for socket path /run/containerd/containerd.sock
I1119 22:35:43.801996 204649 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I1119 22:35:43.807154 204649 start.go:564] Will wait 60s for crictl version
I1119 22:35:43.807283 204649 ssh_runner.go:195] Run: which crictl
I1119 22:35:43.810929 204649 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1119 22:35:43.840804 204649 start.go:580] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: v2.1.5
RuntimeApiVersion: v1
I1119 22:35:43.840924 204649 ssh_runner.go:195] Run: containerd --version
I1119 22:35:43.863403 204649 ssh_runner.go:195] Run: containerd --version
I1119 22:35:43.892718 204649 out.go:179] * Preparing Kubernetes v1.28.0 on containerd 2.1.5 ...
I1119 22:35:43.895641 204649 cli_runner.go:164] Run: docker network inspect old-k8s-version-264160 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1119 22:35:43.912965 204649 ssh_runner.go:195] Run: grep 192.168.76.1 host.minikube.internal$ /etc/hosts
I1119 22:35:43.916790 204649 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.76.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1119 22:35:43.926772 204649 kubeadm.go:884] updating cluster {Name:old-k8s-version-264160 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-264160 Namespace:default APIServerHAVIP: APIServerName:minik
ubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false Cu
stomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1119 22:35:43.926887 204649 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1119 22:35:43.926949 204649 ssh_runner.go:195] Run: sudo crictl images --output json
I1119 22:35:43.959370 204649 containerd.go:627] all images are preloaded for containerd runtime.
I1119 22:35:43.959391 204649 containerd.go:534] Images already preloaded, skipping extraction
I1119 22:35:43.959451 204649 ssh_runner.go:195] Run: sudo crictl images --output json
I1119 22:35:43.989251 204649 containerd.go:627] all images are preloaded for containerd runtime.
I1119 22:35:43.989276 204649 cache_images.go:86] Images are preloaded, skipping loading
I1119 22:35:43.989284 204649 kubeadm.go:935] updating node { 192.168.76.2 8443 v1.28.0 containerd true true} ...
I1119 22:35:43.989377 204649 kubeadm.go:947] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.28.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=old-k8s-version-264160 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.76.2
[Install]
config:
{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-264160 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1119 22:35:43.989454 204649 ssh_runner.go:195] Run: sudo crictl info
I1119 22:35:44.018509 204649 cni.go:84] Creating CNI manager for ""
I1119 22:35:44.018532 204649 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1119 22:35:44.018554 204649 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1119 22:35:44.018590 204649 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.76.2 APIServerPort:8443 KubernetesVersion:v1.28.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:old-k8s-version-264160 NodeName:old-k8s-version-264160 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.76.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.76.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt
StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1119 22:35:44.018720 204649 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.76.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///run/containerd/containerd.sock
name: "old-k8s-version-264160"
kubeletExtraArgs:
node-ip: 192.168.76.2
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.76.2"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.28.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
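The rendered block above is the complete kubeadm/kubelet/kube-proxy configuration minikube generates for this v1.28.0 node before running init. As a minimal sketch (not a step the test performs), assuming the profile name and paths from this run and that the bundled kubeadm ships the 'config validate' subcommand, the same file could be inspected and checked from the host after it is copied into the node a few lines below:
  # print the rendered config inside the node (the scp/cp steps below place it under /var/tmp/minikube)
  minikube -p old-k8s-version-264160 ssh -- sudo cat /var/tmp/minikube/kubeadm.yaml.new
  # sanity-check it with the same kubeadm binary the test invokes (assumes 'kubeadm config validate' is available in v1.28)
  minikube -p old-k8s-version-264160 ssh -- sudo /var/lib/minikube/binaries/v1.28.0/kubeadm config validate --config /var/tmp/minikube/kubeadm.yaml.new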
I1119 22:35:44.018791 204649 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.28.0
I1119 22:35:44.027774 204649 binaries.go:51] Found k8s binaries, skipping transfer
I1119 22:35:44.027843 204649 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1119 22:35:44.035977 204649 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (326 bytes)
I1119 22:35:44.049828 204649 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1119 22:35:44.063834 204649 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2176 bytes)
I1119 22:35:44.078459 204649 ssh_runner.go:195] Run: grep 192.168.76.2 control-plane.minikube.internal$ /etc/hosts
I1119 22:35:44.082544 204649 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.76.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1119 22:35:44.093549 204649 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1119 22:35:44.218127 204649 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1119 22:35:44.238847 204649 certs.go:69] Setting up /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160 for IP: 192.168.76.2
I1119 22:35:44.238867 204649 certs.go:195] generating shared ca certs ...
I1119 22:35:44.238885 204649 certs.go:227] acquiring lock for ca certs: {Name:mk76285c445bf14c1e73dedba3201c9181209ff4 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:35:44.239062 204649 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21918-2347/.minikube/ca.key
I1119 22:35:44.239112 204649 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21918-2347/.minikube/proxy-client-ca.key
I1119 22:35:44.239124 204649 certs.go:257] generating profile certs ...
I1119 22:35:44.239186 204649 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/client.key
I1119 22:35:44.239203 204649 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/client.crt with IP's: []
I1119 22:35:44.811737 204649 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/client.crt ...
I1119 22:35:44.811764 204649 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/client.crt: {Name:mk14e11ecda6c7214508a5ade0f9ee915e780f3e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:35:44.811951 204649 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/client.key ...
I1119 22:35:44.811960 204649 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/client.key: {Name:mk0adfc8036cdd3c163e4cffd5e262cb5308dfe9 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:35:44.812038 204649 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/apiserver.key.955d0b5b
I1119 22:35:44.812063 204649 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/apiserver.crt.955d0b5b with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.76.2]
I1119 22:35:45.101024 204649 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/apiserver.crt.955d0b5b ...
I1119 22:35:45.101056 204649 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/apiserver.crt.955d0b5b: {Name:mk5142ac1d579327ae160e83fc7f68b0f3557595 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:35:45.101255 204649 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/apiserver.key.955d0b5b ...
I1119 22:35:45.101267 204649 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/apiserver.key.955d0b5b: {Name:mkc12bee6747eface51cd5e77da3f942ad5e5618 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:35:45.101361 204649 certs.go:382] copying /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/apiserver.crt.955d0b5b -> /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/apiserver.crt
I1119 22:35:45.101462 204649 certs.go:386] copying /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/apiserver.key.955d0b5b -> /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/apiserver.key
I1119 22:35:45.101522 204649 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/proxy-client.key
I1119 22:35:45.101539 204649 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/proxy-client.crt with IP's: []
I1119 22:35:45.832941 204649 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/proxy-client.crt ...
I1119 22:35:45.832971 204649 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/proxy-client.crt: {Name:mk306cbc09a8a4cdf49bd23a7f735885d2e6d6d8 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:35:45.833166 204649 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/proxy-client.key ...
I1119 22:35:45.833185 204649 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/proxy-client.key: {Name:mk51455941ef13941a00f8719c0c4a50b2eaa3aa Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:35:45.833395 204649 certs.go:484] found cert: /home/jenkins/minikube-integration/21918-2347/.minikube/certs/4144.pem (1338 bytes)
W1119 22:35:45.833433 204649 certs.go:480] ignoring /home/jenkins/minikube-integration/21918-2347/.minikube/certs/4144_empty.pem, impossibly tiny 0 bytes
I1119 22:35:45.833442 204649 certs.go:484] found cert: /home/jenkins/minikube-integration/21918-2347/.minikube/certs/ca-key.pem (1675 bytes)
I1119 22:35:45.833468 204649 certs.go:484] found cert: /home/jenkins/minikube-integration/21918-2347/.minikube/certs/ca.pem (1082 bytes)
I1119 22:35:45.833497 204649 certs.go:484] found cert: /home/jenkins/minikube-integration/21918-2347/.minikube/certs/cert.pem (1123 bytes)
I1119 22:35:45.833529 204649 certs.go:484] found cert: /home/jenkins/minikube-integration/21918-2347/.minikube/certs/key.pem (1675 bytes)
I1119 22:35:45.833577 204649 certs.go:484] found cert: /home/jenkins/minikube-integration/21918-2347/.minikube/files/etc/ssl/certs/41442.pem (1708 bytes)
I1119 22:35:45.834165 204649 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-2347/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1119 22:35:45.856349 204649 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-2347/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1671 bytes)
I1119 22:35:45.877913 204649 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-2347/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1119 22:35:45.896516 204649 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-2347/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I1119 22:35:45.914586 204649 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1432 bytes)
I1119 22:35:45.933361 204649 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I1119 22:35:45.951038 204649 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1119 22:35:45.973047 204649 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-2347/.minikube/profiles/old-k8s-version-264160/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I1119 22:35:45.994027 204649 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-2347/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1119 22:35:46.025730 204649 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-2347/.minikube/certs/4144.pem --> /usr/share/ca-certificates/4144.pem (1338 bytes)
I1119 22:35:46.045750 204649 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-2347/.minikube/files/etc/ssl/certs/41442.pem --> /usr/share/ca-certificates/41442.pem (1708 bytes)
I1119 22:35:46.073629 204649 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1119 22:35:46.087614 204649 ssh_runner.go:195] Run: openssl version
I1119 22:35:46.094872 204649 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1119 22:35:46.103931 204649 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1119 22:35:46.108400 204649 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 19 21:49 /usr/share/ca-certificates/minikubeCA.pem
I1119 22:35:46.108519 204649 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1119 22:35:46.165543 204649 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1119 22:35:46.174470 204649 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/4144.pem && ln -fs /usr/share/ca-certificates/4144.pem /etc/ssl/certs/4144.pem"
I1119 22:35:46.182680 204649 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/4144.pem
I1119 22:35:46.186577 204649 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 19 21:56 /usr/share/ca-certificates/4144.pem
I1119 22:35:46.186637 204649 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/4144.pem
I1119 22:35:46.228043 204649 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/4144.pem /etc/ssl/certs/51391683.0"
I1119 22:35:46.236269 204649 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/41442.pem && ln -fs /usr/share/ca-certificates/41442.pem /etc/ssl/certs/41442.pem"
I1119 22:35:46.244687 204649 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/41442.pem
I1119 22:35:46.248576 204649 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 19 21:56 /usr/share/ca-certificates/41442.pem
I1119 22:35:46.248696 204649 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/41442.pem
I1119 22:35:46.290804 204649 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/41442.pem /etc/ssl/certs/3ec20f2e.0"
I1119 22:35:46.299091 204649 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1119 22:35:46.302689 204649 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1119 22:35:46.302790 204649 kubeadm.go:401] StartCluster: {Name:old-k8s-version-264160 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-264160 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1119 22:35:46.302872 204649 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I1119 22:35:46.302930 204649 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I1119 22:35:46.341874 204649 cri.go:89] found id: ""
I1119 22:35:46.341955 204649 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1119 22:35:46.349861 204649 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1119 22:35:46.358624 204649 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I1119 22:35:46.358700 204649 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1119 22:35:46.366859 204649 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1119 22:35:46.366882 204649 kubeadm.go:158] found existing configuration files:
I1119 22:35:46.366956 204649 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1119 22:35:46.375053 204649 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1119 22:35:46.375118 204649 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1119 22:35:46.382569 204649 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1119 22:35:46.390549 204649 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1119 22:35:46.390660 204649 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1119 22:35:46.398378 204649 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1119 22:35:46.406002 204649 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1119 22:35:46.406127 204649 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1119 22:35:46.414558 204649 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1119 22:35:46.422462 204649 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1119 22:35:46.422528 204649 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1119 22:35:46.430234 204649 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.28.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1119 22:35:46.480821 204649 kubeadm.go:319] [init] Using Kubernetes version: v1.28.0
I1119 22:35:46.480973 204649 kubeadm.go:319] [preflight] Running pre-flight checks
I1119 22:35:46.518306 204649 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I1119 22:35:46.518408 204649 kubeadm.go:319] KERNEL_VERSION: 5.15.0-1084-aws
I1119 22:35:46.518469 204649 kubeadm.go:319] OS: Linux
I1119 22:35:46.518555 204649 kubeadm.go:319] CGROUPS_CPU: enabled
I1119 22:35:46.518627 204649 kubeadm.go:319] CGROUPS_CPUACCT: enabled
I1119 22:35:46.518704 204649 kubeadm.go:319] CGROUPS_CPUSET: enabled
I1119 22:35:46.518775 204649 kubeadm.go:319] CGROUPS_DEVICES: enabled
I1119 22:35:46.518848 204649 kubeadm.go:319] CGROUPS_FREEZER: enabled
I1119 22:35:46.518928 204649 kubeadm.go:319] CGROUPS_MEMORY: enabled
I1119 22:35:46.518993 204649 kubeadm.go:319] CGROUPS_PIDS: enabled
I1119 22:35:46.519065 204649 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I1119 22:35:46.519136 204649 kubeadm.go:319] CGROUPS_BLKIO: enabled
I1119 22:35:46.603387 204649 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1119 22:35:46.603532 204649 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1119 22:35:46.603659 204649 kubeadm.go:319] [preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
I1119 22:35:46.748614 204649 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1119 22:35:46.754520 204649 out.go:252] - Generating certificates and keys ...
I1119 22:35:46.754636 204649 kubeadm.go:319] [certs] Using existing ca certificate authority
I1119 22:35:46.754728 204649 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1119 22:35:47.362621 204649 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I1119 22:35:47.861152 204649 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I1119 22:35:48.578567 204649 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1119 22:35:48.709308 204649 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1119 22:35:49.572586 204649 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1119 22:35:49.572742 204649 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [localhost old-k8s-version-264160] and IPs [192.168.76.2 127.0.0.1 ::1]
I1119 22:35:50.286968 204649 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1119 22:35:50.287350 204649 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [localhost old-k8s-version-264160] and IPs [192.168.76.2 127.0.0.1 ::1]
I1119 22:35:50.729163 204649 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1119 22:35:51.087355 204649 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1119 22:35:51.301494 204649 kubeadm.go:319] [certs] Generating "sa" key and public key
I1119 22:35:51.301799 204649 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1119 22:35:52.439151 204649 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1119 22:35:52.767854 204649 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1119 22:35:53.170174 204649 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1119 22:35:53.873745 204649 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1119 22:35:53.874592 204649 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1119 22:35:53.877867 204649 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1119 22:35:53.883494 204649 out.go:252] - Booting up control plane ...
I1119 22:35:53.883605 204649 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1119 22:35:53.883687 204649 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1119 22:35:53.883756 204649 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1119 22:35:53.900950 204649 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1119 22:35:53.901278 204649 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1119 22:35:53.901523 204649 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1119 22:35:54.050697 204649 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
I1119 22:36:04.052724 204649 kubeadm.go:319] [apiclient] All control plane components are healthy after 10.003761 seconds
I1119 22:36:04.052869 204649 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1119 22:36:04.072130 204649 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1119 22:36:04.605781 204649 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1119 22:36:04.606002 204649 kubeadm.go:319] [mark-control-plane] Marking the node old-k8s-version-264160 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1119 22:36:05.122165 204649 kubeadm.go:319] [bootstrap-token] Using token: t3hgjm.t27pk8uf8r4mqrko
I1119 22:36:05.125207 204649 out.go:252] - Configuring RBAC rules ...
I1119 22:36:05.125347 204649 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1119 22:36:05.138372 204649 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1119 22:36:05.149292 204649 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1119 22:36:05.153962 204649 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1119 22:36:05.159111 204649 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1119 22:36:05.163924 204649 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1119 22:36:05.183969 204649 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1119 22:36:05.490668 204649 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1119 22:36:05.544743 204649 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1119 22:36:05.545712 204649 kubeadm.go:319]
I1119 22:36:05.545794 204649 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1119 22:36:05.545800 204649 kubeadm.go:319]
I1119 22:36:05.545881 204649 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1119 22:36:05.545886 204649 kubeadm.go:319]
I1119 22:36:05.545912 204649 kubeadm.go:319] mkdir -p $HOME/.kube
I1119 22:36:05.545975 204649 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1119 22:36:05.546029 204649 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1119 22:36:05.546036 204649 kubeadm.go:319]
I1119 22:36:05.546092 204649 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1119 22:36:05.546097 204649 kubeadm.go:319]
I1119 22:36:05.546192 204649 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1119 22:36:05.546198 204649 kubeadm.go:319]
I1119 22:36:05.546252 204649 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1119 22:36:05.546330 204649 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1119 22:36:05.546401 204649 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1119 22:36:05.546405 204649 kubeadm.go:319]
I1119 22:36:05.546493 204649 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1119 22:36:05.546572 204649 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1119 22:36:05.546577 204649 kubeadm.go:319]
I1119 22:36:05.546665 204649 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token t3hgjm.t27pk8uf8r4mqrko \
I1119 22:36:05.546773 204649 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:f3dc8233c963d7fa33b7a72da6102de3e0dbc1bf6e99b77f8426922389e565f9 \
I1119 22:36:05.546794 204649 kubeadm.go:319] --control-plane
I1119 22:36:05.546798 204649 kubeadm.go:319]
I1119 22:36:05.546886 204649 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1119 22:36:05.546890 204649 kubeadm.go:319]
I1119 22:36:05.546975 204649 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token t3hgjm.t27pk8uf8r4mqrko \
I1119 22:36:05.547080 204649 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:f3dc8233c963d7fa33b7a72da6102de3e0dbc1bf6e99b77f8426922389e565f9
I1119 22:36:05.551148 204649 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1084-aws\n", err: exit status 1
I1119 22:36:05.551265 204649 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
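Both [WARNING] lines look benign for this run: SystemVerification is deliberately skipped for the docker driver (it is on the --ignore-preflight-errors list passed to kubeadm init above), and minikube starts the kubelet itself via systemctl rather than relying on unit enablement. A hedged one-liner, assuming the profile name from this run, if one wanted to clear the Service-Kubelet warning manually:
  # enable the kubelet unit inside the node so kubeadm's preflight stops flagging it
  minikube -p old-k8s-version-264160 ssh -- sudo systemctl enable kubelet.service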
I1119 22:36:05.551281 204649 cni.go:84] Creating CNI manager for ""
I1119 22:36:05.551288 204649 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1119 22:36:05.554507 204649 out.go:179] * Configuring CNI (Container Networking Interface) ...
I1119 22:36:05.557507 204649 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I1119 22:36:05.576310 204649 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.28.0/kubectl ...
I1119 22:36:05.576331 204649 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I1119 22:36:05.593718 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I1119 22:36:06.658889 204649 ssh_runner.go:235] Completed: sudo /var/lib/minikube/binaries/v1.28.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml: (1.065138821s)
I1119 22:36:06.658975 204649 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1119 22:36:06.659094 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:06.659175 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes old-k8s-version-264160 minikube.k8s.io/updated_at=2025_11_19T22_36_06_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=08454a179ffa60c8ae500105aac58654b5cdef58 minikube.k8s.io/name=old-k8s-version-264160 minikube.k8s.io/primary=true
I1119 22:36:06.818009 204649 ops.go:34] apiserver oom_adj: -16
I1119 22:36:06.818101 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:07.318669 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:07.818290 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:08.318653 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:08.818829 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:09.318705 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:09.818670 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:10.318656 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:10.818343 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:11.318742 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:11.818660 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:12.318643 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:12.818204 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:13.318233 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:13.818478 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:14.318102 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:14.818178 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:15.318224 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:15.818601 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:16.319007 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:16.818836 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:17.318883 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:17.818083 204649 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:36:18.005461 204649 kubeadm.go:1114] duration metric: took 11.346407343s to wait for elevateKubeSystemPrivileges
I1119 22:36:18.005498 204649 kubeadm.go:403] duration metric: took 31.702712181s to StartCluster
I1119 22:36:18.005516 204649 settings.go:142] acquiring lock: {Name:mk5c8f7d46662d574c7e53cf7b09709855a1e14f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:36:18.005603 204649 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21918-2347/kubeconfig
I1119 22:36:18.006647 204649 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21918-2347/kubeconfig: {Name:mk670f88d9cb1be22f05f7db4ddcfb97af791e42 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:36:18.006944 204649 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1119 22:36:18.006951 204649 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1119 22:36:18.007274 204649 config.go:182] Loaded profile config "old-k8s-version-264160": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1119 22:36:18.007313 204649 addons.go:512] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1119 22:36:18.007401 204649 addons.go:70] Setting storage-provisioner=true in profile "old-k8s-version-264160"
I1119 22:36:18.007419 204649 addons.go:239] Setting addon storage-provisioner=true in "old-k8s-version-264160"
I1119 22:36:18.007444 204649 host.go:66] Checking if "old-k8s-version-264160" exists ...
I1119 22:36:18.007919 204649 cli_runner.go:164] Run: docker container inspect old-k8s-version-264160 --format={{.State.Status}}
I1119 22:36:18.008446 204649 addons.go:70] Setting default-storageclass=true in profile "old-k8s-version-264160"
I1119 22:36:18.008469 204649 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "old-k8s-version-264160"
I1119 22:36:18.008780 204649 cli_runner.go:164] Run: docker container inspect old-k8s-version-264160 --format={{.State.Status}}
I1119 22:36:18.011866 204649 out.go:179] * Verifying Kubernetes components...
I1119 22:36:18.014838 204649 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1119 22:36:18.055880 204649 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1119 22:36:18.056763 204649 addons.go:239] Setting addon default-storageclass=true in "old-k8s-version-264160"
I1119 22:36:18.056800 204649 host.go:66] Checking if "old-k8s-version-264160" exists ...
I1119 22:36:18.057242 204649 cli_runner.go:164] Run: docker container inspect old-k8s-version-264160 --format={{.State.Status}}
I1119 22:36:18.059443 204649 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1119 22:36:18.059467 204649 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1119 22:36:18.059527 204649 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-264160
I1119 22:36:18.093613 204649 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1119 22:36:18.093726 204649 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33054 SSHKeyPath:/home/jenkins/minikube-integration/21918-2347/.minikube/machines/old-k8s-version-264160/id_rsa Username:docker}
I1119 22:36:18.095300 204649 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1119 22:36:18.095428 204649 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-264160
I1119 22:36:18.135800 204649 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33054 SSHKeyPath:/home/jenkins/minikube-integration/21918-2347/.minikube/machines/old-k8s-version-264160/id_rsa Username:docker}
I1119 22:36:18.357324 204649 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.76.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I1119 22:36:18.357451 204649 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1119 22:36:18.439741 204649 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1119 22:36:18.443940 204649 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1119 22:36:19.165631 204649 node_ready.go:35] waiting up to 6m0s for node "old-k8s-version-264160" to be "Ready" ...
I1119 22:36:19.165952 204649 start.go:977] {"host.minikube.internal": 192.168.76.1} host record injected into CoreDNS's ConfigMap
I1119 22:36:19.668262 204649 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (1.228448448s)
I1119 22:36:19.668305 204649 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: (1.224346607s)
I1119 22:36:19.682930 204649 kapi.go:214] "coredns" deployment in "kube-system" namespace and "old-k8s-version-264160" context rescaled to 1 replicas
I1119 22:36:19.691208 204649 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
I1119 22:36:19.694506 204649 addons.go:515] duration metric: took 1.687167131s for enable addons: enabled=[storage-provisioner default-storageclass]
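The CoreDNS rewrite a few lines above (the sed pipeline that injects "192.168.76.1 host.minikube.internal" into the Corefile) is what makes the host reachable by name from inside the cluster. A minimal sketch for confirming the record landed, assuming the profile name from this run and using the bundled kubectl to avoid client/server version skew:
  # dump the patched Corefile and look for the injected hosts block
  minikube -p old-k8s-version-264160 kubectl -- -n kube-system get configmap coredns -o yaml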
W1119 22:36:21.170389 204649 node_ready.go:57] node "old-k8s-version-264160" has "Ready":"False" status (will retry)
W1119 22:36:23.669181 204649 node_ready.go:57] node "old-k8s-version-264160" has "Ready":"False" status (will retry)
W1119 22:36:26.169468 204649 node_ready.go:57] node "old-k8s-version-264160" has "Ready":"False" status (will retry)
W1119 22:36:28.668771 204649 node_ready.go:57] node "old-k8s-version-264160" has "Ready":"False" status (will retry)
W1119 22:36:30.669387 204649 node_ready.go:57] node "old-k8s-version-264160" has "Ready":"False" status (will retry)
I1119 22:36:31.179436 204649 node_ready.go:49] node "old-k8s-version-264160" is "Ready"
I1119 22:36:31.179462 204649 node_ready.go:38] duration metric: took 12.013798629s for node "old-k8s-version-264160" to be "Ready" ...
I1119 22:36:31.179475 204649 api_server.go:52] waiting for apiserver process to appear ...
I1119 22:36:31.179538 204649 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1119 22:36:31.199071 204649 api_server.go:72] duration metric: took 13.192088991s to wait for apiserver process to appear ...
I1119 22:36:31.199094 204649 api_server.go:88] waiting for apiserver healthz status ...
I1119 22:36:31.199116 204649 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1119 22:36:31.209770 204649 api_server.go:279] https://192.168.76.2:8443/healthz returned 200:
ok
I1119 22:36:31.211739 204649 api_server.go:141] control plane version: v1.28.0
I1119 22:36:31.211767 204649 api_server.go:131] duration metric: took 12.666386ms to wait for apiserver health ...
I1119 22:36:31.211777 204649 system_pods.go:43] waiting for kube-system pods to appear ...
I1119 22:36:31.216012 204649 system_pods.go:59] 8 kube-system pods found
I1119 22:36:31.216054 204649 system_pods.go:61] "coredns-5dd5756b68-vz7zx" [7e7645ad-49a9-4f0c-89cc-128538e4d95c] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1119 22:36:31.216062 204649 system_pods.go:61] "etcd-old-k8s-version-264160" [1bd42d38-2921-483d-b656-d1f12178141b] Running
I1119 22:36:31.216068 204649 system_pods.go:61] "kindnet-m9nqq" [2f9f6fbb-c725-49fd-ba3a-c84a7640aac2] Running
I1119 22:36:31.216073 204649 system_pods.go:61] "kube-apiserver-old-k8s-version-264160" [454724a2-4fd6-4dc1-9cc1-a4b60944a9df] Running
I1119 22:36:31.216084 204649 system_pods.go:61] "kube-controller-manager-old-k8s-version-264160" [a5ad5849-09a1-43bd-861a-8c92712b0a14] Running
I1119 22:36:31.216088 204649 system_pods.go:61] "kube-proxy-zzmnr" [3ee1645f-fba5-4206-bb83-70d298a4c5ac] Running
I1119 22:36:31.216100 204649 system_pods.go:61] "kube-scheduler-old-k8s-version-264160" [fbad20e1-7729-4503-b929-bc32986a00e8] Running
I1119 22:36:31.216106 204649 system_pods.go:61] "storage-provisioner" [8e2dda77-5a6d-4796-926b-5a06158f8cdf] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1119 22:36:31.216112 204649 system_pods.go:74] duration metric: took 4.329001ms to wait for pod list to return data ...
I1119 22:36:31.216127 204649 default_sa.go:34] waiting for default service account to be created ...
I1119 22:36:31.219246 204649 default_sa.go:45] found service account: "default"
I1119 22:36:31.219283 204649 default_sa.go:55] duration metric: took 3.150461ms for default service account to be created ...
I1119 22:36:31.219293 204649 system_pods.go:116] waiting for k8s-apps to be running ...
I1119 22:36:31.226730 204649 system_pods.go:86] 8 kube-system pods found
I1119 22:36:31.226780 204649 system_pods.go:89] "coredns-5dd5756b68-vz7zx" [7e7645ad-49a9-4f0c-89cc-128538e4d95c] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1119 22:36:31.226788 204649 system_pods.go:89] "etcd-old-k8s-version-264160" [1bd42d38-2921-483d-b656-d1f12178141b] Running
I1119 22:36:31.226795 204649 system_pods.go:89] "kindnet-m9nqq" [2f9f6fbb-c725-49fd-ba3a-c84a7640aac2] Running
I1119 22:36:31.226801 204649 system_pods.go:89] "kube-apiserver-old-k8s-version-264160" [454724a2-4fd6-4dc1-9cc1-a4b60944a9df] Running
I1119 22:36:31.226820 204649 system_pods.go:89] "kube-controller-manager-old-k8s-version-264160" [a5ad5849-09a1-43bd-861a-8c92712b0a14] Running
I1119 22:36:31.226840 204649 system_pods.go:89] "kube-proxy-zzmnr" [3ee1645f-fba5-4206-bb83-70d298a4c5ac] Running
I1119 22:36:31.226854 204649 system_pods.go:89] "kube-scheduler-old-k8s-version-264160" [fbad20e1-7729-4503-b929-bc32986a00e8] Running
I1119 22:36:31.226880 204649 system_pods.go:89] "storage-provisioner" [8e2dda77-5a6d-4796-926b-5a06158f8cdf] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1119 22:36:31.226914 204649 retry.go:31] will retry after 302.789316ms: missing components: kube-dns
I1119 22:36:31.534752 204649 system_pods.go:86] 8 kube-system pods found
I1119 22:36:31.534798 204649 system_pods.go:89] "coredns-5dd5756b68-vz7zx" [7e7645ad-49a9-4f0c-89cc-128538e4d95c] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1119 22:36:31.534805 204649 system_pods.go:89] "etcd-old-k8s-version-264160" [1bd42d38-2921-483d-b656-d1f12178141b] Running
I1119 22:36:31.534811 204649 system_pods.go:89] "kindnet-m9nqq" [2f9f6fbb-c725-49fd-ba3a-c84a7640aac2] Running
I1119 22:36:31.534815 204649 system_pods.go:89] "kube-apiserver-old-k8s-version-264160" [454724a2-4fd6-4dc1-9cc1-a4b60944a9df] Running
I1119 22:36:31.534821 204649 system_pods.go:89] "kube-controller-manager-old-k8s-version-264160" [a5ad5849-09a1-43bd-861a-8c92712b0a14] Running
I1119 22:36:31.534825 204649 system_pods.go:89] "kube-proxy-zzmnr" [3ee1645f-fba5-4206-bb83-70d298a4c5ac] Running
I1119 22:36:31.534829 204649 system_pods.go:89] "kube-scheduler-old-k8s-version-264160" [fbad20e1-7729-4503-b929-bc32986a00e8] Running
I1119 22:36:31.534838 204649 system_pods.go:89] "storage-provisioner" [8e2dda77-5a6d-4796-926b-5a06158f8cdf] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1119 22:36:31.534852 204649 retry.go:31] will retry after 260.752212ms: missing components: kube-dns
I1119 22:36:31.802433 204649 system_pods.go:86] 8 kube-system pods found
I1119 22:36:31.802477 204649 system_pods.go:89] "coredns-5dd5756b68-vz7zx" [7e7645ad-49a9-4f0c-89cc-128538e4d95c] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1119 22:36:31.802484 204649 system_pods.go:89] "etcd-old-k8s-version-264160" [1bd42d38-2921-483d-b656-d1f12178141b] Running
I1119 22:36:31.802492 204649 system_pods.go:89] "kindnet-m9nqq" [2f9f6fbb-c725-49fd-ba3a-c84a7640aac2] Running
I1119 22:36:31.802496 204649 system_pods.go:89] "kube-apiserver-old-k8s-version-264160" [454724a2-4fd6-4dc1-9cc1-a4b60944a9df] Running
I1119 22:36:31.802502 204649 system_pods.go:89] "kube-controller-manager-old-k8s-version-264160" [a5ad5849-09a1-43bd-861a-8c92712b0a14] Running
I1119 22:36:31.802506 204649 system_pods.go:89] "kube-proxy-zzmnr" [3ee1645f-fba5-4206-bb83-70d298a4c5ac] Running
I1119 22:36:31.802510 204649 system_pods.go:89] "kube-scheduler-old-k8s-version-264160" [fbad20e1-7729-4503-b929-bc32986a00e8] Running
I1119 22:36:31.802517 204649 system_pods.go:89] "storage-provisioner" [8e2dda77-5a6d-4796-926b-5a06158f8cdf] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1119 22:36:31.802540 204649 retry.go:31] will retry after 341.00697ms: missing components: kube-dns
I1119 22:36:32.148247 204649 system_pods.go:86] 8 kube-system pods found
I1119 22:36:32.148281 204649 system_pods.go:89] "coredns-5dd5756b68-vz7zx" [7e7645ad-49a9-4f0c-89cc-128538e4d95c] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1119 22:36:32.148298 204649 system_pods.go:89] "etcd-old-k8s-version-264160" [1bd42d38-2921-483d-b656-d1f12178141b] Running
I1119 22:36:32.148304 204649 system_pods.go:89] "kindnet-m9nqq" [2f9f6fbb-c725-49fd-ba3a-c84a7640aac2] Running
I1119 22:36:32.148309 204649 system_pods.go:89] "kube-apiserver-old-k8s-version-264160" [454724a2-4fd6-4dc1-9cc1-a4b60944a9df] Running
I1119 22:36:32.148314 204649 system_pods.go:89] "kube-controller-manager-old-k8s-version-264160" [a5ad5849-09a1-43bd-861a-8c92712b0a14] Running
I1119 22:36:32.148320 204649 system_pods.go:89] "kube-proxy-zzmnr" [3ee1645f-fba5-4206-bb83-70d298a4c5ac] Running
I1119 22:36:32.148329 204649 system_pods.go:89] "kube-scheduler-old-k8s-version-264160" [fbad20e1-7729-4503-b929-bc32986a00e8] Running
I1119 22:36:32.148333 204649 system_pods.go:89] "storage-provisioner" [8e2dda77-5a6d-4796-926b-5a06158f8cdf] Running
I1119 22:36:32.148348 204649 system_pods.go:126] duration metric: took 929.047421ms to wait for k8s-apps to be running ...
I1119 22:36:32.148356 204649 system_svc.go:44] waiting for kubelet service to be running ....
I1119 22:36:32.148423 204649 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1119 22:36:32.175720 204649 system_svc.go:56] duration metric: took 27.353086ms WaitForService to wait for kubelet
I1119 22:36:32.175754 204649 kubeadm.go:587] duration metric: took 14.168776732s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1119 22:36:32.175782 204649 node_conditions.go:102] verifying NodePressure condition ...
I1119 22:36:32.178856 204649 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
I1119 22:36:32.178889 204649 node_conditions.go:123] node cpu capacity is 2
I1119 22:36:32.178903 204649 node_conditions.go:105] duration metric: took 3.115367ms to run NodePressure ...
I1119 22:36:32.178915 204649 start.go:242] waiting for startup goroutines ...
I1119 22:36:32.178933 204649 start.go:247] waiting for cluster config update ...
I1119 22:36:32.178949 204649 start.go:256] writing updated cluster config ...
I1119 22:36:32.179275 204649 ssh_runner.go:195] Run: rm -f paused
I1119 22:36:32.186678 204649 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1119 22:36:32.192039 204649 pod_ready.go:83] waiting for pod "coredns-5dd5756b68-vz7zx" in "kube-system" namespace to be "Ready" or be gone ...
W1119 22:36:34.198510 204649 pod_ready.go:104] pod "coredns-5dd5756b68-vz7zx" is not "Ready", error: <nil>
W1119 22:36:36.198937 204649 pod_ready.go:104] pod "coredns-5dd5756b68-vz7zx" is not "Ready", error: <nil>
W1119 22:36:38.698791 204649 pod_ready.go:104] pod "coredns-5dd5756b68-vz7zx" is not "Ready", error: <nil>
W1119 22:36:41.198015 204649 pod_ready.go:104] pod "coredns-5dd5756b68-vz7zx" is not "Ready", error: <nil>
I1119 22:36:41.698204 204649 pod_ready.go:94] pod "coredns-5dd5756b68-vz7zx" is "Ready"
I1119 22:36:41.698233 204649 pod_ready.go:86] duration metric: took 9.50616482s for pod "coredns-5dd5756b68-vz7zx" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:36:41.701276 204649 pod_ready.go:83] waiting for pod "etcd-old-k8s-version-264160" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:36:41.706418 204649 pod_ready.go:94] pod "etcd-old-k8s-version-264160" is "Ready"
I1119 22:36:41.706451 204649 pod_ready.go:86] duration metric: took 5.148925ms for pod "etcd-old-k8s-version-264160" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:36:41.709706 204649 pod_ready.go:83] waiting for pod "kube-apiserver-old-k8s-version-264160" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:36:41.715470 204649 pod_ready.go:94] pod "kube-apiserver-old-k8s-version-264160" is "Ready"
I1119 22:36:41.715499 204649 pod_ready.go:86] duration metric: took 5.766499ms for pod "kube-apiserver-old-k8s-version-264160" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:36:41.718802 204649 pod_ready.go:83] waiting for pod "kube-controller-manager-old-k8s-version-264160" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:36:41.896506 204649 pod_ready.go:94] pod "kube-controller-manager-old-k8s-version-264160" is "Ready"
I1119 22:36:41.896538 204649 pod_ready.go:86] duration metric: took 177.710699ms for pod "kube-controller-manager-old-k8s-version-264160" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:36:42.096924 204649 pod_ready.go:83] waiting for pod "kube-proxy-zzmnr" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:36:42.496606 204649 pod_ready.go:94] pod "kube-proxy-zzmnr" is "Ready"
I1119 22:36:42.496635 204649 pod_ready.go:86] duration metric: took 399.679699ms for pod "kube-proxy-zzmnr" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:36:42.696640 204649 pod_ready.go:83] waiting for pod "kube-scheduler-old-k8s-version-264160" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:36:43.096504 204649 pod_ready.go:94] pod "kube-scheduler-old-k8s-version-264160" is "Ready"
I1119 22:36:43.096533 204649 pod_ready.go:86] duration metric: took 399.863388ms for pod "kube-scheduler-old-k8s-version-264160" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:36:43.096547 204649 pod_ready.go:40] duration metric: took 10.90982149s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1119 22:36:43.158402 204649 start.go:628] kubectl: 1.33.2, cluster: 1.28.0 (minor skew: 5)
I1119 22:36:43.161490 204649 out.go:203]
W1119 22:36:43.164427 204649 out.go:285] ! /usr/local/bin/kubectl is version 1.33.2, which may have incompatibilities with Kubernetes 1.28.0.
I1119 22:36:43.167321 204649 out.go:179] - Want kubectl v1.28.0? Try 'minikube kubectl -- get pods -A'
I1119 22:36:43.171088 204649 out.go:179] * Done! kubectl is now configured to use "old-k8s-version-264160" cluster and "default" namespace by default
==> container status <==
CONTAINER       IMAGE           CREATED          STATE     NAME                      ATTEMPT   POD ID          POD                                              NAMESPACE
85ec8d942d110   1611cd07b61d5   9 seconds ago    Running   busybox                   0         1b11528fdca0b   busybox                                          default
0b5a95c859ac3   97e04611ad434   24 seconds ago   Running   coredns                   0         aa34d2193fc4c   coredns-5dd5756b68-vz7zx                         kube-system
f62b743b6725e   ba04bb24b9575   24 seconds ago   Running   storage-provisioner       0         2bededbe57122   storage-provisioner                              kube-system
3dc4045566ee8   b1a8c6f707935   35 seconds ago   Running   kindnet-cni               0         232dd2b4b80b5   kindnet-m9nqq                                    kube-system
e5c22c9877dd1   940f54a5bcae9   37 seconds ago   Running   kube-proxy                0         f45778acb4883   kube-proxy-zzmnr                                 kube-system
0aa1bd28b6073   762dce4090c5f   59 seconds ago   Running   kube-scheduler            0         4b7124d3d4b79   kube-scheduler-old-k8s-version-264160            kube-system
83a25278b16a7   00543d2fe5d71   59 seconds ago   Running   kube-apiserver            0         67f5df81322ce   kube-apiserver-old-k8s-version-264160            kube-system
9ce9313d9aae4   46cc66ccc7c19   59 seconds ago   Running   kube-controller-manager   0         0783ca7945d35   kube-controller-manager-old-k8s-version-264160   kube-system
85f86fccea082   9cdd6470f48c8   59 seconds ago   Running   etcd                      0         4969a45c845f9   etcd-old-k8s-version-264160                      kube-system
==> containerd <==
Nov 19 22:36:31 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:31.649388743Z" level=info msg="CreateContainer within sandbox \"aa34d2193fc4cf037239bc48a6fac96674b060cb63b8de7320bb53007ec52479\" for container &ContainerMetadata{Name:coredns,Attempt:0,}"
Nov 19 22:36:31 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:31.651192399Z" level=info msg="connecting to shim f62b743b6725ec9ff1e91e664da6c9ce15d837afbab3608cc02fec3c9bd3d929" address="unix:///run/containerd/s/1693fe8eea8ad33a7610805dc3ed40de55c61613614162362386d2386e86ea05" protocol=ttrpc version=3
Nov 19 22:36:31 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:31.675391127Z" level=info msg="Container 0b5a95c859ac383d11c4aa9fb013d9cb4c21b0ac201d6a26cc3ec130b9027e9f: CDI devices from CRI Config.CDIDevices: []"
Nov 19 22:36:31 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:31.690874899Z" level=info msg="CreateContainer within sandbox \"aa34d2193fc4cf037239bc48a6fac96674b060cb63b8de7320bb53007ec52479\" for &ContainerMetadata{Name:coredns,Attempt:0,} returns container id \"0b5a95c859ac383d11c4aa9fb013d9cb4c21b0ac201d6a26cc3ec130b9027e9f\""
Nov 19 22:36:31 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:31.691891617Z" level=info msg="StartContainer for \"0b5a95c859ac383d11c4aa9fb013d9cb4c21b0ac201d6a26cc3ec130b9027e9f\""
Nov 19 22:36:31 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:31.692987039Z" level=info msg="connecting to shim 0b5a95c859ac383d11c4aa9fb013d9cb4c21b0ac201d6a26cc3ec130b9027e9f" address="unix:///run/containerd/s/ba4b3d499342aaf3ebd6be16fa5ad2a140167ea49a534a0a812a3977c5dcf983" protocol=ttrpc version=3
Nov 19 22:36:31 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:31.744679204Z" level=info msg="StartContainer for \"f62b743b6725ec9ff1e91e664da6c9ce15d837afbab3608cc02fec3c9bd3d929\" returns successfully"
Nov 19 22:36:31 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:31.792512880Z" level=info msg="StartContainer for \"0b5a95c859ac383d11c4aa9fb013d9cb4c21b0ac201d6a26cc3ec130b9027e9f\" returns successfully"
Nov 19 22:36:43 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:43.710209778Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:2af6deb4-937f-4b9b-9de6-995e75a080b8,Namespace:default,Attempt:0,}"
Nov 19 22:36:43 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:43.790459559Z" level=info msg="connecting to shim 1b11528fdca0ba74e5c7786578d6850eb5b37f9540b5e04e610639ce7fbd811f" address="unix:///run/containerd/s/b036a265eb01a921a8d2ed1a42211f4774df4a741b42e6007a96fa06394b6381" namespace=k8s.io protocol=ttrpc version=3
Nov 19 22:36:43 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:43.849747951Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:2af6deb4-937f-4b9b-9de6-995e75a080b8,Namespace:default,Attempt:0,} returns sandbox id \"1b11528fdca0ba74e5c7786578d6850eb5b37f9540b5e04e610639ce7fbd811f\""
Nov 19 22:36:43 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:43.854324085Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\""
Nov 19 22:36:46 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:46.047353549Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 19 22:36:46 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:46.049424358Z" level=info msg="stop pulling image gcr.io/k8s-minikube/busybox:1.28.4-glibc: active requests=0, bytes read=1937184"
Nov 19 22:36:46 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:46.052705979Z" level=info msg="ImageCreate event name:\"sha256:1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 19 22:36:46 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:46.058110078Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 19 22:36:46 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:46.059158943Z" level=info msg="Pulled image \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" with image id \"sha256:1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c\", repo tag \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\", repo digest \"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\", size \"1935750\" in 2.204520106s"
Nov 19 22:36:46 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:46.059209750Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" returns image reference \"sha256:1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c\""
Nov 19 22:36:46 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:46.063270047Z" level=info msg="CreateContainer within sandbox \"1b11528fdca0ba74e5c7786578d6850eb5b37f9540b5e04e610639ce7fbd811f\" for container &ContainerMetadata{Name:busybox,Attempt:0,}"
Nov 19 22:36:46 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:46.078717460Z" level=info msg="Container 85ec8d942d1102ad7f23f0923c0afa921c51c4b09ac0f93dc33203a257d7ca57: CDI devices from CRI Config.CDIDevices: []"
Nov 19 22:36:46 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:46.090943881Z" level=info msg="CreateContainer within sandbox \"1b11528fdca0ba74e5c7786578d6850eb5b37f9540b5e04e610639ce7fbd811f\" for &ContainerMetadata{Name:busybox,Attempt:0,} returns container id \"85ec8d942d1102ad7f23f0923c0afa921c51c4b09ac0f93dc33203a257d7ca57\""
Nov 19 22:36:46 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:46.091676862Z" level=info msg="StartContainer for \"85ec8d942d1102ad7f23f0923c0afa921c51c4b09ac0f93dc33203a257d7ca57\""
Nov 19 22:36:46 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:46.092550045Z" level=info msg="connecting to shim 85ec8d942d1102ad7f23f0923c0afa921c51c4b09ac0f93dc33203a257d7ca57" address="unix:///run/containerd/s/b036a265eb01a921a8d2ed1a42211f4774df4a741b42e6007a96fa06394b6381" protocol=ttrpc version=3
Nov 19 22:36:46 old-k8s-version-264160 containerd[760]: time="2025-11-19T22:36:46.167983720Z" level=info msg="StartContainer for \"85ec8d942d1102ad7f23f0923c0afa921c51c4b09ac0f93dc33203a257d7ca57\" returns successfully"
Nov 19 22:36:52 old-k8s-version-264160 containerd[760]: E1119 22:36:52.581929 760 websocket.go:100] "Unhandled Error" err="unable to upgrade websocket connection: websocket server finished before becoming ready" logger="UnhandledError"
==> coredns [0b5a95c859ac383d11c4aa9fb013d9cb4c21b0ac201d6a26cc3ec130b9027e9f] <==
.:53
[INFO] plugin/reload: Running configuration SHA512 = b7aacdf6a6aa730aafe4d018cac9b7b5ecfb346cba84a99f64521f87aef8b4958639c1cf97967716465791d05bd38f372615327b7cb1d93c850bae532744d54d
CoreDNS-1.10.1
linux/arm64, go1.20, 055b2c3
[INFO] 127.0.0.1:51463 - 23570 "HINFO IN 6404155507127924057.1273287447177964912. udp 57 false 512" NXDOMAIN qr,rd,ra 57 0.026393207s
==> describe nodes <==
Name: old-k8s-version-264160
Roles: control-plane
Labels: beta.kubernetes.io/arch=arm64
beta.kubernetes.io/os=linux
kubernetes.io/arch=arm64
kubernetes.io/hostname=old-k8s-version-264160
kubernetes.io/os=linux
minikube.k8s.io/commit=08454a179ffa60c8ae500105aac58654b5cdef58
minikube.k8s.io/name=old-k8s-version-264160
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_11_19T22_36_06_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Wed, 19 Nov 2025 22:36:02 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: old-k8s-version-264160
AcquireTime: <unset>
RenewTime: Wed, 19 Nov 2025 22:36:46 +0000
Conditions:
  Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
  ----             ------  -----------------                 ------------------                ------                       -------
  MemoryPressure   False   Wed, 19 Nov 2025 22:36:36 +0000   Wed, 19 Nov 2025 22:35:57 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
  DiskPressure     False   Wed, 19 Nov 2025 22:36:36 +0000   Wed, 19 Nov 2025 22:35:57 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
  PIDPressure      False   Wed, 19 Nov 2025 22:36:36 +0000   Wed, 19 Nov 2025 22:35:57 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
  Ready            True    Wed, 19 Nov 2025 22:36:36 +0000   Wed, 19 Nov 2025 22:36:31 +0000   KubeletReady                 kubelet is posting ready status
Addresses:
InternalIP: 192.168.76.2
Hostname: old-k8s-version-264160
Capacity:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022296Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022296Ki
pods: 110
System Info:
Machine ID: 2de5c7cc592a67801eaa2fbe691dd049
System UUID: b680c3d2-ce1c-409c-bfdc-4a24b39315bd
Boot ID: b3875353-65b3-44b7-ad72-afadd7e2486a
Kernel Version: 5.15.0-1084-aws
OS Image: Debian GNU/Linux 12 (bookworm)
Operating System: linux
Architecture: arm64
Container Runtime Version: containerd://2.1.5
Kubelet Version: v1.28.0
Kube-Proxy Version: v1.28.0
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (9 in total)
  Namespace    Name                                              CPU Requests  CPU Limits  Memory Requests  Memory Limits  Age
  ---------    ----                                              ------------  ----------  ---------------  -------------  ---
  default      busybox                                           0 (0%)        0 (0%)      0 (0%)           0 (0%)         13s
  kube-system  coredns-5dd5756b68-vz7zx                          100m (5%)     0 (0%)      70Mi (0%)        170Mi (2%)     39s
  kube-system  etcd-old-k8s-version-264160                       100m (5%)     0 (0%)      100Mi (1%)       0 (0%)         51s
  kube-system  kindnet-m9nqq                                     100m (5%)     100m (5%)   50Mi (0%)        50Mi (0%)      39s
  kube-system  kube-apiserver-old-k8s-version-264160             250m (12%)    0 (0%)      0 (0%)           0 (0%)         52s
  kube-system  kube-controller-manager-old-k8s-version-264160    200m (10%)    0 (0%)      0 (0%)           0 (0%)         51s
  kube-system  kube-proxy-zzmnr                                  0 (0%)        0 (0%)      0 (0%)           0 (0%)         39s
  kube-system  kube-scheduler-old-k8s-version-264160             100m (5%)     0 (0%)      0 (0%)           0 (0%)         53s
  kube-system  storage-provisioner                               0 (0%)        0 (0%)      0 (0%)           0 (0%)         37s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
  Resource           Requests    Limits
  --------           --------    ------
  cpu                850m (42%)  100m (5%)
  memory             220Mi (2%)  220Mi (2%)
  ephemeral-storage  0 (0%)      0 (0%)
  hugepages-1Gi      0 (0%)      0 (0%)
  hugepages-2Mi      0 (0%)      0 (0%)
  hugepages-32Mi     0 (0%)      0 (0%)
  hugepages-64Ki     0 (0%)      0 (0%)
Events:
  Type    Reason                   Age                From             Message
  ----    ------                   ----               ----             -------
  Normal  Starting                 37s                kube-proxy
  Normal  NodeHasSufficientMemory  61s (x8 over 61s)  kubelet          Node old-k8s-version-264160 status is now: NodeHasSufficientMemory
  Normal  NodeHasNoDiskPressure    61s (x8 over 61s)  kubelet          Node old-k8s-version-264160 status is now: NodeHasNoDiskPressure
  Normal  NodeHasSufficientPID     61s (x7 over 61s)  kubelet          Node old-k8s-version-264160 status is now: NodeHasSufficientPID
  Normal  NodeAllocatableEnforced  61s                kubelet          Updated Node Allocatable limit across pods
  Normal  Starting                 51s                kubelet          Starting kubelet.
  Normal  NodeHasSufficientMemory  51s                kubelet          Node old-k8s-version-264160 status is now: NodeHasSufficientMemory
  Normal  NodeHasNoDiskPressure    51s                kubelet          Node old-k8s-version-264160 status is now: NodeHasNoDiskPressure
  Normal  NodeHasSufficientPID     51s                kubelet          Node old-k8s-version-264160 status is now: NodeHasSufficientPID
  Normal  NodeAllocatableEnforced  51s                kubelet          Updated Node Allocatable limit across pods
  Normal  RegisteredNode           40s                node-controller  Node old-k8s-version-264160 event: Registered Node old-k8s-version-264160 in Controller
  Normal  NodeReady                25s                kubelet          Node old-k8s-version-264160 status is now: NodeReady
==> dmesg <==
[Nov19 21:17] ACPI: SRAT not present
[ +0.000000] ACPI: SRAT not present
[ +0.000000] SPI driver altr_a10sr has no spi_device_id for altr,a10sr
[ +0.032038] device-mapper: core: CONFIG_IMA_DISABLE_HTABLE is disabled. Duplicate IMA measurements will not be recorded in the IMA log.
[Nov19 21:18] systemd[1]: Configuration file /run/systemd/system/netplan-ovs-cleanup.service is marked world-inaccessible. This has no effect as configuration data is accessible via APIs without restrictions. Proceeding anyway.
[ +0.034282] systemd[1]: /lib/systemd/system/snapd.service:23: Unknown key name 'RestartMode' in section 'Service', ignoring.
[ +0.730183] ena 0000:00:05.0: LLQ is not supported Fallback to host mode policy.
[ +6.763794] kauditd_printk_skb: 36 callbacks suppressed
[Nov19 21:50] hrtimer: interrupt took 11278311 ns
==> etcd [85f86fccea0828d06ebe49ecd748897b5c79764ef02605e9b0dcfe4d0da55086] <==
{"level":"info","ts":"2025-11-19T22:35:56.498496Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 switched to configuration voters=(16896983918768216326)"}
{"level":"info","ts":"2025-11-19T22:35:56.498586Z","caller":"membership/cluster.go:421","msg":"added member","cluster-id":"6f20f2c4b2fb5f8a","local-member-id":"ea7e25599daad906","added-peer-id":"ea7e25599daad906","added-peer-peer-urls":["https://192.168.76.2:2380"]}
{"level":"info","ts":"2025-11-19T22:35:56.499212Z","caller":"embed/etcd.go:726","msg":"starting with client TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/server.crt, key = /var/lib/minikube/certs/etcd/server.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
{"level":"info","ts":"2025-11-19T22:35:56.499356Z","caller":"embed/etcd.go:597","msg":"serving peer traffic","address":"192.168.76.2:2380"}
{"level":"info","ts":"2025-11-19T22:35:56.49937Z","caller":"embed/etcd.go:569","msg":"cmux::serve","address":"192.168.76.2:2380"}
{"level":"info","ts":"2025-11-19T22:35:56.500029Z","caller":"embed/etcd.go:278","msg":"now serving peer/client/metrics","local-member-id":"ea7e25599daad906","initial-advertise-peer-urls":["https://192.168.76.2:2380"],"listen-peer-urls":["https://192.168.76.2:2380"],"advertise-client-urls":["https://192.168.76.2:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.76.2:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
{"level":"info","ts":"2025-11-19T22:35:56.500058Z","caller":"embed/etcd.go:855","msg":"serving metrics","address":"http://127.0.0.1:2381"}
{"level":"info","ts":"2025-11-19T22:35:57.378189Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 is starting a new election at term 1"}
{"level":"info","ts":"2025-11-19T22:35:57.378405Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 became pre-candidate at term 1"}
{"level":"info","ts":"2025-11-19T22:35:57.378513Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 received MsgPreVoteResp from ea7e25599daad906 at term 1"}
{"level":"info","ts":"2025-11-19T22:35:57.378604Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 became candidate at term 2"}
{"level":"info","ts":"2025-11-19T22:35:57.378648Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 received MsgVoteResp from ea7e25599daad906 at term 2"}
{"level":"info","ts":"2025-11-19T22:35:57.378765Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 became leader at term 2"}
{"level":"info","ts":"2025-11-19T22:35:57.378861Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: ea7e25599daad906 elected leader ea7e25599daad906 at term 2"}
{"level":"info","ts":"2025-11-19T22:35:57.380547Z","caller":"etcdserver/server.go:2571","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-19T22:35:57.381686Z","caller":"etcdserver/server.go:2062","msg":"published local member to cluster through raft","local-member-id":"ea7e25599daad906","local-member-attributes":"{Name:old-k8s-version-264160 ClientURLs:[https://192.168.76.2:2379]}","request-path":"/0/members/ea7e25599daad906/attributes","cluster-id":"6f20f2c4b2fb5f8a","publish-timeout":"7s"}
{"level":"info","ts":"2025-11-19T22:35:57.381779Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-19T22:35:57.385726Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"6f20f2c4b2fb5f8a","local-member-id":"ea7e25599daad906","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-19T22:35:57.385955Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-19T22:35:57.386049Z","caller":"etcdserver/server.go:2595","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-19T22:35:57.386901Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2025-11-19T22:35:57.38702Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-19T22:35:57.387495Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2025-11-19T22:35:57.38756Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2025-11-19T22:35:57.38824Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.76.2:2379"}
==> kernel <==
22:36:56 up 1:18, 0 user, load average: 2.22, 3.50, 2.75
Linux old-k8s-version-264160 5.15.0-1084-aws #91~20.04.1-Ubuntu SMP Fri May 2 07:00:04 UTC 2025 aarch64 GNU/Linux
PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"
==> kindnet [3dc4045566ee801891a80913f3c0d08405af235938655312d13ffdb5bece221c] <==
I1119 22:36:20.789101 1 main.go:109] connected to apiserver: https://10.96.0.1:443
I1119 22:36:20.789364 1 main.go:139] hostIP = 192.168.76.2
podIP = 192.168.76.2
I1119 22:36:20.789559 1 main.go:148] setting mtu 1500 for CNI
I1119 22:36:20.789578 1 main.go:178] kindnetd IP family: "ipv4"
I1119 22:36:20.789592 1 main.go:182] noMask IPv4 subnets: [10.244.0.0/16]
time="2025-11-19T22:36:20Z" level=info msg="Created plugin 10-kube-network-policies (kindnetd, handles RunPodSandbox,RemovePodSandbox)"
I1119 22:36:20.990706 1 controller.go:377] "Starting controller" name="kube-network-policies"
I1119 22:36:20.990731 1 controller.go:381] "Waiting for informer caches to sync"
I1119 22:36:20.990740 1 shared_informer.go:350] "Waiting for caches to sync" controller="kube-network-policies"
I1119 22:36:20.992039 1 controller.go:390] nri plugin exited: failed to connect to NRI service: dial unix /var/run/nri/nri.sock: connect: no such file or directory
I1119 22:36:21.190870 1 shared_informer.go:357] "Caches are synced" controller="kube-network-policies"
I1119 22:36:21.190976 1 metrics.go:72] Registering metrics
I1119 22:36:21.191093 1 controller.go:711] "Syncing nftables rules"
I1119 22:36:30.994216 1 main.go:297] Handling node with IPs: map[192.168.76.2:{}]
I1119 22:36:30.994256 1 main.go:301] handling current node
I1119 22:36:40.992854 1 main.go:297] Handling node with IPs: map[192.168.76.2:{}]
I1119 22:36:40.992893 1 main.go:301] handling current node
I1119 22:36:50.992386 1 main.go:297] Handling node with IPs: map[192.168.76.2:{}]
I1119 22:36:50.992422 1 main.go:301] handling current node
==> kube-apiserver [83a25278b16a7bc6a4252ba6f8c2ce8a60621e9d435c828ededf66aecfda2443] <==
I1119 22:36:02.053875 1 shared_informer.go:318] Caches are synced for cluster_authentication_trust_controller
I1119 22:36:02.055155 1 shared_informer.go:318] Caches are synced for crd-autoregister
I1119 22:36:02.055381 1 shared_informer.go:318] Caches are synced for configmaps
I1119 22:36:02.055593 1 aggregator.go:166] initial CRD sync complete...
I1119 22:36:02.055613 1 autoregister_controller.go:141] Starting autoregister controller
I1119 22:36:02.055620 1 cache.go:32] Waiting for caches to sync for autoregister controller
I1119 22:36:02.055627 1 cache.go:39] Caches are synced for autoregister controller
I1119 22:36:02.066246 1 controller.go:624] quota admission added evaluator for: namespaces
I1119 22:36:02.090717 1 shared_informer.go:318] Caches are synced for node_authorizer
I1119 22:36:02.094391 1 controller.go:624] quota admission added evaluator for: leases.coordination.k8s.io
I1119 22:36:02.747612 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I1119 22:36:02.754129 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I1119 22:36:02.754179 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I1119 22:36:03.457051 1 controller.go:624] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1119 22:36:03.510012 1 controller.go:624] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1119 22:36:03.578204 1 alloc.go:330] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W1119 22:36:03.591054 1 lease.go:263] Resetting endpoints for master service "kubernetes" to [192.168.76.2]
I1119 22:36:03.592389 1 controller.go:624] quota admission added evaluator for: endpoints
I1119 22:36:03.598109 1 controller.go:624] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1119 22:36:03.932055 1 controller.go:624] quota admission added evaluator for: serviceaccounts
I1119 22:36:05.470569 1 controller.go:624] quota admission added evaluator for: deployments.apps
I1119 22:36:05.488449 1 alloc.go:330] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I1119 22:36:05.503361 1 controller.go:624] quota admission added evaluator for: daemonsets.apps
I1119 22:36:17.195970 1 controller.go:624] quota admission added evaluator for: replicasets.apps
I1119 22:36:17.744558 1 controller.go:624] quota admission added evaluator for: controllerrevisions.apps
==> kube-controller-manager [9ce9313d9aae43100c1f669a0216b1ce028ec3fd90f9042e2780602b3b9dabcf] <==
I1119 22:36:17.007432 1 event.go:307] "Event occurred" object="kube-system/kube-scheduler-old-k8s-version-264160" fieldPath="" kind="Pod" apiVersion="v1" type="Warning" reason="NodeNotReady" message="Node is not ready"
I1119 22:36:17.007693 1 event.go:307] "Event occurred" object="kube-system/kube-controller-manager-old-k8s-version-264160" fieldPath="" kind="Pod" apiVersion="v1" type="Warning" reason="NodeNotReady" message="Node is not ready"
I1119 22:36:17.202717 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set coredns-5dd5756b68 to 2"
I1119 22:36:17.309848 1 shared_informer.go:318] Caches are synced for garbage collector
I1119 22:36:17.309885 1 garbagecollector.go:166] "All resource monitors have synced. Proceeding to collect garbage"
I1119 22:36:17.345695 1 shared_informer.go:318] Caches are synced for garbage collector
I1119 22:36:17.758353 1 event.go:307] "Event occurred" object="kube-system/kindnet" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kindnet-m9nqq"
I1119 22:36:17.771209 1 event.go:307] "Event occurred" object="kube-system/kube-proxy" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-zzmnr"
I1119 22:36:17.833691 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-vz7zx"
I1119 22:36:17.844755 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-qtkkx"
I1119 22:36:17.870437 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="668.610241ms"
I1119 22:36:17.886833 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="15.893452ms"
I1119 22:36:17.887202 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="257.726µs"
I1119 22:36:17.895692 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="94.147µs"
I1119 22:36:19.212001 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set coredns-5dd5756b68 to 1 from 2"
I1119 22:36:19.246883 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: coredns-5dd5756b68-qtkkx"
I1119 22:36:19.269962 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="59.063512ms"
I1119 22:36:19.286597 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="16.59033ms"
I1119 22:36:19.287055 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="114.554µs"
I1119 22:36:31.144412 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="61.433µs"
I1119 22:36:31.166398 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="94.171µs"
I1119 22:36:31.900825 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="227.679µs"
I1119 22:36:31.988585 1 node_lifecycle_controller.go:1048] "Controller detected that some Nodes are Ready. Exiting master disruption mode"
I1119 22:36:41.472572 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="12.52455ms"
I1119 22:36:41.472677 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="53.621µs"
==> kube-proxy [e5c22c9877dd10241d18184894e9e614c72ec9cfb5a007bdae07416884620fcb] <==
I1119 22:36:18.757438 1 server_others.go:69] "Using iptables proxy"
I1119 22:36:18.778593 1 node.go:141] Successfully retrieved node IP: 192.168.76.2
I1119 22:36:18.913376 1 server.go:632] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I1119 22:36:18.915584 1 server_others.go:152] "Using iptables Proxier"
I1119 22:36:18.915624 1 server_others.go:421] "Detect-local-mode set to ClusterCIDR, but no cluster CIDR for family" ipFamily="IPv6"
I1119 22:36:18.915633 1 server_others.go:438] "Defaulting to no-op detect-local"
I1119 22:36:18.915677 1 proxier.go:251] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
I1119 22:36:18.915959 1 server.go:846] "Version info" version="v1.28.0"
I1119 22:36:18.915974 1 server.go:848] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1119 22:36:18.920244 1 config.go:188] "Starting service config controller"
I1119 22:36:18.920284 1 shared_informer.go:311] Waiting for caches to sync for service config
I1119 22:36:18.920312 1 config.go:97] "Starting endpoint slice config controller"
I1119 22:36:18.920331 1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
I1119 22:36:18.920900 1 config.go:315] "Starting node config controller"
I1119 22:36:18.920980 1 shared_informer.go:311] Waiting for caches to sync for node config
I1119 22:36:19.021747 1 shared_informer.go:318] Caches are synced for node config
I1119 22:36:19.021777 1 shared_informer.go:318] Caches are synced for service config
I1119 22:36:19.021803 1 shared_informer.go:318] Caches are synced for endpoint slice config
==> kube-scheduler [0aa1bd28b60733799ab92c2d108b32fc31d28ba32f45f38e766395ec615ed220] <==
W1119 22:36:02.058403 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E1119 22:36:02.058916 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
W1119 22:36:02.058451 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E1119 22:36:02.058979 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
W1119 22:36:02.058497 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E1119 22:36:02.059039 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
W1119 22:36:02.058567 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E1119 22:36:02.059110 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
W1119 22:36:02.058600 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E1119 22:36:02.059179 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
W1119 22:36:02.058632 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E1119 22:36:02.059241 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
W1119 22:36:02.878544 1 reflector.go:535] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E1119 22:36:02.878578 1 reflector.go:147] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
W1119 22:36:02.913574 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E1119 22:36:02.913618 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
W1119 22:36:02.963123 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E1119 22:36:02.963158 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
W1119 22:36:03.017826 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E1119 22:36:03.018067 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
W1119 22:36:03.127020 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E1119 22:36:03.127294 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
W1119 22:36:03.201758 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E1119 22:36:03.202031 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
I1119 22:36:05.139254 1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Nov 19 22:36:16 old-k8s-version-264160 kubelet[1553]: I1119 22:36:16.880132 1553 kubelet_network.go:61] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24"
Nov 19 22:36:17 old-k8s-version-264160 kubelet[1553]: I1119 22:36:17.764724 1553 topology_manager.go:215] "Topology Admit Handler" podUID="2f9f6fbb-c725-49fd-ba3a-c84a7640aac2" podNamespace="kube-system" podName="kindnet-m9nqq"
Nov 19 22:36:17 old-k8s-version-264160 kubelet[1553]: I1119 22:36:17.781987 1553 topology_manager.go:215] "Topology Admit Handler" podUID="3ee1645f-fba5-4206-bb83-70d298a4c5ac" podNamespace="kube-system" podName="kube-proxy-zzmnr"
Nov 19 22:36:17 old-k8s-version-264160 kubelet[1553]: I1119 22:36:17.828089 1553 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/2f9f6fbb-c725-49fd-ba3a-c84a7640aac2-xtables-lock\") pod \"kindnet-m9nqq\" (UID: \"2f9f6fbb-c725-49fd-ba3a-c84a7640aac2\") " pod="kube-system/kindnet-m9nqq"
Nov 19 22:36:17 old-k8s-version-264160 kubelet[1553]: I1119 22:36:17.828147 1553 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/3ee1645f-fba5-4206-bb83-70d298a4c5ac-kube-proxy\") pod \"kube-proxy-zzmnr\" (UID: \"3ee1645f-fba5-4206-bb83-70d298a4c5ac\") " pod="kube-system/kube-proxy-zzmnr"
Nov 19 22:36:17 old-k8s-version-264160 kubelet[1553]: I1119 22:36:17.828177 1553 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kvk49\" (UniqueName: \"kubernetes.io/projected/2f9f6fbb-c725-49fd-ba3a-c84a7640aac2-kube-api-access-kvk49\") pod \"kindnet-m9nqq\" (UID: \"2f9f6fbb-c725-49fd-ba3a-c84a7640aac2\") " pod="kube-system/kindnet-m9nqq"
Nov 19 22:36:17 old-k8s-version-264160 kubelet[1553]: I1119 22:36:17.828200 1553 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/3ee1645f-fba5-4206-bb83-70d298a4c5ac-xtables-lock\") pod \"kube-proxy-zzmnr\" (UID: \"3ee1645f-fba5-4206-bb83-70d298a4c5ac\") " pod="kube-system/kube-proxy-zzmnr"
Nov 19 22:36:17 old-k8s-version-264160 kubelet[1553]: I1119 22:36:17.828223 1553 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/3ee1645f-fba5-4206-bb83-70d298a4c5ac-lib-modules\") pod \"kube-proxy-zzmnr\" (UID: \"3ee1645f-fba5-4206-bb83-70d298a4c5ac\") " pod="kube-system/kube-proxy-zzmnr"
Nov 19 22:36:17 old-k8s-version-264160 kubelet[1553]: I1119 22:36:17.828251 1553 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/2f9f6fbb-c725-49fd-ba3a-c84a7640aac2-lib-modules\") pod \"kindnet-m9nqq\" (UID: \"2f9f6fbb-c725-49fd-ba3a-c84a7640aac2\") " pod="kube-system/kindnet-m9nqq"
Nov 19 22:36:17 old-k8s-version-264160 kubelet[1553]: I1119 22:36:17.828274 1553 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-cfg\" (UniqueName: \"kubernetes.io/host-path/2f9f6fbb-c725-49fd-ba3a-c84a7640aac2-cni-cfg\") pod \"kindnet-m9nqq\" (UID: \"2f9f6fbb-c725-49fd-ba3a-c84a7640aac2\") " pod="kube-system/kindnet-m9nqq"
Nov 19 22:36:17 old-k8s-version-264160 kubelet[1553]: I1119 22:36:17.828297 1553 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fc7w4\" (UniqueName: \"kubernetes.io/projected/3ee1645f-fba5-4206-bb83-70d298a4c5ac-kube-api-access-fc7w4\") pod \"kube-proxy-zzmnr\" (UID: \"3ee1645f-fba5-4206-bb83-70d298a4c5ac\") " pod="kube-system/kube-proxy-zzmnr"
Nov 19 22:36:20 old-k8s-version-264160 kubelet[1553]: I1119 22:36:20.875429 1553 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kindnet-m9nqq" podStartSLOduration=1.9015295540000001 podCreationTimestamp="2025-11-19 22:36:17 +0000 UTC" firstStartedPulling="2025-11-19 22:36:18.551512561 +0000 UTC m=+13.118052946" lastFinishedPulling="2025-11-19 22:36:20.525369265 +0000 UTC m=+15.091909650" observedRunningTime="2025-11-19 22:36:20.875315381 +0000 UTC m=+15.441855783" watchObservedRunningTime="2025-11-19 22:36:20.875386258 +0000 UTC m=+15.441926643"
Nov 19 22:36:20 old-k8s-version-264160 kubelet[1553]: I1119 22:36:20.876203 1553 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kube-proxy-zzmnr" podStartSLOduration=3.87615718 podCreationTimestamp="2025-11-19 22:36:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-19 22:36:18.872222396 +0000 UTC m=+13.438762780" watchObservedRunningTime="2025-11-19 22:36:20.87615718 +0000 UTC m=+15.442697581"
Nov 19 22:36:31 old-k8s-version-264160 kubelet[1553]: I1119 22:36:31.092782 1553 kubelet_node_status.go:493] "Fast updating node status as it just became ready"
Nov 19 22:36:31 old-k8s-version-264160 kubelet[1553]: I1119 22:36:31.139366 1553 topology_manager.go:215] "Topology Admit Handler" podUID="7e7645ad-49a9-4f0c-89cc-128538e4d95c" podNamespace="kube-system" podName="coredns-5dd5756b68-vz7zx"
Nov 19 22:36:31 old-k8s-version-264160 kubelet[1553]: I1119 22:36:31.152446 1553 topology_manager.go:215] "Topology Admit Handler" podUID="8e2dda77-5a6d-4796-926b-5a06158f8cdf" podNamespace="kube-system" podName="storage-provisioner"
Nov 19 22:36:31 old-k8s-version-264160 kubelet[1553]: I1119 22:36:31.233967 1553 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7e7645ad-49a9-4f0c-89cc-128538e4d95c-config-volume\") pod \"coredns-5dd5756b68-vz7zx\" (UID: \"7e7645ad-49a9-4f0c-89cc-128538e4d95c\") " pod="kube-system/coredns-5dd5756b68-vz7zx"
Nov 19 22:36:31 old-k8s-version-264160 kubelet[1553]: I1119 22:36:31.234065 1553 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pkc9q\" (UniqueName: \"kubernetes.io/projected/7e7645ad-49a9-4f0c-89cc-128538e4d95c-kube-api-access-pkc9q\") pod \"coredns-5dd5756b68-vz7zx\" (UID: \"7e7645ad-49a9-4f0c-89cc-128538e4d95c\") " pod="kube-system/coredns-5dd5756b68-vz7zx"
Nov 19 22:36:31 old-k8s-version-264160 kubelet[1553]: I1119 22:36:31.234125 1553 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/8e2dda77-5a6d-4796-926b-5a06158f8cdf-tmp\") pod \"storage-provisioner\" (UID: \"8e2dda77-5a6d-4796-926b-5a06158f8cdf\") " pod="kube-system/storage-provisioner"
Nov 19 22:36:31 old-k8s-version-264160 kubelet[1553]: I1119 22:36:31.234229 1553 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4dt4z\" (UniqueName: \"kubernetes.io/projected/8e2dda77-5a6d-4796-926b-5a06158f8cdf-kube-api-access-4dt4z\") pod \"storage-provisioner\" (UID: \"8e2dda77-5a6d-4796-926b-5a06158f8cdf\") " pod="kube-system/storage-provisioner"
Nov 19 22:36:31 old-k8s-version-264160 kubelet[1553]: I1119 22:36:31.928942 1553 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/coredns-5dd5756b68-vz7zx" podStartSLOduration=14.928898879 podCreationTimestamp="2025-11-19 22:36:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-19 22:36:31.902288078 +0000 UTC m=+26.468828471" watchObservedRunningTime="2025-11-19 22:36:31.928898879 +0000 UTC m=+26.495439272"
Nov 19 22:36:31 old-k8s-version-264160 kubelet[1553]: I1119 22:36:31.929197 1553 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=12.929173877 podCreationTimestamp="2025-11-19 22:36:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-19 22:36:31.926923668 +0000 UTC m=+26.493464217" watchObservedRunningTime="2025-11-19 22:36:31.929173877 +0000 UTC m=+26.495714286"
Nov 19 22:36:43 old-k8s-version-264160 kubelet[1553]: I1119 22:36:43.392110 1553 topology_manager.go:215] "Topology Admit Handler" podUID="2af6deb4-937f-4b9b-9de6-995e75a080b8" podNamespace="default" podName="busybox"
Nov 19 22:36:43 old-k8s-version-264160 kubelet[1553]: I1119 22:36:43.523830 1553 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kb7ph\" (UniqueName: \"kubernetes.io/projected/2af6deb4-937f-4b9b-9de6-995e75a080b8-kube-api-access-kb7ph\") pod \"busybox\" (UID: \"2af6deb4-937f-4b9b-9de6-995e75a080b8\") " pod="default/busybox"
Nov 19 22:36:46 old-k8s-version-264160 kubelet[1553]: I1119 22:36:46.935525 1553 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="default/busybox" podStartSLOduration=1.727293354 podCreationTimestamp="2025-11-19 22:36:43 +0000 UTC" firstStartedPulling="2025-11-19 22:36:43.851422103 +0000 UTC m=+38.417962488" lastFinishedPulling="2025-11-19 22:36:46.059604676 +0000 UTC m=+40.626145060" observedRunningTime="2025-11-19 22:36:46.934118134 +0000 UTC m=+41.500658519" watchObservedRunningTime="2025-11-19 22:36:46.935475926 +0000 UTC m=+41.502016319"
==> storage-provisioner [f62b743b6725ec9ff1e91e664da6c9ce15d837afbab3608cc02fec3c9bd3d929] <==
I1119 22:36:31.737660 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1119 22:36:31.757257 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1119 22:36:31.757310 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I1119 22:36:31.769006 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I1119 22:36:31.771663 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_old-k8s-version-264160_88781c45-d0c6-484e-abf4-8c2df680f8d6!
I1119 22:36:31.772385 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"62b15298-f39b-43d5-9d35-ddeafad4bd4d", APIVersion:"v1", ResourceVersion:"442", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' old-k8s-version-264160_88781c45-d0c6-484e-abf4-8c2df680f8d6 became leader
I1119 22:36:31.872085 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_old-k8s-version-264160_88781c45-d0c6-484e-abf4-8c2df680f8d6!
-- /stdout --
helpers_test.go:262: (dbg) Run: out/minikube-linux-arm64 status --format={{.APIServer}} -p old-k8s-version-264160 -n old-k8s-version-264160
helpers_test.go:269: (dbg) Run: kubectl --context old-k8s-version-264160 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:293: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: end of post-mortem logs <<<
helpers_test.go:294: ---------------------/post-mortem---------------------------------
--- FAIL: TestStartStop/group/old-k8s-version/serial/DeployApp (13.86s)