=== RUN TestStartStop/group/old-k8s-version/serial/DeployApp
start_stop_delete_test.go:194: (dbg) Run: kubectl --context old-k8s-version-318786 create -f testdata/busybox.yaml
start_stop_delete_test.go:194: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:352: "busybox" [f566ecf0-6907-464c-9185-0f1cac06d38f] Pending
helpers_test.go:352: "busybox" [f566ecf0-6907-464c-9185-0f1cac06d38f] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
helpers_test.go:352: "busybox" [f566ecf0-6907-464c-9185-0f1cac06d38f] Running
start_stop_delete_test.go:194: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: integration-test=busybox healthy within 9.003400034s
start_stop_delete_test.go:194: (dbg) Run: kubectl --context old-k8s-version-318786 exec busybox -- /bin/sh -c "ulimit -n"
start_stop_delete_test.go:194: 'ulimit -n' returned 1024, expected 1048576
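    Note: the failing assertion above expects 'ulimit -n' inside the busybox pod to report 1048576; the pod returned the default 1024 instead. A minimal way to re-check this by hand against the same profile (a sketch only, assuming the cluster and the busybox pod from this run are still up; the /proc-based probe on the node is an assumption for investigation, not something the test itself runs):
      # re-run the exact probe from the test (taken verbatim from the log above)
      kubectl --context old-k8s-version-318786 exec busybox -- /bin/sh -c "ulimit -n"
      # compare with the open-files limit the containerd process on the node is running under
      out/minikube-linux-arm64 -p old-k8s-version-318786 ssh -- 'cat /proc/$(pgrep -xo containerd)/limits | grep -i "open files"'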
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: docker inspect <======
helpers_test.go:239: (dbg) Run: docker inspect old-k8s-version-318786
helpers_test.go:243: (dbg) docker inspect old-k8s-version-318786:
-- stdout --
[
{
"Id": "a1a9c211e03d84dc290244440868edd560e068d58cbff839724b36106b46b8b5",
"Created": "2025-11-24T13:59:48.707287298Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 203512,
"ExitCode": 0,
"Error": "",
"StartedAt": "2025-11-24T13:59:48.794762344Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:572c983e466f1f784136812eef5cc59ac623db764bc7704d3676c4643993fd08",
"ResolvConfPath": "/var/lib/docker/containers/a1a9c211e03d84dc290244440868edd560e068d58cbff839724b36106b46b8b5/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/a1a9c211e03d84dc290244440868edd560e068d58cbff839724b36106b46b8b5/hostname",
"HostsPath": "/var/lib/docker/containers/a1a9c211e03d84dc290244440868edd560e068d58cbff839724b36106b46b8b5/hosts",
"LogPath": "/var/lib/docker/containers/a1a9c211e03d84dc290244440868edd560e068d58cbff839724b36106b46b8b5/a1a9c211e03d84dc290244440868edd560e068d58cbff839724b36106b46b8b5-json.log",
"Name": "/old-k8s-version-318786",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"old-k8s-version-318786:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {}
},
"NetworkMode": "old-k8s-version-318786",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 3221225472,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 6442450944,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"ID": "a1a9c211e03d84dc290244440868edd560e068d58cbff839724b36106b46b8b5",
"LowerDir": "/var/lib/docker/overlay2/6d81197f1905508bee65982ae14ace70a0ac50476483b3a6dbe6ee1b71c20126-init/diff:/var/lib/docker/overlay2/f206897dad0d7c6b66379aa7c75402ab98ba158a4fc5aedf84eda3d57da10430/diff",
"MergedDir": "/var/lib/docker/overlay2/6d81197f1905508bee65982ae14ace70a0ac50476483b3a6dbe6ee1b71c20126/merged",
"UpperDir": "/var/lib/docker/overlay2/6d81197f1905508bee65982ae14ace70a0ac50476483b3a6dbe6ee1b71c20126/diff",
"WorkDir": "/var/lib/docker/overlay2/6d81197f1905508bee65982ae14ace70a0ac50476483b3a6dbe6ee1b71c20126/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "old-k8s-version-318786",
"Source": "/var/lib/docker/volumes/old-k8s-version-318786/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "old-k8s-version-318786",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "old-k8s-version-318786",
"name.minikube.sigs.k8s.io": "old-k8s-version-318786",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "afc5451f55c0addfee2faf75046d85ee1aff51cfb29d1330d1b700fc0f910363",
"SandboxKey": "/var/run/docker/netns/afc5451f55c0",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33053"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33054"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33057"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33055"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33056"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"old-k8s-version-318786": {
"IPAMConfig": {
"IPv4Address": "192.168.85.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "aa:35:e5:9c:e1:30",
"DriverOpts": null,
"GwPriority": 0,
"NetworkID": "3c8da78d6dab92e1227f095e0039dcc72885109237746924b800f0f7e07a64d9",
"EndpointID": "c068219706ac0808a20d3010c587a2e59831507d8b6c4030ff3e4a62ce6b15dc",
"Gateway": "192.168.85.1",
"IPAddress": "192.168.85.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"old-k8s-version-318786",
"a1a9c211e03d"
]
}
}
}
}
]
-- /stdout --
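    The full inspect dump above can be narrowed to individual fields with docker's --format/-f templates, the same mechanism the harness itself uses further down in this log (for example the 22/tcp HostPort lookup). A small sketch, assuming the old-k8s-version-318786 container still exists on the host:
      # container-level ulimit overrides passed by the driver (shown as [] in the dump above)
      docker inspect -f '{{json .HostConfig.Ulimits}}' old-k8s-version-318786
      # host port mapped to the node's SSH port, matching the 33053 used by the SSH client later in the log
      docker container inspect -f '{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}' old-k8s-version-318786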
helpers_test.go:247: (dbg) Run: out/minikube-linux-arm64 status --format={{.Host}} -p old-k8s-version-318786 -n old-k8s-version-318786
helpers_test.go:252: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-arm64 -p old-k8s-version-318786 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-arm64 -p old-k8s-version-318786 logs -n 25: (1.183996865s)
helpers_test.go:260: TestStartStop/group/old-k8s-version/serial/DeployApp logs:
-- stdout --
==> Audit <==
┌─────────┬─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬───────────────────────────┬─────────┬─────────┬─────────────────────┬───────────
──────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼───────────────────────────┼─────────┼─────────┼─────────────────────┼───────────
──────────┤
│ ssh │ -p cilium-803934 sudo systemctl status cri-docker --all --full --no-pager │ cilium-803934 │ jenkins │ v1.37.0 │ 24 Nov 25 13:58 UTC │ │
│ ssh │ -p cilium-803934 sudo systemctl cat cri-docker --no-pager │ cilium-803934 │ jenkins │ v1.37.0 │ 24 Nov 25 13:58 UTC │ │
│ ssh │ -p cilium-803934 sudo cat /etc/systemd/system/cri-docker.service.d/10-cni.conf │ cilium-803934 │ jenkins │ v1.37.0 │ 24 Nov 25 13:58 UTC │ │
│ ssh │ -p cilium-803934 sudo cat /usr/lib/systemd/system/cri-docker.service │ cilium-803934 │ jenkins │ v1.37.0 │ 24 Nov 25 13:58 UTC │ │
│ ssh │ -p cilium-803934 sudo cri-dockerd --version │ cilium-803934 │ jenkins │ v1.37.0 │ 24 Nov 25 13:58 UTC │ │
│ ssh │ -p cilium-803934 sudo systemctl status containerd --all --full --no-pager │ cilium-803934 │ jenkins │ v1.37.0 │ 24 Nov 25 13:58 UTC │ │
│ ssh │ -p cilium-803934 sudo systemctl cat containerd --no-pager │ cilium-803934 │ jenkins │ v1.37.0 │ 24 Nov 25 13:58 UTC │ │
│ ssh │ -p cilium-803934 sudo cat /lib/systemd/system/containerd.service │ cilium-803934 │ jenkins │ v1.37.0 │ 24 Nov 25 13:58 UTC │ │
│ ssh │ -p cilium-803934 sudo cat /etc/containerd/config.toml │ cilium-803934 │ jenkins │ v1.37.0 │ 24 Nov 25 13:58 UTC │ │
│ ssh │ -p cilium-803934 sudo containerd config dump │ cilium-803934 │ jenkins │ v1.37.0 │ 24 Nov 25 13:58 UTC │ │
│ ssh │ -p cilium-803934 sudo systemctl status crio --all --full --no-pager │ cilium-803934 │ jenkins │ v1.37.0 │ 24 Nov 25 13:58 UTC │ │
│ ssh │ -p cilium-803934 sudo systemctl cat crio --no-pager │ cilium-803934 │ jenkins │ v1.37.0 │ 24 Nov 25 13:58 UTC │ │
│ ssh │ -p cilium-803934 sudo find /etc/crio -type f -exec sh -c 'echo {}; cat {}' \; │ cilium-803934 │ jenkins │ v1.37.0 │ 24 Nov 25 13:58 UTC │ │
│ ssh │ -p cilium-803934 sudo crio config │ cilium-803934 │ jenkins │ v1.37.0 │ 24 Nov 25 13:58 UTC │ │
│ delete │ -p cilium-803934 │ cilium-803934 │ jenkins │ v1.37.0 │ 24 Nov 25 13:58 UTC │ 24 Nov 25 13:58 UTC │
│ start │ -p force-systemd-env-134839 --memory=3072 --alsologtostderr -v=5 --driver=docker --container-runtime=containerd │ force-systemd-env-134839 │ jenkins │ v1.37.0 │ 24 Nov 25 13:58 UTC │ 24 Nov 25 13:59 UTC │
│ delete │ -p kubernetes-upgrade-758885 │ kubernetes-upgrade-758885 │ jenkins │ v1.37.0 │ 24 Nov 25 13:58 UTC │ 24 Nov 25 13:58 UTC │
│ start │ -p cert-expiration-865605 --memory=3072 --cert-expiration=3m --driver=docker --container-runtime=containerd │ cert-expiration-865605 │ jenkins │ v1.37.0 │ 24 Nov 25 13:58 UTC │ 24 Nov 25 13:59 UTC │
│ ssh │ force-systemd-env-134839 ssh cat /etc/containerd/config.toml │ force-systemd-env-134839 │ jenkins │ v1.37.0 │ 24 Nov 25 13:59 UTC │ 24 Nov 25 13:59 UTC │
│ delete │ -p force-systemd-env-134839 │ force-systemd-env-134839 │ jenkins │ v1.37.0 │ 24 Nov 25 13:59 UTC │ 24 Nov 25 13:59 UTC │
│ start │ -p cert-options-440754 --memory=3072 --apiserver-ips=127.0.0.1 --apiserver-ips=192.168.15.15 --apiserver-names=localhost --apiserver-names=www.google.com --apiserver-port=8555 --driver=docker --container-runtime=containerd │ cert-options-440754 │ jenkins │ v1.37.0 │ 24 Nov 25 13:59 UTC │ 24 Nov 25 13:59 UTC │
│ ssh │ cert-options-440754 ssh openssl x509 -text -noout -in /var/lib/minikube/certs/apiserver.crt │ cert-options-440754 │ jenkins │ v1.37.0 │ 24 Nov 25 13:59 UTC │ 24 Nov 25 13:59 UTC │
│ ssh │ -p cert-options-440754 -- sudo cat /etc/kubernetes/admin.conf │ cert-options-440754 │ jenkins │ v1.37.0 │ 24 Nov 25 13:59 UTC │ 24 Nov 25 13:59 UTC │
│ delete │ -p cert-options-440754 │ cert-options-440754 │ jenkins │ v1.37.0 │ 24 Nov 25 13:59 UTC │ 24 Nov 25 13:59 UTC │
│ start │ -p old-k8s-version-318786 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.28.0 │ old-k8s-version-318786 │ jenkins │ v1.37.0 │ 24 Nov 25 13:59 UTC │ 24 Nov 25 14:00 UTC │
└─────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴───────────────────────────┴─────────┴─────────┴─────────────────────┴───────────
──────────┘
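    For reference, the last Audit row above is the start invocation that created this profile; re-running the same flags under a fresh profile name is usually enough to reproduce the environment locally. A sketch only (the profile name repro-old-k8s is a placeholder; the flags are copied verbatim from the Audit row):
      out/minikube-linux-arm64 start -p repro-old-k8s --memory=3072 --alsologtostderr --wait=true \
        --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false \
        --driver=docker --container-runtime=containerd --kubernetes-version=v1.28.0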
==> Last Start <==
Log file created at: 2025/11/24 13:59:42
Running on machine: ip-172-31-24-2
Binary: Built with gc go1.25.3 for linux/arm64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1124 13:59:42.406479 203121 out.go:360] Setting OutFile to fd 1 ...
I1124 13:59:42.406674 203121 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1124 13:59:42.406701 203121 out.go:374] Setting ErrFile to fd 2...
I1124 13:59:42.406722 203121 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1124 13:59:42.407140 203121 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21932-2368/.minikube/bin
I1124 13:59:42.407724 203121 out.go:368] Setting JSON to false
I1124 13:59:42.409260 203121 start.go:133] hostinfo: {"hostname":"ip-172-31-24-2","uptime":6131,"bootTime":1763986651,"procs":189,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1084-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"6d436adf-771e-4269-b9a3-c25fd4fca4f5"}
I1124 13:59:42.409372 203121 start.go:143] virtualization:
I1124 13:59:42.413282 203121 out.go:179] * [old-k8s-version-318786] minikube v1.37.0 on Ubuntu 20.04 (arm64)
I1124 13:59:42.417925 203121 out.go:179] - MINIKUBE_LOCATION=21932
I1124 13:59:42.418098 203121 notify.go:221] Checking for updates...
I1124 13:59:42.424905 203121 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1124 13:59:42.428148 203121 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21932-2368/kubeconfig
I1124 13:59:42.431322 203121 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21932-2368/.minikube
I1124 13:59:42.434379 203121 out.go:179] - MINIKUBE_BIN=out/minikube-linux-arm64
I1124 13:59:42.438100 203121 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1124 13:59:42.442160 203121 config.go:182] Loaded profile config "cert-expiration-865605": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1124 13:59:42.442285 203121 driver.go:422] Setting default libvirt URI to qemu:///system
I1124 13:59:42.470073 203121 docker.go:124] docker version: linux-28.1.1:Docker Engine - Community
I1124 13:59:42.470195 203121 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1124 13:59:42.532782 203121 info.go:266] docker info: {ID:J4M5:W6MX:GOX4:4LAQ:VI7E:VJNF:J3OP:OPBH:GF7G:PPY4:WQWD:7N4L Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:36 OomKillDisable:true NGoroutines:52 SystemTime:2025-11-24 13:59:42.52123261 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aa
rch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214831104 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-24-2 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path
:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I1124 13:59:42.532892 203121 docker.go:319] overlay module found
I1124 13:59:42.536185 203121 out.go:179] * Using the docker driver based on user configuration
I1124 13:59:42.539175 203121 start.go:309] selected driver: docker
I1124 13:59:42.539208 203121 start.go:927] validating driver "docker" against <nil>
I1124 13:59:42.539232 203121 start.go:938] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1124 13:59:42.540233 203121 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1124 13:59:42.601740 203121 info.go:266] docker info: {ID:J4M5:W6MX:GOX4:4LAQ:VI7E:VJNF:J3OP:OPBH:GF7G:PPY4:WQWD:7N4L Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:36 OomKillDisable:true NGoroutines:52 SystemTime:2025-11-24 13:59:42.592481576 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:a
arch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214831104 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-24-2 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Pat
h:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I1124 13:59:42.601887 203121 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I1124 13:59:42.602115 203121 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1124 13:59:42.605231 203121 out.go:179] * Using Docker driver with root privileges
I1124 13:59:42.608204 203121 cni.go:84] Creating CNI manager for ""
I1124 13:59:42.608281 203121 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1124 13:59:42.608296 203121 start_flags.go:336] Found "CNI" CNI - setting NetworkPlugin=cni
I1124 13:59:42.608380 203121 start.go:353] cluster config:
{Name:old-k8s-version-318786 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-318786 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local
ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSH
AuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1124 13:59:42.611704 203121 out.go:179] * Starting "old-k8s-version-318786" primary control-plane node in "old-k8s-version-318786" cluster
I1124 13:59:42.614615 203121 cache.go:134] Beginning downloading kic base image for docker with containerd
I1124 13:59:42.617691 203121 out.go:179] * Pulling base image v0.0.48-1763789673-21948 ...
I1124 13:59:42.620619 203121 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1124 13:59:42.620699 203121 preload.go:203] Found local preload: /home/jenkins/minikube-integration/21932-2368/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-arm64.tar.lz4
I1124 13:59:42.620716 203121 cache.go:65] Caching tarball of preloaded images
I1124 13:59:42.620714 203121 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f in local docker daemon
I1124 13:59:42.620820 203121 preload.go:238] Found /home/jenkins/minikube-integration/21932-2368/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-arm64.tar.lz4 in cache, skipping download
I1124 13:59:42.620838 203121 cache.go:68] Finished verifying existence of preloaded tar for v1.28.0 on containerd
I1124 13:59:42.620958 203121 profile.go:143] Saving config to /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/config.json ...
I1124 13:59:42.620983 203121 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/config.json: {Name:mkdbbadabe7d23b9f104ff19d81818950111a382 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:59:42.640749 203121 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f in local docker daemon, skipping pull
I1124 13:59:42.640776 203121 cache.go:158] gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f exists in daemon, skipping load
I1124 13:59:42.640802 203121 cache.go:240] Successfully downloaded all kic artifacts
I1124 13:59:42.640833 203121 start.go:360] acquireMachinesLock for old-k8s-version-318786: {Name:mkda208a8325231a646a1a7f876724cc4fca17ee Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 13:59:42.640958 203121 start.go:364] duration metric: took 103.057µs to acquireMachinesLock for "old-k8s-version-318786"
I1124 13:59:42.640986 203121 start.go:93] Provisioning new machine with config: &{Name:old-k8s-version-318786 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-318786 Namespace:default APIServerHAVIP:
APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:f
alse CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1124 13:59:42.641059 203121 start.go:125] createHost starting for "" (driver="docker")
I1124 13:59:42.644471 203121 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I1124 13:59:42.644694 203121 start.go:159] libmachine.API.Create for "old-k8s-version-318786" (driver="docker")
I1124 13:59:42.644747 203121 client.go:173] LocalClient.Create starting
I1124 13:59:42.644827 203121 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21932-2368/.minikube/certs/ca.pem
I1124 13:59:42.644867 203121 main.go:143] libmachine: Decoding PEM data...
I1124 13:59:42.644888 203121 main.go:143] libmachine: Parsing certificate...
I1124 13:59:42.644949 203121 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21932-2368/.minikube/certs/cert.pem
I1124 13:59:42.644971 203121 main.go:143] libmachine: Decoding PEM data...
I1124 13:59:42.644986 203121 main.go:143] libmachine: Parsing certificate...
I1124 13:59:42.645338 203121 cli_runner.go:164] Run: docker network inspect old-k8s-version-318786 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1124 13:59:42.661505 203121 cli_runner.go:211] docker network inspect old-k8s-version-318786 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1124 13:59:42.661596 203121 network_create.go:284] running [docker network inspect old-k8s-version-318786] to gather additional debugging logs...
I1124 13:59:42.661615 203121 cli_runner.go:164] Run: docker network inspect old-k8s-version-318786
W1124 13:59:42.677608 203121 cli_runner.go:211] docker network inspect old-k8s-version-318786 returned with exit code 1
I1124 13:59:42.677643 203121 network_create.go:287] error running [docker network inspect old-k8s-version-318786]: docker network inspect old-k8s-version-318786: exit status 1
stdout:
[]
stderr:
Error response from daemon: network old-k8s-version-318786 not found
I1124 13:59:42.677659 203121 network_create.go:289] output of [docker network inspect old-k8s-version-318786]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network old-k8s-version-318786 not found
** /stderr **
I1124 13:59:42.677758 203121 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1124 13:59:42.694925 203121 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-e5e15b13860d IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:12:3d:37:c4:cc:77} reservation:<nil>}
I1124 13:59:42.695253 203121 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-66593a990bce IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:de:c0:9b:bc:41:ca} reservation:<nil>}
I1124 13:59:42.695642 203121 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-37e9fb0954cf IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:32:0b:6f:6e:b2:8c} reservation:<nil>}
I1124 13:59:42.695904 203121 network.go:211] skipping subnet 192.168.76.0/24 that is taken: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName:br-5977b32dc412 IfaceIPv4:192.168.76.1 IfaceMTU:1500 IfaceMAC:82:75:42:7c:e9:e6} reservation:<nil>}
I1124 13:59:42.696411 203121 network.go:206] using free private subnet 192.168.85.0/24: &{IP:192.168.85.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.85.0/24 Gateway:192.168.85.1 ClientMin:192.168.85.2 ClientMax:192.168.85.254 Broadcast:192.168.85.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0x40019bcfe0}
I1124 13:59:42.696437 203121 network_create.go:124] attempt to create docker network old-k8s-version-318786 192.168.85.0/24 with gateway 192.168.85.1 and MTU of 1500 ...
I1124 13:59:42.696498 203121 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.85.0/24 --gateway=192.168.85.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=old-k8s-version-318786 old-k8s-version-318786
I1124 13:59:42.754268 203121 network_create.go:108] docker network old-k8s-version-318786 192.168.85.0/24 created
I1124 13:59:42.754297 203121 kic.go:121] calculated static IP "192.168.85.2" for the "old-k8s-version-318786" container
I1124 13:59:42.754382 203121 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1124 13:59:42.771474 203121 cli_runner.go:164] Run: docker volume create old-k8s-version-318786 --label name.minikube.sigs.k8s.io=old-k8s-version-318786 --label created_by.minikube.sigs.k8s.io=true
I1124 13:59:42.789916 203121 oci.go:103] Successfully created a docker volume old-k8s-version-318786
I1124 13:59:42.790028 203121 cli_runner.go:164] Run: docker run --rm --name old-k8s-version-318786-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=old-k8s-version-318786 --entrypoint /usr/bin/test -v old-k8s-version-318786:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -d /var/lib
I1124 13:59:43.376934 203121 oci.go:107] Successfully prepared a docker volume old-k8s-version-318786
I1124 13:59:43.377002 203121 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1124 13:59:43.377014 203121 kic.go:194] Starting extracting preloaded images to volume ...
I1124 13:59:43.377093 203121 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21932-2368/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v old-k8s-version-318786:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -I lz4 -xf /preloaded.tar -C /extractDir
I1124 13:59:48.629782 203121 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21932-2368/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v old-k8s-version-318786:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -I lz4 -xf /preloaded.tar -C /extractDir: (5.252643456s)
I1124 13:59:48.629823 203121 kic.go:203] duration metric: took 5.252805903s to extract preloaded images to volume ...
W1124 13:59:48.629966 203121 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I1124 13:59:48.630073 203121 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1124 13:59:48.692534 203121 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname old-k8s-version-318786 --name old-k8s-version-318786 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=old-k8s-version-318786 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=old-k8s-version-318786 --network old-k8s-version-318786 --ip 192.168.85.2 --volume old-k8s-version-318786:/var --security-opt apparmor=unconfined --memory=3072mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f
I1124 13:59:49.023181 203121 cli_runner.go:164] Run: docker container inspect old-k8s-version-318786 --format={{.State.Running}}
I1124 13:59:49.046529 203121 cli_runner.go:164] Run: docker container inspect old-k8s-version-318786 --format={{.State.Status}}
I1124 13:59:49.073693 203121 cli_runner.go:164] Run: docker exec old-k8s-version-318786 stat /var/lib/dpkg/alternatives/iptables
I1124 13:59:49.143680 203121 oci.go:144] the created container "old-k8s-version-318786" has a running status.
I1124 13:59:49.143714 203121 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21932-2368/.minikube/machines/old-k8s-version-318786/id_rsa...
I1124 13:59:49.471341 203121 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21932-2368/.minikube/machines/old-k8s-version-318786/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1124 13:59:49.501921 203121 cli_runner.go:164] Run: docker container inspect old-k8s-version-318786 --format={{.State.Status}}
I1124 13:59:49.532238 203121 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1124 13:59:49.532267 203121 kic_runner.go:114] Args: [docker exec --privileged old-k8s-version-318786 chown docker:docker /home/docker/.ssh/authorized_keys]
I1124 13:59:49.607023 203121 cli_runner.go:164] Run: docker container inspect old-k8s-version-318786 --format={{.State.Status}}
I1124 13:59:49.637450 203121 machine.go:94] provisionDockerMachine start ...
I1124 13:59:49.637558 203121 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-318786
I1124 13:59:49.663172 203121 main.go:143] libmachine: Using SSH client type: native
I1124 13:59:49.663576 203121 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3dad70] 0x3dd270 <nil> [] 0s} 127.0.0.1 33053 <nil> <nil>}
I1124 13:59:49.663586 203121 main.go:143] libmachine: About to run SSH command:
hostname
I1124 13:59:49.666892 203121 main.go:143] libmachine: Error dialing TCP: ssh: handshake failed: EOF
I1124 13:59:52.819647 203121 main.go:143] libmachine: SSH cmd err, output: <nil>: old-k8s-version-318786
I1124 13:59:52.819716 203121 ubuntu.go:182] provisioning hostname "old-k8s-version-318786"
I1124 13:59:52.819805 203121 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-318786
I1124 13:59:52.837381 203121 main.go:143] libmachine: Using SSH client type: native
I1124 13:59:52.837693 203121 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3dad70] 0x3dd270 <nil> [] 0s} 127.0.0.1 33053 <nil> <nil>}
I1124 13:59:52.837710 203121 main.go:143] libmachine: About to run SSH command:
sudo hostname old-k8s-version-318786 && echo "old-k8s-version-318786" | sudo tee /etc/hostname
I1124 13:59:53.001525 203121 main.go:143] libmachine: SSH cmd err, output: <nil>: old-k8s-version-318786
I1124 13:59:53.001631 203121 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-318786
I1124 13:59:53.024082 203121 main.go:143] libmachine: Using SSH client type: native
I1124 13:59:53.024554 203121 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3dad70] 0x3dd270 <nil> [] 0s} 127.0.0.1 33053 <nil> <nil>}
I1124 13:59:53.024610 203121 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sold-k8s-version-318786' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 old-k8s-version-318786/g' /etc/hosts;
else
echo '127.0.1.1 old-k8s-version-318786' | sudo tee -a /etc/hosts;
fi
fi
I1124 13:59:53.180483 203121 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1124 13:59:53.180555 203121 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21932-2368/.minikube CaCertPath:/home/jenkins/minikube-integration/21932-2368/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21932-2368/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21932-2368/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21932-2368/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21932-2368/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21932-2368/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21932-2368/.minikube}
I1124 13:59:53.180601 203121 ubuntu.go:190] setting up certificates
I1124 13:59:53.180641 203121 provision.go:84] configureAuth start
I1124 13:59:53.180754 203121 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-318786
I1124 13:59:53.197870 203121 provision.go:143] copyHostCerts
I1124 13:59:53.197937 203121 exec_runner.go:144] found /home/jenkins/minikube-integration/21932-2368/.minikube/key.pem, removing ...
I1124 13:59:53.197947 203121 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21932-2368/.minikube/key.pem
I1124 13:59:53.198026 203121 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21932-2368/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21932-2368/.minikube/key.pem (1679 bytes)
I1124 13:59:53.198115 203121 exec_runner.go:144] found /home/jenkins/minikube-integration/21932-2368/.minikube/ca.pem, removing ...
I1124 13:59:53.198120 203121 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21932-2368/.minikube/ca.pem
I1124 13:59:53.198145 203121 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21932-2368/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21932-2368/.minikube/ca.pem (1082 bytes)
I1124 13:59:53.198195 203121 exec_runner.go:144] found /home/jenkins/minikube-integration/21932-2368/.minikube/cert.pem, removing ...
I1124 13:59:53.198199 203121 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21932-2368/.minikube/cert.pem
I1124 13:59:53.198221 203121 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21932-2368/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21932-2368/.minikube/cert.pem (1123 bytes)
I1124 13:59:53.198264 203121 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21932-2368/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21932-2368/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21932-2368/.minikube/certs/ca-key.pem org=jenkins.old-k8s-version-318786 san=[127.0.0.1 192.168.85.2 localhost minikube old-k8s-version-318786]
I1124 13:59:53.447750 203121 provision.go:177] copyRemoteCerts
I1124 13:59:53.447821 203121 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1124 13:59:53.447859 203121 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-318786
I1124 13:59:53.466989 203121 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33053 SSHKeyPath:/home/jenkins/minikube-integration/21932-2368/.minikube/machines/old-k8s-version-318786/id_rsa Username:docker}
I1124 13:59:53.573838 203121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-2368/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1124 13:59:53.593131 203121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-2368/.minikube/machines/server.pem --> /etc/docker/server.pem (1233 bytes)
I1124 13:59:53.614562 203121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-2368/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I1124 13:59:53.634677 203121 provision.go:87] duration metric: took 453.994052ms to configureAuth
I1124 13:59:53.634716 203121 ubuntu.go:206] setting minikube options for container-runtime
I1124 13:59:53.634894 203121 config.go:182] Loaded profile config "old-k8s-version-318786": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1124 13:59:53.634916 203121 machine.go:97] duration metric: took 3.997446316s to provisionDockerMachine
I1124 13:59:53.634923 203121 client.go:176] duration metric: took 10.990163165s to LocalClient.Create
I1124 13:59:53.634942 203121 start.go:167] duration metric: took 10.990248318s to libmachine.API.Create "old-k8s-version-318786"
I1124 13:59:53.634951 203121 start.go:293] postStartSetup for "old-k8s-version-318786" (driver="docker")
I1124 13:59:53.634967 203121 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1124 13:59:53.635028 203121 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1124 13:59:53.635072 203121 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-318786
I1124 13:59:53.651615 203121 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33053 SSHKeyPath:/home/jenkins/minikube-integration/21932-2368/.minikube/machines/old-k8s-version-318786/id_rsa Username:docker}
I1124 13:59:53.760351 203121 ssh_runner.go:195] Run: cat /etc/os-release
I1124 13:59:53.763787 203121 main.go:143] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1124 13:59:53.763818 203121 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1124 13:59:53.763831 203121 filesync.go:126] Scanning /home/jenkins/minikube-integration/21932-2368/.minikube/addons for local assets ...
I1124 13:59:53.763886 203121 filesync.go:126] Scanning /home/jenkins/minikube-integration/21932-2368/.minikube/files for local assets ...
I1124 13:59:53.764002 203121 filesync.go:149] local asset: /home/jenkins/minikube-integration/21932-2368/.minikube/files/etc/ssl/certs/41782.pem -> 41782.pem in /etc/ssl/certs
I1124 13:59:53.764116 203121 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1124 13:59:53.771607 203121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-2368/.minikube/files/etc/ssl/certs/41782.pem --> /etc/ssl/certs/41782.pem (1708 bytes)
I1124 13:59:53.790229 203121 start.go:296] duration metric: took 155.256983ms for postStartSetup
I1124 13:59:53.790653 203121 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-318786
I1124 13:59:53.807439 203121 profile.go:143] Saving config to /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/config.json ...
I1124 13:59:53.807757 203121 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1124 13:59:53.807816 203121 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-318786
I1124 13:59:53.825527 203121 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33053 SSHKeyPath:/home/jenkins/minikube-integration/21932-2368/.minikube/machines/old-k8s-version-318786/id_rsa Username:docker}
I1124 13:59:53.928742 203121 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1124 13:59:53.933408 203121 start.go:128] duration metric: took 11.29232535s to createHost
I1124 13:59:53.933433 203121 start.go:83] releasing machines lock for "old-k8s-version-318786", held for 11.292464025s
I1124 13:59:53.933507 203121 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-318786
I1124 13:59:53.950335 203121 ssh_runner.go:195] Run: cat /version.json
I1124 13:59:53.950395 203121 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-318786
I1124 13:59:53.950688 203121 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1124 13:59:53.950748 203121 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-318786
I1124 13:59:53.969960 203121 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33053 SSHKeyPath:/home/jenkins/minikube-integration/21932-2368/.minikube/machines/old-k8s-version-318786/id_rsa Username:docker}
I1124 13:59:53.970283 203121 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33053 SSHKeyPath:/home/jenkins/minikube-integration/21932-2368/.minikube/machines/old-k8s-version-318786/id_rsa Username:docker}
I1124 13:59:54.187220 203121 ssh_runner.go:195] Run: systemctl --version
I1124 13:59:54.193763 203121 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1124 13:59:54.197792 203121 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1124 13:59:54.197862 203121 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1124 13:59:54.225219 203121 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/10-crio-bridge.conflist.disabled] bridge cni config(s)
I1124 13:59:54.225241 203121 start.go:496] detecting cgroup driver to use...
I1124 13:59:54.225273 203121 detect.go:187] detected "cgroupfs" cgroup driver on host os
I1124 13:59:54.225319 203121 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1124 13:59:54.240905 203121 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1124 13:59:54.255129 203121 docker.go:218] disabling cri-docker service (if available) ...
I1124 13:59:54.255221 203121 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1124 13:59:54.274287 203121 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1124 13:59:54.293183 203121 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1124 13:59:54.421827 203121 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1124 13:59:54.546597 203121 docker.go:234] disabling docker service ...
I1124 13:59:54.546687 203121 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1124 13:59:54.569497 203121 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1124 13:59:54.583215 203121 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1124 13:59:54.700724 203121 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1124 13:59:54.819165 203121 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1124 13:59:54.832231 203121 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1124 13:59:54.851866 203121 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.9"|' /etc/containerd/config.toml"
I1124 13:59:54.862178 203121 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1124 13:59:54.871620 203121 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I1124 13:59:54.871738 203121 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I1124 13:59:54.882231 203121 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1124 13:59:54.891717 203121 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1124 13:59:54.901467 203121 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1124 13:59:54.910294 203121 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1124 13:59:54.918660 203121 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1124 13:59:54.927868 203121 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1124 13:59:54.937082 203121 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1124 13:59:54.946216 203121 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1124 13:59:54.954056 203121 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1124 13:59:54.961958 203121 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1124 13:59:55.097492 203121 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1124 13:59:55.230526 203121 start.go:543] Will wait 60s for socket path /run/containerd/containerd.sock
I1124 13:59:55.230649 203121 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I1124 13:59:55.234998 203121 start.go:564] Will wait 60s for crictl version
I1124 13:59:55.235132 203121 ssh_runner.go:195] Run: which crictl
I1124 13:59:55.238882 203121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1124 13:59:55.268214 203121 start.go:580] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: v2.1.5
RuntimeApiVersion: v1
I1124 13:59:55.268356 203121 ssh_runner.go:195] Run: containerd --version
I1124 13:59:55.288303 203121 ssh_runner.go:195] Run: containerd --version
I1124 13:59:55.314523 203121 out.go:179] * Preparing Kubernetes v1.28.0 on containerd 2.1.5 ...
I1124 13:59:55.317381 203121 cli_runner.go:164] Run: docker network inspect old-k8s-version-318786 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1124 13:59:55.334289 203121 ssh_runner.go:195] Run: grep 192.168.85.1 host.minikube.internal$ /etc/hosts
I1124 13:59:55.338412 203121 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.85.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1124 13:59:55.348875 203121 kubeadm.go:884] updating cluster {Name:old-k8s-version-318786 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-318786 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1124 13:59:55.349007 203121 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1124 13:59:55.349078 203121 ssh_runner.go:195] Run: sudo crictl images --output json
I1124 13:59:55.373604 203121 containerd.go:627] all images are preloaded for containerd runtime.
I1124 13:59:55.373629 203121 containerd.go:534] Images already preloaded, skipping extraction
I1124 13:59:55.373693 203121 ssh_runner.go:195] Run: sudo crictl images --output json
I1124 13:59:55.398685 203121 containerd.go:627] all images are preloaded for containerd runtime.
I1124 13:59:55.398711 203121 cache_images.go:86] Images are preloaded, skipping loading
I1124 13:59:55.398719 203121 kubeadm.go:935] updating node { 192.168.85.2 8443 v1.28.0 containerd true true} ...
I1124 13:59:55.398825 203121 kubeadm.go:947] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.28.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=old-k8s-version-318786 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.85.2
[Install]
config:
{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-318786 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1124 13:59:55.398898 203121 ssh_runner.go:195] Run: sudo crictl info
I1124 13:59:55.429304 203121 cni.go:84] Creating CNI manager for ""
I1124 13:59:55.429328 203121 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1124 13:59:55.429372 203121 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1124 13:59:55.429403 203121 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.85.2 APIServerPort:8443 KubernetesVersion:v1.28.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:old-k8s-version-318786 NodeName:old-k8s-version-318786 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.85.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.85.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt
StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1124 13:59:55.429550 203121 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.85.2
  bindPort: 8443
bootstrapTokens:
  - groups:
      - system:bootstrappers:kubeadm:default-node-token
    ttl: 24h0m0s
    usages:
      - signing
      - authentication
nodeRegistration:
  criSocket: unix:///run/containerd/containerd.sock
  name: "old-k8s-version-318786"
  kubeletExtraArgs:
    node-ip: 192.168.85.2
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
  certSANs: ["127.0.0.1", "localhost", "192.168.85.2"]
  extraArgs:
    enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
  extraArgs:
    allocate-node-cidrs: "true"
    leader-elect: "false"
scheduler:
  extraArgs:
    leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd
    extraArgs:
      proxy-refresh-interval: "70000"
kubernetesVersion: v1.28.0
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  x509:
    clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
  maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
  tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
  tcpCloseWaitTimeout: 0s
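Before kubeadm consumes it (further down in this log), a generated config like the one above can be exercised without touching the host. This is a sketch only, assuming the file has already been copied to /var/tmp/minikube/kubeadm.yaml as the later steps do:
  # dry-run init against the generated config; manifests are rendered but nothing is installed
  sudo env PATH="/var/lib/minikube/binaries/v1.28.0:$PATH" \
    kubeadm init --config /var/tmp/minikube/kubeadm.yaml --dry-run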
I1124 13:59:55.429622 203121 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.28.0
I1124 13:59:55.437772 203121 binaries.go:51] Found k8s binaries, skipping transfer
I1124 13:59:55.437895 203121 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1124 13:59:55.445856 203121 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (326 bytes)
I1124 13:59:55.459167 203121 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1124 13:59:55.473519 203121 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2176 bytes)
I1124 13:59:55.487760 203121 ssh_runner.go:195] Run: grep 192.168.85.2 control-plane.minikube.internal$ /etc/hosts
I1124 13:59:55.491722 203121 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.85.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1124 13:59:55.502994 203121 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1124 13:59:55.625341 203121 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1124 13:59:55.647018 203121 certs.go:69] Setting up /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786 for IP: 192.168.85.2
I1124 13:59:55.647099 203121 certs.go:195] generating shared ca certs ...
I1124 13:59:55.647130 203121 certs.go:227] acquiring lock for ca certs: {Name:mkcd8707c782acde0e57168c044a3df942dc4ab1 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:59:55.647322 203121 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21932-2368/.minikube/ca.key
I1124 13:59:55.647396 203121 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21932-2368/.minikube/proxy-client-ca.key
I1124 13:59:55.647432 203121 certs.go:257] generating profile certs ...
I1124 13:59:55.647513 203121 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/client.key
I1124 13:59:55.647551 203121 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/client.crt with IP's: []
I1124 13:59:56.033129 203121 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/client.crt ...
I1124 13:59:56.033212 203121 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/client.crt: {Name:mk69bb915606644e0645060fa46449dd65f83095 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:59:56.033449 203121 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/client.key ...
I1124 13:59:56.033488 203121 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/client.key: {Name:mkfa34a7c8b2d69c736fc1cfd2304ae49133ac4b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:59:56.033640 203121 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/apiserver.key.251f69ae
I1124 13:59:56.033684 203121 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/apiserver.crt.251f69ae with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.85.2]
I1124 13:59:56.281567 203121 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/apiserver.crt.251f69ae ...
I1124 13:59:56.281598 203121 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/apiserver.crt.251f69ae: {Name:mk572ed713bf0eec1d0b840d076729a08786aff7 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:59:56.281810 203121 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/apiserver.key.251f69ae ...
I1124 13:59:56.281825 203121 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/apiserver.key.251f69ae: {Name:mk6b112bdb309b7ed87e7e056627f1c30ccc769a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:59:56.281918 203121 certs.go:382] copying /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/apiserver.crt.251f69ae -> /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/apiserver.crt
I1124 13:59:56.281996 203121 certs.go:386] copying /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/apiserver.key.251f69ae -> /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/apiserver.key
I1124 13:59:56.282057 203121 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/proxy-client.key
I1124 13:59:56.282077 203121 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/proxy-client.crt with IP's: []
I1124 13:59:56.404952 203121 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/proxy-client.crt ...
I1124 13:59:56.404984 203121 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/proxy-client.crt: {Name:mk506f5bcd13da36d0e32b27db8471ef560cbc9f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:59:56.405167 203121 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/proxy-client.key ...
I1124 13:59:56.405182 203121 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/proxy-client.key: {Name:mk63bb1c02064c41d85f1d8bf24cb0b4a26d687a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:59:56.405366 203121 certs.go:484] found cert: /home/jenkins/minikube-integration/21932-2368/.minikube/certs/4178.pem (1338 bytes)
W1124 13:59:56.405416 203121 certs.go:480] ignoring /home/jenkins/minikube-integration/21932-2368/.minikube/certs/4178_empty.pem, impossibly tiny 0 bytes
I1124 13:59:56.405425 203121 certs.go:484] found cert: /home/jenkins/minikube-integration/21932-2368/.minikube/certs/ca-key.pem (1679 bytes)
I1124 13:59:56.405454 203121 certs.go:484] found cert: /home/jenkins/minikube-integration/21932-2368/.minikube/certs/ca.pem (1082 bytes)
I1124 13:59:56.405487 203121 certs.go:484] found cert: /home/jenkins/minikube-integration/21932-2368/.minikube/certs/cert.pem (1123 bytes)
I1124 13:59:56.405517 203121 certs.go:484] found cert: /home/jenkins/minikube-integration/21932-2368/.minikube/certs/key.pem (1679 bytes)
I1124 13:59:56.405566 203121 certs.go:484] found cert: /home/jenkins/minikube-integration/21932-2368/.minikube/files/etc/ssl/certs/41782.pem (1708 bytes)
I1124 13:59:56.406138 203121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-2368/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1124 13:59:56.425371 203121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-2368/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1124 13:59:56.445638 203121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-2368/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1124 13:59:56.465263 203121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-2368/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I1124 13:59:56.484806 203121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1432 bytes)
I1124 13:59:56.503229 203121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I1124 13:59:56.526839 203121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1124 13:59:56.546328 203121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I1124 13:59:56.568330 203121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-2368/.minikube/certs/4178.pem --> /usr/share/ca-certificates/4178.pem (1338 bytes)
I1124 13:59:56.588914 203121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-2368/.minikube/files/etc/ssl/certs/41782.pem --> /usr/share/ca-certificates/41782.pem (1708 bytes)
I1124 13:59:56.609069 203121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-2368/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1124 13:59:56.635519 203121 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1124 13:59:56.649021 203121 ssh_runner.go:195] Run: openssl version
I1124 13:59:56.655362 203121 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/41782.pem && ln -fs /usr/share/ca-certificates/41782.pem /etc/ssl/certs/41782.pem"
I1124 13:59:56.664078 203121 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/41782.pem
I1124 13:59:56.667939 203121 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 24 13:21 /usr/share/ca-certificates/41782.pem
I1124 13:59:56.668018 203121 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/41782.pem
I1124 13:59:56.709276 203121 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/41782.pem /etc/ssl/certs/3ec20f2e.0"
I1124 13:59:56.717713 203121 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1124 13:59:56.725687 203121 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1124 13:59:56.729416 203121 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 24 13:14 /usr/share/ca-certificates/minikubeCA.pem
I1124 13:59:56.729511 203121 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1124 13:59:56.771028 203121 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1124 13:59:56.779345 203121 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/4178.pem && ln -fs /usr/share/ca-certificates/4178.pem /etc/ssl/certs/4178.pem"
I1124 13:59:56.787738 203121 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/4178.pem
I1124 13:59:56.792183 203121 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 24 13:21 /usr/share/ca-certificates/4178.pem
I1124 13:59:56.792289 203121 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/4178.pem
I1124 13:59:56.833374 203121 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/4178.pem /etc/ssl/certs/51391683.0"
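The openssl/ln pairs above implement OpenSSL's hashed CA directory: each certificate is linked under /etc/ssl/certs as <subject-hash>.0, which is why the hash printed by `openssl x509 -hash` (b5213941 for minikubeCA.pem above) reappears as the link name. The same link can be recreated by hand:
  # print the subject hash, then link the CA under that name so OpenSSL-based clients trust it
  h=$(openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem)
  sudo ln -fs /usr/share/ca-certificates/minikubeCA.pem "/etc/ssl/certs/${h}.0"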
I1124 13:59:56.841910 203121 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1124 13:59:56.845538 203121 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1124 13:59:56.845595 203121 kubeadm.go:401] StartCluster: {Name:old-k8s-version-318786 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-318786 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1124 13:59:56.845673 203121 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I1124 13:59:56.845734 203121 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I1124 13:59:56.874154 203121 cri.go:89] found id: ""
I1124 13:59:56.874225 203121 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1124 13:59:56.882169 203121 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1124 13:59:56.890196 203121 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I1124 13:59:56.890264 203121 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1124 13:59:56.898559 203121 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1124 13:59:56.898579 203121 kubeadm.go:158] found existing configuration files:
I1124 13:59:56.898629 203121 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1124 13:59:56.906476 203121 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1124 13:59:56.906616 203121 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1124 13:59:56.914551 203121 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1124 13:59:56.922673 203121 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1124 13:59:56.922748 203121 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1124 13:59:56.931103 203121 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1124 13:59:56.939465 203121 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1124 13:59:56.939567 203121 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1124 13:59:56.947086 203121 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1124 13:59:56.955210 203121 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1124 13:59:56.955302 203121 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1124 13:59:56.963184 203121 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.28.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1124 13:59:57.020344 203121 kubeadm.go:319] [init] Using Kubernetes version: v1.28.0
I1124 13:59:57.020647 203121 kubeadm.go:319] [preflight] Running pre-flight checks
I1124 13:59:57.059971 203121 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I1124 13:59:57.060049 203121 kubeadm.go:319] KERNEL_VERSION: 5.15.0-1084-aws
I1124 13:59:57.060090 203121 kubeadm.go:319] OS: Linux
I1124 13:59:57.060146 203121 kubeadm.go:319] CGROUPS_CPU: enabled
I1124 13:59:57.060199 203121 kubeadm.go:319] CGROUPS_CPUACCT: enabled
I1124 13:59:57.060249 203121 kubeadm.go:319] CGROUPS_CPUSET: enabled
I1124 13:59:57.060302 203121 kubeadm.go:319] CGROUPS_DEVICES: enabled
I1124 13:59:57.060354 203121 kubeadm.go:319] CGROUPS_FREEZER: enabled
I1124 13:59:57.060407 203121 kubeadm.go:319] CGROUPS_MEMORY: enabled
I1124 13:59:57.060457 203121 kubeadm.go:319] CGROUPS_PIDS: enabled
I1124 13:59:57.060509 203121 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I1124 13:59:57.060558 203121 kubeadm.go:319] CGROUPS_BLKIO: enabled
I1124 13:59:57.153578 203121 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1124 13:59:57.153733 203121 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1124 13:59:57.153905 203121 kubeadm.go:319] [preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
I1124 13:59:57.330900 203121 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1124 13:59:57.336703 203121 out.go:252] - Generating certificates and keys ...
I1124 13:59:57.336796 203121 kubeadm.go:319] [certs] Using existing ca certificate authority
I1124 13:59:57.336870 203121 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1124 13:59:57.865889 203121 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I1124 13:59:58.185353 203121 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I1124 13:59:59.130735 203121 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1124 13:59:59.642294 203121 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1124 13:59:59.906079 203121 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1124 13:59:59.906451 203121 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [localhost old-k8s-version-318786] and IPs [192.168.85.2 127.0.0.1 ::1]
I1124 14:00:00.123407 203121 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1124 14:00:00.123551 203121 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [localhost old-k8s-version-318786] and IPs [192.168.85.2 127.0.0.1 ::1]
I1124 14:00:00.270762 203121 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1124 14:00:01.217860 203121 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1124 14:00:01.724986 203121 kubeadm.go:319] [certs] Generating "sa" key and public key
I1124 14:00:01.740375 203121 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1124 14:00:02.910438 203121 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1124 14:00:03.183161 203121 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1124 14:00:03.857453 203121 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1124 14:00:04.272263 203121 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1124 14:00:04.273275 203121 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1124 14:00:04.276092 203121 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1124 14:00:04.279552 203121 out.go:252] - Booting up control plane ...
I1124 14:00:04.279655 203121 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1124 14:00:04.279733 203121 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1124 14:00:04.279800 203121 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1124 14:00:04.298211 203121 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1124 14:00:04.298994 203121 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1124 14:00:04.299292 203121 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1124 14:00:04.444292 203121 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
I1124 14:00:11.451050 203121 kubeadm.go:319] [apiclient] All control plane components are healthy after 7.006841 seconds
I1124 14:00:11.451179 203121 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1124 14:00:11.470632 203121 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1124 14:00:12.039593 203121 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1124 14:00:12.039804 203121 kubeadm.go:319] [mark-control-plane] Marking the node old-k8s-version-318786 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1124 14:00:12.553048 203121 kubeadm.go:319] [bootstrap-token] Using token: lzgex3.uugtb4pr04721m2a
I1124 14:00:12.555996 203121 out.go:252] - Configuring RBAC rules ...
I1124 14:00:12.556126 203121 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1124 14:00:12.561765 203121 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1124 14:00:12.571340 203121 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1124 14:00:12.578855 203121 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1124 14:00:12.583307 203121 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1124 14:00:12.587409 203121 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1124 14:00:12.604367 203121 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1124 14:00:12.927512 203121 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1124 14:00:12.998711 203121 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1124 14:00:13.009044 203121 kubeadm.go:319]
I1124 14:00:13.009136 203121 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1124 14:00:13.009150 203121 kubeadm.go:319]
I1124 14:00:13.009228 203121 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1124 14:00:13.009237 203121 kubeadm.go:319]
I1124 14:00:13.009262 203121 kubeadm.go:319] mkdir -p $HOME/.kube
I1124 14:00:13.009867 203121 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1124 14:00:13.009932 203121 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1124 14:00:13.009946 203121 kubeadm.go:319]
I1124 14:00:13.010001 203121 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1124 14:00:13.010013 203121 kubeadm.go:319]
I1124 14:00:13.010061 203121 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1124 14:00:13.010069 203121 kubeadm.go:319]
I1124 14:00:13.010122 203121 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1124 14:00:13.010202 203121 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1124 14:00:13.010274 203121 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1124 14:00:13.010281 203121 kubeadm.go:319]
I1124 14:00:13.010670 203121 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1124 14:00:13.010761 203121 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1124 14:00:13.010771 203121 kubeadm.go:319]
I1124 14:00:13.011083 203121 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token lzgex3.uugtb4pr04721m2a \
I1124 14:00:13.011197 203121 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:aa948289582a95f47bab77808ca51e5d74f41a914fe1740ab9448815f8011aac \
I1124 14:00:13.011480 203121 kubeadm.go:319] --control-plane
I1124 14:00:13.011502 203121 kubeadm.go:319]
I1124 14:00:13.011780 203121 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1124 14:00:13.011795 203121 kubeadm.go:319]
I1124 14:00:13.012105 203121 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token lzgex3.uugtb4pr04721m2a \
I1124 14:00:13.012432 203121 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:aa948289582a95f47bab77808ca51e5d74f41a914fe1740ab9448815f8011aac
I1124 14:00:13.016246 203121 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1084-aws\n", err: exit status 1
I1124 14:00:13.016372 203121 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
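The join command and discovery hash printed above are tied to the bootstrap token (ttl 24h0m0s in the config), so they eventually expire; an equivalent command can be regenerated later on the control plane. A sketch using the same kubeadm binary path this run uses:
  # mint a fresh bootstrap token and print a ready-to-use join command
  sudo /var/lib/minikube/binaries/v1.28.0/kubeadm token create --print-join-command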
I1124 14:00:13.016396 203121 cni.go:84] Creating CNI manager for ""
I1124 14:00:13.016409 203121 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1124 14:00:13.019688 203121 out.go:179] * Configuring CNI (Container Networking Interface) ...
I1124 14:00:13.022683 203121 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I1124 14:00:13.034888 203121 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.28.0/kubectl ...
I1124 14:00:13.034906 203121 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I1124 14:00:13.059514 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I1124 14:00:14.290955 203121 ssh_runner.go:235] Completed: sudo /var/lib/minikube/binaries/v1.28.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml: (1.231358047s)
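Once the CNI manifest is applied, the kindnet pods it creates can be watched directly. The `app=kindnet` label below is an assumption about the shipped manifest; adjust the selector if it differs:
  # check that the kindnet daemonset pods are scheduling and becoming Ready
  kubectl --context old-k8s-version-318786 -n kube-system get pods -l app=kindnet -o wide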
I1124 14:00:14.291008 203121 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1124 14:00:14.291124 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:14.291189 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes old-k8s-version-318786 minikube.k8s.io/updated_at=2025_11_24T14_00_14_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=b5d1c9f4e75f4e638a533695fd62619949cefcab minikube.k8s.io/name=old-k8s-version-318786 minikube.k8s.io/primary=true
I1124 14:00:14.451653 203121 ops.go:34] apiserver oom_adj: -16
I1124 14:00:14.451772 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:14.952612 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:15.452444 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:15.952508 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:16.452482 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:16.952838 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:17.452425 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:17.951984 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:18.452384 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:18.952884 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:19.452844 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:19.951825 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:20.452041 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:20.954241 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:21.452323 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:21.952432 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:22.451804 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:22.951865 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:23.452374 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:23.952376 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:24.452544 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:24.952573 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:25.451889 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:25.951879 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:26.060844 203121 kubeadm.go:1114] duration metric: took 11.769763814s to wait for elevateKubeSystemPrivileges
I1124 14:00:26.060873 203121 kubeadm.go:403] duration metric: took 29.215284106s to StartCluster
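The privilege elevation waited on above corresponds to the minikube-rbac clusterrolebinding created a few lines earlier (cluster-admin for the kube-system default service account); it can be confirmed once the apiserver answers:
  # verify the binding the retry loop above was waiting to take effect
  kubectl --context old-k8s-version-318786 get clusterrolebinding minikube-rbac -o wide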
I1124 14:00:26.060891 203121 settings.go:142] acquiring lock: {Name:mk2b0bbff4d8ced468f457362668d43b813dc062 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 14:00:26.060955 203121 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21932-2368/kubeconfig
I1124 14:00:26.061937 203121 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-2368/kubeconfig: {Name:mk246d21eaffbd8aca2abdc1b2f89d6fcc902f28 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 14:00:26.062157 203121 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1124 14:00:26.062320 203121 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1124 14:00:26.062598 203121 config.go:182] Loaded profile config "old-k8s-version-318786": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1124 14:00:26.062635 203121 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1124 14:00:26.062693 203121 addons.go:70] Setting storage-provisioner=true in profile "old-k8s-version-318786"
I1124 14:00:26.062708 203121 addons.go:239] Setting addon storage-provisioner=true in "old-k8s-version-318786"
I1124 14:00:26.062728 203121 host.go:66] Checking if "old-k8s-version-318786" exists ...
I1124 14:00:26.063138 203121 addons.go:70] Setting default-storageclass=true in profile "old-k8s-version-318786"
I1124 14:00:26.063163 203121 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "old-k8s-version-318786"
I1124 14:00:26.063454 203121 cli_runner.go:164] Run: docker container inspect old-k8s-version-318786 --format={{.State.Status}}
I1124 14:00:26.063514 203121 cli_runner.go:164] Run: docker container inspect old-k8s-version-318786 --format={{.State.Status}}
I1124 14:00:26.066058 203121 out.go:179] * Verifying Kubernetes components...
I1124 14:00:26.069103 203121 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1124 14:00:26.111201 203121 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1124 14:00:26.116253 203121 addons.go:239] Setting addon default-storageclass=true in "old-k8s-version-318786"
I1124 14:00:26.116292 203121 host.go:66] Checking if "old-k8s-version-318786" exists ...
I1124 14:00:26.116709 203121 cli_runner.go:164] Run: docker container inspect old-k8s-version-318786 --format={{.State.Status}}
I1124 14:00:26.116830 203121 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1124 14:00:26.116844 203121 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1124 14:00:26.116892 203121 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-318786
I1124 14:00:26.150658 203121 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33053 SSHKeyPath:/home/jenkins/minikube-integration/21932-2368/.minikube/machines/old-k8s-version-318786/id_rsa Username:docker}
I1124 14:00:26.161222 203121 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1124 14:00:26.161243 203121 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1124 14:00:26.161315 203121 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-318786
I1124 14:00:26.189630 203121 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33053 SSHKeyPath:/home/jenkins/minikube-integration/21932-2368/.minikube/machines/old-k8s-version-318786/id_rsa Username:docker}
I1124 14:00:26.424488 203121 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.85.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I1124 14:00:26.425624 203121 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1124 14:00:26.485066 203121 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1124 14:00:26.513639 203121 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1124 14:00:27.429647 203121 ssh_runner.go:235] Completed: sudo systemctl start kubelet: (1.003904202s)
I1124 14:00:27.430570 203121 node_ready.go:35] waiting up to 6m0s for node "old-k8s-version-318786" to be "Ready" ...
I1124 14:00:27.431403 203121 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.85.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -": (1.005751061s)
I1124 14:00:27.431468 203121 start.go:977] {"host.minikube.internal": 192.168.85.1} host record injected into CoreDNS's ConfigMap
I1124 14:00:27.809398 203121 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (1.324299578s)
I1124 14:00:27.809491 203121 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: (1.295828834s)
I1124 14:00:27.819279 203121 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
I1124 14:00:27.822260 203121 addons.go:530] duration metric: took 1.759614941s for enable addons: enabled=[storage-provisioner default-storageclass]
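The addon state reported above can also be checked from the minikube CLI for this profile:
  # list addon status for the profile used in this run
  minikube -p old-k8s-version-318786 addons list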
I1124 14:00:27.936206 203121 kapi.go:214] "coredns" deployment in "kube-system" namespace and "old-k8s-version-318786" context rescaled to 1 replicas
W1124 14:00:29.434630 203121 node_ready.go:57] node "old-k8s-version-318786" has "Ready":"False" status (will retry)
W1124 14:00:31.933623 203121 node_ready.go:57] node "old-k8s-version-318786" has "Ready":"False" status (will retry)
W1124 14:00:33.934195 203121 node_ready.go:57] node "old-k8s-version-318786" has "Ready":"False" status (will retry)
W1124 14:00:36.434086 203121 node_ready.go:57] node "old-k8s-version-318786" has "Ready":"False" status (will retry)
W1124 14:00:38.434475 203121 node_ready.go:57] node "old-k8s-version-318786" has "Ready":"False" status (will retry)
I1124 14:00:39.437064 203121 node_ready.go:49] node "old-k8s-version-318786" is "Ready"
I1124 14:00:39.437091 203121 node_ready.go:38] duration metric: took 12.006466784s for node "old-k8s-version-318786" to be "Ready" ...
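The poll loop above is equivalent to kubectl's built-in readiness wait, which blocks until the node condition flips or the timeout expires:
  # same readiness gate as a one-liner
  kubectl --context old-k8s-version-318786 wait --for=condition=Ready node/old-k8s-version-318786 --timeout=360s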
I1124 14:00:39.437104 203121 api_server.go:52] waiting for apiserver process to appear ...
I1124 14:00:39.437165 203121 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1124 14:00:39.453227 203121 api_server.go:72] duration metric: took 13.391041621s to wait for apiserver process to appear ...
I1124 14:00:39.453251 203121 api_server.go:88] waiting for apiserver healthz status ...
I1124 14:00:39.453271 203121 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
I1124 14:00:39.462068 203121 api_server.go:279] https://192.168.85.2:8443/healthz returned 200:
ok
I1124 14:00:39.463530 203121 api_server.go:141] control plane version: v1.28.0
I1124 14:00:39.463554 203121 api_server.go:131] duration metric: took 10.295662ms to wait for apiserver health ...
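The healthz probe and version read above map to plain API calls; going through kubectl reuses the cluster's kubeconfig and TLS material instead of hitting https://192.168.85.2:8443 directly:
  # raw healthz endpoint plus client/server versions
  kubectl --context old-k8s-version-318786 get --raw /healthz
  kubectl --context old-k8s-version-318786 version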
I1124 14:00:39.463563 203121 system_pods.go:43] waiting for kube-system pods to appear ...
I1124 14:00:39.467352 203121 system_pods.go:59] 8 kube-system pods found
I1124 14:00:39.467391 203121 system_pods.go:61] "coredns-5dd5756b68-n7s8h" [72202b02-1ca2-4c69-ad47-3f1ef90ba8ba] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 14:00:39.467397 203121 system_pods.go:61] "etcd-old-k8s-version-318786" [dd78fff3-f901-4dc0-9c77-907dbb69e36d] Running
I1124 14:00:39.467402 203121 system_pods.go:61] "kindnet-z4rkx" [053d781f-846e-4391-a537-edd057019339] Running
I1124 14:00:39.467406 203121 system_pods.go:61] "kube-apiserver-old-k8s-version-318786" [7f0596ec-97f5-4a70-974d-38c5d9a51273] Running
I1124 14:00:39.467410 203121 system_pods.go:61] "kube-controller-manager-old-k8s-version-318786" [4ae0e32a-b5f2-4e37-82d1-d76bfabbedd5] Running
I1124 14:00:39.467414 203121 system_pods.go:61] "kube-proxy-jwmdg" [11a8b197-dd22-45df-9593-66d16fdefa80] Running
I1124 14:00:39.467418 203121 system_pods.go:61] "kube-scheduler-old-k8s-version-318786" [01641e80-7a9e-48c2-b9e3-d384beab62d7] Running
I1124 14:00:39.467423 203121 system_pods.go:61] "storage-provisioner" [2298aa73-9529-42f0-a0ec-22197acfa4ba] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 14:00:39.467428 203121 system_pods.go:74] duration metric: took 3.859916ms to wait for pod list to return data ...
I1124 14:00:39.467435 203121 default_sa.go:34] waiting for default service account to be created ...
I1124 14:00:39.470040 203121 default_sa.go:45] found service account: "default"
I1124 14:00:39.470060 203121 default_sa.go:55] duration metric: took 2.619768ms for default service account to be created ...
I1124 14:00:39.470070 203121 system_pods.go:116] waiting for k8s-apps to be running ...
I1124 14:00:39.473490 203121 system_pods.go:86] 8 kube-system pods found
I1124 14:00:39.473522 203121 system_pods.go:89] "coredns-5dd5756b68-n7s8h" [72202b02-1ca2-4c69-ad47-3f1ef90ba8ba] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 14:00:39.473528 203121 system_pods.go:89] "etcd-old-k8s-version-318786" [dd78fff3-f901-4dc0-9c77-907dbb69e36d] Running
I1124 14:00:39.473534 203121 system_pods.go:89] "kindnet-z4rkx" [053d781f-846e-4391-a537-edd057019339] Running
I1124 14:00:39.473539 203121 system_pods.go:89] "kube-apiserver-old-k8s-version-318786" [7f0596ec-97f5-4a70-974d-38c5d9a51273] Running
I1124 14:00:39.473543 203121 system_pods.go:89] "kube-controller-manager-old-k8s-version-318786" [4ae0e32a-b5f2-4e37-82d1-d76bfabbedd5] Running
I1124 14:00:39.473547 203121 system_pods.go:89] "kube-proxy-jwmdg" [11a8b197-dd22-45df-9593-66d16fdefa80] Running
I1124 14:00:39.473552 203121 system_pods.go:89] "kube-scheduler-old-k8s-version-318786" [01641e80-7a9e-48c2-b9e3-d384beab62d7] Running
I1124 14:00:39.473558 203121 system_pods.go:89] "storage-provisioner" [2298aa73-9529-42f0-a0ec-22197acfa4ba] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 14:00:39.473585 203121 retry.go:31] will retry after 299.487693ms: missing components: kube-dns
I1124 14:00:39.780995 203121 system_pods.go:86] 8 kube-system pods found
I1124 14:00:39.781029 203121 system_pods.go:89] "coredns-5dd5756b68-n7s8h" [72202b02-1ca2-4c69-ad47-3f1ef90ba8ba] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 14:00:39.781036 203121 system_pods.go:89] "etcd-old-k8s-version-318786" [dd78fff3-f901-4dc0-9c77-907dbb69e36d] Running
I1124 14:00:39.781043 203121 system_pods.go:89] "kindnet-z4rkx" [053d781f-846e-4391-a537-edd057019339] Running
I1124 14:00:39.781047 203121 system_pods.go:89] "kube-apiserver-old-k8s-version-318786" [7f0596ec-97f5-4a70-974d-38c5d9a51273] Running
I1124 14:00:39.781051 203121 system_pods.go:89] "kube-controller-manager-old-k8s-version-318786" [4ae0e32a-b5f2-4e37-82d1-d76bfabbedd5] Running
I1124 14:00:39.781055 203121 system_pods.go:89] "kube-proxy-jwmdg" [11a8b197-dd22-45df-9593-66d16fdefa80] Running
I1124 14:00:39.781061 203121 system_pods.go:89] "kube-scheduler-old-k8s-version-318786" [01641e80-7a9e-48c2-b9e3-d384beab62d7] Running
I1124 14:00:39.781067 203121 system_pods.go:89] "storage-provisioner" [2298aa73-9529-42f0-a0ec-22197acfa4ba] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 14:00:39.781080 203121 retry.go:31] will retry after 249.97776ms: missing components: kube-dns
I1124 14:00:40.063092 203121 system_pods.go:86] 8 kube-system pods found
I1124 14:00:40.063130 203121 system_pods.go:89] "coredns-5dd5756b68-n7s8h" [72202b02-1ca2-4c69-ad47-3f1ef90ba8ba] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 14:00:40.063139 203121 system_pods.go:89] "etcd-old-k8s-version-318786" [dd78fff3-f901-4dc0-9c77-907dbb69e36d] Running
I1124 14:00:40.063145 203121 system_pods.go:89] "kindnet-z4rkx" [053d781f-846e-4391-a537-edd057019339] Running
I1124 14:00:40.063149 203121 system_pods.go:89] "kube-apiserver-old-k8s-version-318786" [7f0596ec-97f5-4a70-974d-38c5d9a51273] Running
I1124 14:00:40.063180 203121 system_pods.go:89] "kube-controller-manager-old-k8s-version-318786" [4ae0e32a-b5f2-4e37-82d1-d76bfabbedd5] Running
I1124 14:00:40.063193 203121 system_pods.go:89] "kube-proxy-jwmdg" [11a8b197-dd22-45df-9593-66d16fdefa80] Running
I1124 14:00:40.063198 203121 system_pods.go:89] "kube-scheduler-old-k8s-version-318786" [01641e80-7a9e-48c2-b9e3-d384beab62d7] Running
I1124 14:00:40.063203 203121 system_pods.go:89] "storage-provisioner" [2298aa73-9529-42f0-a0ec-22197acfa4ba] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 14:00:40.063219 203121 retry.go:31] will retry after 436.268576ms: missing components: kube-dns
I1124 14:00:40.504166 203121 system_pods.go:86] 8 kube-system pods found
I1124 14:00:40.504245 203121 system_pods.go:89] "coredns-5dd5756b68-n7s8h" [72202b02-1ca2-4c69-ad47-3f1ef90ba8ba] Running
I1124 14:00:40.504259 203121 system_pods.go:89] "etcd-old-k8s-version-318786" [dd78fff3-f901-4dc0-9c77-907dbb69e36d] Running
I1124 14:00:40.504264 203121 system_pods.go:89] "kindnet-z4rkx" [053d781f-846e-4391-a537-edd057019339] Running
I1124 14:00:40.504269 203121 system_pods.go:89] "kube-apiserver-old-k8s-version-318786" [7f0596ec-97f5-4a70-974d-38c5d9a51273] Running
I1124 14:00:40.504274 203121 system_pods.go:89] "kube-controller-manager-old-k8s-version-318786" [4ae0e32a-b5f2-4e37-82d1-d76bfabbedd5] Running
I1124 14:00:40.504279 203121 system_pods.go:89] "kube-proxy-jwmdg" [11a8b197-dd22-45df-9593-66d16fdefa80] Running
I1124 14:00:40.504283 203121 system_pods.go:89] "kube-scheduler-old-k8s-version-318786" [01641e80-7a9e-48c2-b9e3-d384beab62d7] Running
I1124 14:00:40.504287 203121 system_pods.go:89] "storage-provisioner" [2298aa73-9529-42f0-a0ec-22197acfa4ba] Running
I1124 14:00:40.504296 203121 system_pods.go:126] duration metric: took 1.034219513s to wait for k8s-apps to be running ...
I1124 14:00:40.504307 203121 system_svc.go:44] waiting for kubelet service to be running ....
I1124 14:00:40.504364 203121 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1124 14:00:40.517880 203121 system_svc.go:56] duration metric: took 13.563315ms WaitForService to wait for kubelet
I1124 14:00:40.517964 203121 kubeadm.go:587] duration metric: took 14.455781279s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1124 14:00:40.517991 203121 node_conditions.go:102] verifying NodePressure condition ...
I1124 14:00:40.520930 203121 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
I1124 14:00:40.520963 203121 node_conditions.go:123] node cpu capacity is 2
I1124 14:00:40.520978 203121 node_conditions.go:105] duration metric: took 2.980003ms to run NodePressure ...
I1124 14:00:40.520990 203121 start.go:242] waiting for startup goroutines ...
I1124 14:00:40.520998 203121 start.go:247] waiting for cluster config update ...
I1124 14:00:40.521010 203121 start.go:256] writing updated cluster config ...
I1124 14:00:40.521298 203121 ssh_runner.go:195] Run: rm -f paused
I1124 14:00:40.525324 203121 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1124 14:00:40.529797 203121 pod_ready.go:83] waiting for pod "coredns-5dd5756b68-n7s8h" in "kube-system" namespace to be "Ready" or be gone ...
I1124 14:00:40.535607 203121 pod_ready.go:94] pod "coredns-5dd5756b68-n7s8h" is "Ready"
I1124 14:00:40.535639 203121 pod_ready.go:86] duration metric: took 5.816258ms for pod "coredns-5dd5756b68-n7s8h" in "kube-system" namespace to be "Ready" or be gone ...
I1124 14:00:40.539181 203121 pod_ready.go:83] waiting for pod "etcd-old-k8s-version-318786" in "kube-system" namespace to be "Ready" or be gone ...
I1124 14:00:40.544657 203121 pod_ready.go:94] pod "etcd-old-k8s-version-318786" is "Ready"
I1124 14:00:40.544685 203121 pod_ready.go:86] duration metric: took 5.478924ms for pod "etcd-old-k8s-version-318786" in "kube-system" namespace to be "Ready" or be gone ...
I1124 14:00:40.548165 203121 pod_ready.go:83] waiting for pod "kube-apiserver-old-k8s-version-318786" in "kube-system" namespace to be "Ready" or be gone ...
I1124 14:00:40.553506 203121 pod_ready.go:94] pod "kube-apiserver-old-k8s-version-318786" is "Ready"
I1124 14:00:40.553538 203121 pod_ready.go:86] duration metric: took 5.343284ms for pod "kube-apiserver-old-k8s-version-318786" in "kube-system" namespace to be "Ready" or be gone ...
I1124 14:00:40.556924 203121 pod_ready.go:83] waiting for pod "kube-controller-manager-old-k8s-version-318786" in "kube-system" namespace to be "Ready" or be gone ...
I1124 14:00:40.929692 203121 pod_ready.go:94] pod "kube-controller-manager-old-k8s-version-318786" is "Ready"
I1124 14:00:40.929725 203121 pod_ready.go:86] duration metric: took 372.7723ms for pod "kube-controller-manager-old-k8s-version-318786" in "kube-system" namespace to be "Ready" or be gone ...
I1124 14:00:41.130990 203121 pod_ready.go:83] waiting for pod "kube-proxy-jwmdg" in "kube-system" namespace to be "Ready" or be gone ...
I1124 14:00:41.530005 203121 pod_ready.go:94] pod "kube-proxy-jwmdg" is "Ready"
I1124 14:00:41.530034 203121 pod_ready.go:86] duration metric: took 399.016962ms for pod "kube-proxy-jwmdg" in "kube-system" namespace to be "Ready" or be gone ...
I1124 14:00:41.730026 203121 pod_ready.go:83] waiting for pod "kube-scheduler-old-k8s-version-318786" in "kube-system" namespace to be "Ready" or be gone ...
I1124 14:00:42.131071 203121 pod_ready.go:94] pod "kube-scheduler-old-k8s-version-318786" is "Ready"
I1124 14:00:42.131114 203121 pod_ready.go:86] duration metric: took 401.061008ms for pod "kube-scheduler-old-k8s-version-318786" in "kube-system" namespace to be "Ready" or be gone ...
I1124 14:00:42.131129 203121 pod_ready.go:40] duration metric: took 1.60575817s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1124 14:00:42.200914 203121 start.go:625] kubectl: 1.33.2, cluster: 1.28.0 (minor skew: 5)
I1124 14:00:42.204172 203121 out.go:203]
W1124 14:00:42.207213 203121 out.go:285] ! /usr/local/bin/kubectl is version 1.33.2, which may have incompatibilities with Kubernetes 1.28.0.
I1124 14:00:42.210285 203121 out.go:179] - Want kubectl v1.28.0? Try 'minikube kubectl -- get pods -A'
I1124 14:00:42.214390 203121 out.go:179] * Done! kubectl is now configured to use "old-k8s-version-318786" cluster and "default" namespace by default
==> container status <==
CONTAINER       IMAGE           CREATED          STATE     NAME                      ATTEMPT   POD ID          POD                                               NAMESPACE
70e558ad037eb   1611cd07b61d5   7 seconds ago    Running   busybox                   0         f472329e9fd63   busybox                                           default
33ca9b6d24a80   ba04bb24b9575   13 seconds ago   Running   storage-provisioner       0         9de766e43deb4   storage-provisioner                               kube-system
d1e4297a18de5   97e04611ad434   13 seconds ago   Running   coredns                   0         1b5cb0ca09af2   coredns-5dd5756b68-n7s8h                          kube-system
8a5ceb46ea7cb   b1a8c6f707935   24 seconds ago   Running   kindnet-cni               0         e9b4fd516b97c   kindnet-z4rkx                                     kube-system
e431b25999ece   940f54a5bcae9   26 seconds ago   Running   kube-proxy                0         68384e9c54fe8   kube-proxy-jwmdg                                  kube-system
64ea1db6adeec   00543d2fe5d71   46 seconds ago   Running   kube-apiserver            0         6e10952c6964b   kube-apiserver-old-k8s-version-318786             kube-system
d422fb0577ca7   46cc66ccc7c19   46 seconds ago   Running   kube-controller-manager   0         ede8e07dcdc74   kube-controller-manager-old-k8s-version-318786    kube-system
0769df21ce83c   762dce4090c5f   46 seconds ago   Running   kube-scheduler            0         6729e51d9cdf6   kube-scheduler-old-k8s-version-318786             kube-system
a96dcde7b48e2   9cdd6470f48c8   47 seconds ago   Running   etcd                      0         388ca052bc258   etcd-old-k8s-version-318786                       kube-system
==> containerd <==
Nov 24 14:00:39 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:39.610323957Z" level=info msg="connecting to shim d1e4297a18de5a35eef1e955a0f6b73d8881ba2296e59d8acaed4614dce5de51" address="unix:///run/containerd/s/f62f275e67577be37030e893196dc98d73b2044e58d241d1a7f99ccee4904d24" protocol=ttrpc version=3
Nov 24 14:00:39 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:39.620434869Z" level=info msg="CreateContainer within sandbox \"9de766e43deb416449962bc7301bab891c72b0af9fb329bb4d8e4ff8ef66bff4\" for container &ContainerMetadata{Name:storage-provisioner,Attempt:0,}"
Nov 24 14:00:39 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:39.644253353Z" level=info msg="Container 33ca9b6d24a80a1f0470355c5dc5bf87df622a7ffd33dad20b3a66e3d42820fb: CDI devices from CRI Config.CDIDevices: []"
Nov 24 14:00:39 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:39.663968611Z" level=info msg="CreateContainer within sandbox \"9de766e43deb416449962bc7301bab891c72b0af9fb329bb4d8e4ff8ef66bff4\" for &ContainerMetadata{Name:storage-provisioner,Attempt:0,} returns container id \"33ca9b6d24a80a1f0470355c5dc5bf87df622a7ffd33dad20b3a66e3d42820fb\""
Nov 24 14:00:39 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:39.665502745Z" level=info msg="StartContainer for \"33ca9b6d24a80a1f0470355c5dc5bf87df622a7ffd33dad20b3a66e3d42820fb\""
Nov 24 14:00:39 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:39.666531739Z" level=info msg="connecting to shim 33ca9b6d24a80a1f0470355c5dc5bf87df622a7ffd33dad20b3a66e3d42820fb" address="unix:///run/containerd/s/25a7b18f3f0941131e8c32d45d1f9f3bcee38bf8a73b1e3195d36d7532fce44f" protocol=ttrpc version=3
Nov 24 14:00:39 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:39.711141089Z" level=info msg="StartContainer for \"d1e4297a18de5a35eef1e955a0f6b73d8881ba2296e59d8acaed4614dce5de51\" returns successfully"
Nov 24 14:00:39 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:39.756705480Z" level=info msg="StartContainer for \"33ca9b6d24a80a1f0470355c5dc5bf87df622a7ffd33dad20b3a66e3d42820fb\" returns successfully"
Nov 24 14:00:42 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:42.744539553Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:f566ecf0-6907-464c-9185-0f1cac06d38f,Namespace:default,Attempt:0,}"
Nov 24 14:00:42 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:42.796555926Z" level=info msg="connecting to shim f472329e9fd635f4d2ecb8d02d86100f8c593bf1ea6b1e68f6aab8b27bbcb144" address="unix:///run/containerd/s/e47b16e174c686888228b35f0ff63c9e1e5e13d47c7f7c2e532fdeedd0981c84" namespace=k8s.io protocol=ttrpc version=3
Nov 24 14:00:42 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:42.853864201Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:f566ecf0-6907-464c-9185-0f1cac06d38f,Namespace:default,Attempt:0,} returns sandbox id \"f472329e9fd635f4d2ecb8d02d86100f8c593bf1ea6b1e68f6aab8b27bbcb144\""
Nov 24 14:00:42 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:42.855634629Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\""
Nov 24 14:00:45 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:45.151334885Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 24 14:00:45 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:45.153450408Z" level=info msg="stop pulling image gcr.io/k8s-minikube/busybox:1.28.4-glibc: active requests=0, bytes read=1937184"
Nov 24 14:00:45 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:45.156363448Z" level=info msg="ImageCreate event name:\"sha256:1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 24 14:00:45 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:45.161551496Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 24 14:00:45 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:45.162193515Z" level=info msg="Pulled image \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" with image id \"sha256:1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c\", repo tag \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\", repo digest \"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\", size \"1935750\" in 2.306509548s"
Nov 24 14:00:45 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:45.162249565Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" returns image reference \"sha256:1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c\""
Nov 24 14:00:45 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:45.179990125Z" level=info msg="CreateContainer within sandbox \"f472329e9fd635f4d2ecb8d02d86100f8c593bf1ea6b1e68f6aab8b27bbcb144\" for container &ContainerMetadata{Name:busybox,Attempt:0,}"
Nov 24 14:00:45 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:45.248189267Z" level=info msg="Container 70e558ad037eb593fa44b07e4fd36f48454dee00712743ce51a58d742a33605b: CDI devices from CRI Config.CDIDevices: []"
Nov 24 14:00:45 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:45.262062616Z" level=info msg="CreateContainer within sandbox \"f472329e9fd635f4d2ecb8d02d86100f8c593bf1ea6b1e68f6aab8b27bbcb144\" for &ContainerMetadata{Name:busybox,Attempt:0,} returns container id \"70e558ad037eb593fa44b07e4fd36f48454dee00712743ce51a58d742a33605b\""
Nov 24 14:00:45 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:45.263385826Z" level=info msg="StartContainer for \"70e558ad037eb593fa44b07e4fd36f48454dee00712743ce51a58d742a33605b\""
Nov 24 14:00:45 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:45.265058480Z" level=info msg="connecting to shim 70e558ad037eb593fa44b07e4fd36f48454dee00712743ce51a58d742a33605b" address="unix:///run/containerd/s/e47b16e174c686888228b35f0ff63c9e1e5e13d47c7f7c2e532fdeedd0981c84" protocol=ttrpc version=3
Nov 24 14:00:45 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:45.370550827Z" level=info msg="StartContainer for \"70e558ad037eb593fa44b07e4fd36f48454dee00712743ce51a58d742a33605b\" returns successfully"
Nov 24 14:00:51 old-k8s-version-318786 containerd[755]: E1124 14:00:51.571973 755 websocket.go:100] "Unhandled Error" err="unable to upgrade websocket connection: websocket server finished before becoming ready" logger="UnhandledError"
==> coredns [d1e4297a18de5a35eef1e955a0f6b73d8881ba2296e59d8acaed4614dce5de51] <==
.:53
[INFO] plugin/reload: Running configuration SHA512 = 8aa94104b4dae56b00431f7362ac05b997af2246775de35dc2eb361b0707b2fa7199f9ddfdba27fdef1331b76d09c41700f6cb5d00836dabab7c0df8e651283f
CoreDNS-1.10.1
linux/arm64, go1.20, 055b2c3
[INFO] 127.0.0.1:60396 - 50045 "HINFO IN 8149976766644082851.319243235608499577. udp 56 false 512" NXDOMAIN qr,rd,ra 56 0.006788489s
==> describe nodes <==
Name: old-k8s-version-318786
Roles: control-plane
Labels: beta.kubernetes.io/arch=arm64
beta.kubernetes.io/os=linux
kubernetes.io/arch=arm64
kubernetes.io/hostname=old-k8s-version-318786
kubernetes.io/os=linux
minikube.k8s.io/commit=b5d1c9f4e75f4e638a533695fd62619949cefcab
minikube.k8s.io/name=old-k8s-version-318786
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_11_24T14_00_14_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Mon, 24 Nov 2025 14:00:09 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: old-k8s-version-318786
AcquireTime: <unset>
RenewTime: Mon, 24 Nov 2025 14:00:43 +0000
Conditions:
Type             Status   LastHeartbeatTime                 LastTransitionTime                Reason                       Message
----             ------   -----------------                 ------------------                ------                       -------
MemoryPressure   False    Mon, 24 Nov 2025 14:00:43 +0000   Mon, 24 Nov 2025 14:00:06 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
DiskPressure     False    Mon, 24 Nov 2025 14:00:43 +0000   Mon, 24 Nov 2025 14:00:06 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
PIDPressure      False    Mon, 24 Nov 2025 14:00:43 +0000   Mon, 24 Nov 2025 14:00:06 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
Ready            True     Mon, 24 Nov 2025 14:00:43 +0000   Mon, 24 Nov 2025 14:00:39 +0000   KubeletReady                 kubelet is posting ready status
Addresses:
InternalIP: 192.168.85.2
Hostname: old-k8s-version-318786
Capacity:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022296Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022296Ki
pods: 110
System Info:
Machine ID: 7283ea1857f18f20a875c29069214c9d
System UUID: 259561de-786f-47f9-8e4d-12bddad03b80
Boot ID: dd480c26-e101-4930-b98c-54c06b430fdc
Kernel Version: 5.15.0-1084-aws
OS Image: Debian GNU/Linux 12 (bookworm)
Operating System: linux
Architecture: arm64
Container Runtime Version: containerd://2.1.5
Kubelet Version: v1.28.0
Kube-Proxy Version: v1.28.0
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (9 in total)
Namespace     Name                                              CPU Requests   CPU Limits   Memory Requests   Memory Limits   Age
---------     ----                                              ------------   ----------   ---------------   -------------   ---
default       busybox                                           0 (0%)         0 (0%)       0 (0%)            0 (0%)          10s
kube-system   coredns-5dd5756b68-n7s8h                          100m (5%)      0 (0%)       70Mi (0%)         170Mi (2%)      27s
kube-system   etcd-old-k8s-version-318786                       100m (5%)      0 (0%)       100Mi (1%)        0 (0%)          39s
kube-system   kindnet-z4rkx                                     100m (5%)      100m (5%)    50Mi (0%)         50Mi (0%)       27s
kube-system   kube-apiserver-old-k8s-version-318786             250m (12%)     0 (0%)       0 (0%)            0 (0%)          39s
kube-system   kube-controller-manager-old-k8s-version-318786    200m (10%)     0 (0%)       0 (0%)            0 (0%)          39s
kube-system   kube-proxy-jwmdg                                  0 (0%)         0 (0%)       0 (0%)            0 (0%)          27s
kube-system   kube-scheduler-old-k8s-version-318786             100m (5%)      0 (0%)       0 (0%)            0 (0%)          39s
kube-system   storage-provisioner                               0 (0%)         0 (0%)       0 (0%)            0 (0%)          25s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource            Requests     Limits
--------            --------     ------
cpu                 850m (42%)   100m (5%)
memory              220Mi (2%)   220Mi (2%)
ephemeral-storage   0 (0%)       0 (0%)
hugepages-1Gi       0 (0%)       0 (0%)
hugepages-2Mi       0 (0%)       0 (0%)
hugepages-32Mi      0 (0%)       0 (0%)
hugepages-64Ki      0 (0%)       0 (0%)
Events:
Type     Reason                     Age   From              Message
----     ------                     ----  ----              -------
Normal   Starting                   25s   kube-proxy
Normal   Starting                   40s   kubelet           Starting kubelet.
Normal   NodeHasSufficientMemory    39s   kubelet           Node old-k8s-version-318786 status is now: NodeHasSufficientMemory
Normal   NodeHasNoDiskPressure      39s   kubelet           Node old-k8s-version-318786 status is now: NodeHasNoDiskPressure
Normal   NodeHasSufficientPID       39s   kubelet           Node old-k8s-version-318786 status is now: NodeHasSufficientPID
Normal   NodeAllocatableEnforced    39s   kubelet           Updated Node Allocatable limit across pods
Normal   RegisteredNode             27s   node-controller   Node old-k8s-version-318786 event: Registered Node old-k8s-version-318786 in Controller
Normal   NodeReady                  13s   kubelet           Node old-k8s-version-318786 status is now: NodeReady
==> dmesg <==
[Nov24 12:17] ACPI: SRAT not present
[ +0.000000] ACPI: SRAT not present
[ +0.000000] SPI driver altr_a10sr has no spi_device_id for altr,a10sr
[ +0.014697] device-mapper: core: CONFIG_IMA_DISABLE_HTABLE is disabled. Duplicate IMA measurements will not be recorded in the IMA log.
[ +0.497291] systemd[1]: Configuration file /run/systemd/system/netplan-ovs-cleanup.service is marked world-inaccessible. This has no effect as configuration data is accessible via APIs without restrictions. Proceeding anyway.
[ +0.033884] systemd[1]: /lib/systemd/system/snapd.service:23: Unknown key name 'RestartMode' in section 'Service', ignoring.
[ +0.804993] ena 0000:00:05.0: LLQ is not supported Fallback to host mode policy.
[ +6.476130] kauditd_printk_skb: 36 callbacks suppressed
==> etcd [a96dcde7b48e2020162f86ef991d82171cf903dc40c2588013e878e07607a6eb] <==
{"level":"info","ts":"2025-11-24T14:00:05.836588Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"9f0758e1c58a86ed switched to configuration voters=(11459225503572592365)"}
{"level":"info","ts":"2025-11-24T14:00:05.836695Z","caller":"membership/cluster.go:421","msg":"added member","cluster-id":"68eaea490fab4e05","local-member-id":"9f0758e1c58a86ed","added-peer-id":"9f0758e1c58a86ed","added-peer-peer-urls":["https://192.168.85.2:2380"]}
{"level":"info","ts":"2025-11-24T14:00:05.836985Z","caller":"embed/etcd.go:726","msg":"starting with client TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/server.crt, key = /var/lib/minikube/certs/etcd/server.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
{"level":"info","ts":"2025-11-24T14:00:05.83715Z","caller":"embed/etcd.go:278","msg":"now serving peer/client/metrics","local-member-id":"9f0758e1c58a86ed","initial-advertise-peer-urls":["https://192.168.85.2:2380"],"listen-peer-urls":["https://192.168.85.2:2380"],"advertise-client-urls":["https://192.168.85.2:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.85.2:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
{"level":"info","ts":"2025-11-24T14:00:05.837189Z","caller":"embed/etcd.go:855","msg":"serving metrics","address":"http://127.0.0.1:2381"}
{"level":"info","ts":"2025-11-24T14:00:05.837186Z","caller":"embed/etcd.go:597","msg":"serving peer traffic","address":"192.168.85.2:2380"}
{"level":"info","ts":"2025-11-24T14:00:05.837211Z","caller":"embed/etcd.go:569","msg":"cmux::serve","address":"192.168.85.2:2380"}
{"level":"info","ts":"2025-11-24T14:00:06.715956Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"9f0758e1c58a86ed is starting a new election at term 1"}
{"level":"info","ts":"2025-11-24T14:00:06.716187Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"9f0758e1c58a86ed became pre-candidate at term 1"}
{"level":"info","ts":"2025-11-24T14:00:06.716278Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"9f0758e1c58a86ed received MsgPreVoteResp from 9f0758e1c58a86ed at term 1"}
{"level":"info","ts":"2025-11-24T14:00:06.716408Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"9f0758e1c58a86ed became candidate at term 2"}
{"level":"info","ts":"2025-11-24T14:00:06.716497Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"9f0758e1c58a86ed received MsgVoteResp from 9f0758e1c58a86ed at term 2"}
{"level":"info","ts":"2025-11-24T14:00:06.716591Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"9f0758e1c58a86ed became leader at term 2"}
{"level":"info","ts":"2025-11-24T14:00:06.716663Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: 9f0758e1c58a86ed elected leader 9f0758e1c58a86ed at term 2"}
{"level":"info","ts":"2025-11-24T14:00:06.719119Z","caller":"etcdserver/server.go:2571","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-24T14:00:06.724173Z","caller":"etcdserver/server.go:2062","msg":"published local member to cluster through raft","local-member-id":"9f0758e1c58a86ed","local-member-attributes":"{Name:old-k8s-version-318786 ClientURLs:[https://192.168.85.2:2379]}","request-path":"/0/members/9f0758e1c58a86ed/attributes","cluster-id":"68eaea490fab4e05","publish-timeout":"7s"}
{"level":"info","ts":"2025-11-24T14:00:06.727971Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"68eaea490fab4e05","local-member-id":"9f0758e1c58a86ed","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-24T14:00:06.728194Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-24T14:00:06.728301Z","caller":"etcdserver/server.go:2595","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-24T14:00:06.728041Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-24T14:00:06.732297Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.85.2:2379"}
{"level":"info","ts":"2025-11-24T14:00:06.728075Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-24T14:00:06.73389Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2025-11-24T14:00:06.739971Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2025-11-24T14:00:06.747818Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
==> kernel <==
14:00:52 up 1:43, 0 user, load average: 3.35, 3.70, 3.04
Linux old-k8s-version-318786 5.15.0-1084-aws #91~20.04.1-Ubuntu SMP Fri May 2 07:00:04 UTC 2025 aarch64 GNU/Linux
PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"
==> kindnet [8a5ceb46ea7cbcd9a345bdf9ba11d0c7a3a990148842c5c44246730c76d8948d] <==
I1124 14:00:28.769606 1 main.go:109] connected to apiserver: https://10.96.0.1:443
I1124 14:00:28.860713 1 main.go:139] hostIP = 192.168.85.2
podIP = 192.168.85.2
I1124 14:00:28.860851 1 main.go:148] setting mtu 1500 for CNI
I1124 14:00:28.860870 1 main.go:178] kindnetd IP family: "ipv4"
I1124 14:00:28.860885 1 main.go:182] noMask IPv4 subnets: [10.244.0.0/16]
time="2025-11-24T14:00:29Z" level=info msg="Created plugin 10-kube-network-policies (kindnetd, handles RunPodSandbox,RemovePodSandbox)"
I1124 14:00:29.062756 1 controller.go:377] "Starting controller" name="kube-network-policies"
I1124 14:00:29.064202 1 controller.go:381] "Waiting for informer caches to sync"
I1124 14:00:29.064283 1 shared_informer.go:350] "Waiting for caches to sync" controller="kube-network-policies"
I1124 14:00:29.064439 1 controller.go:390] nri plugin exited: failed to connect to NRI service: dial unix /var/run/nri/nri.sock: connect: no such file or directory
I1124 14:00:29.264984 1 shared_informer.go:357] "Caches are synced" controller="kube-network-policies"
I1124 14:00:29.265101 1 metrics.go:72] Registering metrics
I1124 14:00:29.265206 1 controller.go:711] "Syncing nftables rules"
I1124 14:00:39.066105 1 main.go:297] Handling node with IPs: map[192.168.85.2:{}]
I1124 14:00:39.066164 1 main.go:301] handling current node
I1124 14:00:49.064077 1 main.go:297] Handling node with IPs: map[192.168.85.2:{}]
I1124 14:00:49.064224 1 main.go:301] handling current node
==> kube-apiserver [64ea1db6adeecccf4211992b471a4088bba1825d5764c029cd41c736f16d8131] <==
I1124 14:00:09.559574 1 shared_informer.go:318] Caches are synced for cluster_authentication_trust_controller
I1124 14:00:09.559602 1 shared_informer.go:318] Caches are synced for crd-autoregister
I1124 14:00:09.567371 1 aggregator.go:166] initial CRD sync complete...
I1124 14:00:09.567396 1 autoregister_controller.go:141] Starting autoregister controller
I1124 14:00:09.567404 1 cache.go:32] Waiting for caches to sync for autoregister controller
I1124 14:00:09.567413 1 cache.go:39] Caches are synced for autoregister controller
I1124 14:00:09.570195 1 controller.go:624] quota admission added evaluator for: namespaces
E1124 14:00:09.602455 1 controller.go:146] "Failed to ensure lease exists, will retry" err="namespaces \"kube-system\" not found" interval="200ms"
I1124 14:00:09.654324 1 shared_informer.go:318] Caches are synced for node_authorizer
I1124 14:00:09.818311 1 controller.go:624] quota admission added evaluator for: leases.coordination.k8s.io
I1124 14:00:10.356017 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I1124 14:00:10.369141 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I1124 14:00:10.369180 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I1124 14:00:11.220927 1 controller.go:624] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1124 14:00:11.271999 1 controller.go:624] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1124 14:00:11.406464 1 alloc.go:330] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W1124 14:00:11.418391 1 lease.go:263] Resetting endpoints for master service "kubernetes" to [192.168.85.2]
I1124 14:00:11.420227 1 controller.go:624] quota admission added evaluator for: endpoints
I1124 14:00:11.426883 1 controller.go:624] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1124 14:00:11.578646 1 controller.go:624] quota admission added evaluator for: serviceaccounts
I1124 14:00:12.895802 1 controller.go:624] quota admission added evaluator for: deployments.apps
I1124 14:00:12.925996 1 alloc.go:330] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I1124 14:00:12.938109 1 controller.go:624] quota admission added evaluator for: daemonsets.apps
I1124 14:00:25.666171 1 controller.go:624] quota admission added evaluator for: replicasets.apps
I1124 14:00:25.763116 1 controller.go:624] quota admission added evaluator for: controllerrevisions.apps
==> kube-controller-manager [d422fb0577ca71bb502e21fc4c5afd81d722a57cf4424a6d0acafef3ae4afb9a] <==
I1124 14:00:25.810858 1 range_allocator.go:380] "Set node PodCIDR" node="old-k8s-version-318786" podCIDRs=["10.244.0.0/24"]
I1124 14:00:25.820650 1 event.go:307] "Event occurred" object="kube-system/kube-scheduler-old-k8s-version-318786" fieldPath="" kind="Pod" apiVersion="v1" type="Warning" reason="NodeNotReady" message="Node is not ready"
I1124 14:00:25.832097 1 event.go:307] "Event occurred" object="kube-system/kube-controller-manager-old-k8s-version-318786" fieldPath="" kind="Pod" apiVersion="v1" type="Warning" reason="NodeNotReady" message="Node is not ready"
I1124 14:00:25.835948 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-n7s8h"
I1124 14:00:25.836226 1 event.go:307] "Event occurred" object="kube-system/kindnet" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kindnet-z4rkx"
I1124 14:00:25.844347 1 event.go:307] "Event occurred" object="kube-system/kube-proxy" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-jwmdg"
I1124 14:00:25.872136 1 shared_informer.go:318] Caches are synced for HPA
I1124 14:00:25.873361 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-nhcwg"
I1124 14:00:25.905108 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="206.561387ms"
I1124 14:00:25.943326 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="37.986992ms"
I1124 14:00:25.943650 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="78.392µs"
I1124 14:00:26.225808 1 shared_informer.go:318] Caches are synced for garbage collector
I1124 14:00:26.225842 1 garbagecollector.go:166] "All resource monitors have synced. Proceeding to collect garbage"
I1124 14:00:26.240729 1 shared_informer.go:318] Caches are synced for garbage collector
I1124 14:00:27.499329 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set coredns-5dd5756b68 to 1 from 2"
I1124 14:00:27.521996 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: coredns-5dd5756b68-nhcwg"
I1124 14:00:27.537841 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="38.305665ms"
I1124 14:00:27.559719 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="21.827601ms"
I1124 14:00:27.559805 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="51.644µs"
I1124 14:00:39.122848 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="83.538µs"
I1124 14:00:39.150933 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="85.22µs"
I1124 14:00:40.276969 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="184.922µs"
I1124 14:00:40.328812 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="12.750431ms"
I1124 14:00:40.330201 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="64.312µs"
I1124 14:00:40.747463 1 node_lifecycle_controller.go:1048] "Controller detected that some Nodes are Ready. Exiting master disruption mode"
==> kube-proxy [e431b25999ece5eb3499ee68f2c85868448494e4787845d9737ad20b4a20f2f8] <==
I1124 14:00:26.865991 1 server_others.go:69] "Using iptables proxy"
I1124 14:00:26.884883 1 node.go:141] Successfully retrieved node IP: 192.168.85.2
I1124 14:00:26.934067 1 server.go:632] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I1124 14:00:26.935893 1 server_others.go:152] "Using iptables Proxier"
I1124 14:00:26.936119 1 server_others.go:421] "Detect-local-mode set to ClusterCIDR, but no cluster CIDR for family" ipFamily="IPv6"
I1124 14:00:26.936132 1 server_others.go:438] "Defaulting to no-op detect-local"
I1124 14:00:26.936170 1 proxier.go:251] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
I1124 14:00:26.936420 1 server.go:846] "Version info" version="v1.28.0"
I1124 14:00:26.936439 1 server.go:848] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1124 14:00:26.937512 1 config.go:188] "Starting service config controller"
I1124 14:00:26.937582 1 shared_informer.go:311] Waiting for caches to sync for service config
I1124 14:00:26.937602 1 config.go:97] "Starting endpoint slice config controller"
I1124 14:00:26.937606 1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
I1124 14:00:26.938430 1 config.go:315] "Starting node config controller"
I1124 14:00:26.938440 1 shared_informer.go:311] Waiting for caches to sync for node config
I1124 14:00:27.039179 1 shared_informer.go:318] Caches are synced for node config
I1124 14:00:27.039222 1 shared_informer.go:318] Caches are synced for service config
I1124 14:00:27.039271 1 shared_informer.go:318] Caches are synced for endpoint slice config
==> kube-scheduler [0769df21ce83c4995a35d15a4e7ae3000b8a5d86168fda1bff6738b8943c92ef] <==
W1124 14:00:10.860716 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E1124 14:00:10.860734 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
W1124 14:00:10.861473 1 reflector.go:535] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E1124 14:00:10.861503 1 reflector.go:147] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
W1124 14:00:10.866658 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E1124 14:00:10.866694 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
W1124 14:00:10.866737 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E1124 14:00:10.866752 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
W1124 14:00:10.867029 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E1124 14:00:10.867053 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
W1124 14:00:10.867116 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E1124 14:00:10.867134 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
W1124 14:00:10.867194 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E1124 14:00:10.867211 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
W1124 14:00:10.867277 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E1124 14:00:10.867299 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
W1124 14:00:10.869201 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E1124 14:00:10.869232 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
W1124 14:00:10.869290 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E1124 14:00:10.869420 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
W1124 14:00:10.869379 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E1124 14:00:10.869453 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
W1124 14:00:10.870338 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E1124 14:00:10.870513 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
I1124 14:00:11.746244 1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Nov 24 14:00:25 old-k8s-version-318786 kubelet[1527]: I1124 14:00:25.889706 1527 kuberuntime_manager.go:1463] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/24"
Nov 24 14:00:25 old-k8s-version-318786 kubelet[1527]: I1124 14:00:25.891508 1527 kubelet_network.go:61] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24"
Nov 24 14:00:25 old-k8s-version-318786 kubelet[1527]: I1124 14:00:25.894804 1527 topology_manager.go:215] "Topology Admit Handler" podUID="11a8b197-dd22-45df-9593-66d16fdefa80" podNamespace="kube-system" podName="kube-proxy-jwmdg"
Nov 24 14:00:25 old-k8s-version-318786 kubelet[1527]: I1124 14:00:25.914677 1527 topology_manager.go:215] "Topology Admit Handler" podUID="053d781f-846e-4391-a537-edd057019339" podNamespace="kube-system" podName="kindnet-z4rkx"
Nov 24 14:00:26 old-k8s-version-318786 kubelet[1527]: I1124 14:00:26.018048 1527 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/053d781f-846e-4391-a537-edd057019339-lib-modules\") pod \"kindnet-z4rkx\" (UID: \"053d781f-846e-4391-a537-edd057019339\") " pod="kube-system/kindnet-z4rkx"
Nov 24 14:00:26 old-k8s-version-318786 kubelet[1527]: I1124 14:00:26.018107 1527 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/11a8b197-dd22-45df-9593-66d16fdefa80-kube-proxy\") pod \"kube-proxy-jwmdg\" (UID: \"11a8b197-dd22-45df-9593-66d16fdefa80\") " pod="kube-system/kube-proxy-jwmdg"
Nov 24 14:00:26 old-k8s-version-318786 kubelet[1527]: I1124 14:00:26.018131 1527 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/11a8b197-dd22-45df-9593-66d16fdefa80-lib-modules\") pod \"kube-proxy-jwmdg\" (UID: \"11a8b197-dd22-45df-9593-66d16fdefa80\") " pod="kube-system/kube-proxy-jwmdg"
Nov 24 14:00:26 old-k8s-version-318786 kubelet[1527]: I1124 14:00:26.018158 1527 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/11a8b197-dd22-45df-9593-66d16fdefa80-xtables-lock\") pod \"kube-proxy-jwmdg\" (UID: \"11a8b197-dd22-45df-9593-66d16fdefa80\") " pod="kube-system/kube-proxy-jwmdg"
Nov 24 14:00:26 old-k8s-version-318786 kubelet[1527]: I1124 14:00:26.018212 1527 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2wldf\" (UniqueName: \"kubernetes.io/projected/053d781f-846e-4391-a537-edd057019339-kube-api-access-2wldf\") pod \"kindnet-z4rkx\" (UID: \"053d781f-846e-4391-a537-edd057019339\") " pod="kube-system/kindnet-z4rkx"
Nov 24 14:00:26 old-k8s-version-318786 kubelet[1527]: I1124 14:00:26.018240 1527 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-cfg\" (UniqueName: \"kubernetes.io/host-path/053d781f-846e-4391-a537-edd057019339-cni-cfg\") pod \"kindnet-z4rkx\" (UID: \"053d781f-846e-4391-a537-edd057019339\") " pod="kube-system/kindnet-z4rkx"
Nov 24 14:00:26 old-k8s-version-318786 kubelet[1527]: I1124 14:00:26.018265 1527 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/053d781f-846e-4391-a537-edd057019339-xtables-lock\") pod \"kindnet-z4rkx\" (UID: \"053d781f-846e-4391-a537-edd057019339\") " pod="kube-system/kindnet-z4rkx"
Nov 24 14:00:26 old-k8s-version-318786 kubelet[1527]: I1124 14:00:26.018289 1527 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zj98c\" (UniqueName: \"kubernetes.io/projected/11a8b197-dd22-45df-9593-66d16fdefa80-kube-api-access-zj98c\") pod \"kube-proxy-jwmdg\" (UID: \"11a8b197-dd22-45df-9593-66d16fdefa80\") " pod="kube-system/kube-proxy-jwmdg"
Nov 24 14:00:27 old-k8s-version-318786 kubelet[1527]: I1124 14:00:27.246948 1527 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kube-proxy-jwmdg" podStartSLOduration=2.246903083 podCreationTimestamp="2025-11-24 14:00:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 14:00:27.246446757 +0000 UTC m=+14.385235109" watchObservedRunningTime="2025-11-24 14:00:27.246903083 +0000 UTC m=+14.385691436"
Nov 24 14:00:33 old-k8s-version-318786 kubelet[1527]: I1124 14:00:33.074010 1527 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kindnet-z4rkx" podStartSLOduration=5.998729082 podCreationTimestamp="2025-11-24 14:00:25 +0000 UTC" firstStartedPulling="2025-11-24 14:00:26.522078288 +0000 UTC m=+13.660866641" lastFinishedPulling="2025-11-24 14:00:28.597316912 +0000 UTC m=+15.736105264" observedRunningTime="2025-11-24 14:00:29.252063076 +0000 UTC m=+16.390851428" watchObservedRunningTime="2025-11-24 14:00:33.073967705 +0000 UTC m=+20.212756058"
Nov 24 14:00:39 old-k8s-version-318786 kubelet[1527]: I1124 14:00:39.082518 1527 kubelet_node_status.go:493] "Fast updating node status as it just became ready"
Nov 24 14:00:39 old-k8s-version-318786 kubelet[1527]: I1124 14:00:39.121702 1527 topology_manager.go:215] "Topology Admit Handler" podUID="72202b02-1ca2-4c69-ad47-3f1ef90ba8ba" podNamespace="kube-system" podName="coredns-5dd5756b68-n7s8h"
Nov 24 14:00:39 old-k8s-version-318786 kubelet[1527]: I1124 14:00:39.132093 1527 topology_manager.go:215] "Topology Admit Handler" podUID="2298aa73-9529-42f0-a0ec-22197acfa4ba" podNamespace="kube-system" podName="storage-provisioner"
Nov 24 14:00:39 old-k8s-version-318786 kubelet[1527]: I1124 14:00:39.309362 1527 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-68nxx\" (UniqueName: \"kubernetes.io/projected/72202b02-1ca2-4c69-ad47-3f1ef90ba8ba-kube-api-access-68nxx\") pod \"coredns-5dd5756b68-n7s8h\" (UID: \"72202b02-1ca2-4c69-ad47-3f1ef90ba8ba\") " pod="kube-system/coredns-5dd5756b68-n7s8h"
Nov 24 14:00:39 old-k8s-version-318786 kubelet[1527]: I1124 14:00:39.309430 1527 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z64wd\" (UniqueName: \"kubernetes.io/projected/2298aa73-9529-42f0-a0ec-22197acfa4ba-kube-api-access-z64wd\") pod \"storage-provisioner\" (UID: \"2298aa73-9529-42f0-a0ec-22197acfa4ba\") " pod="kube-system/storage-provisioner"
Nov 24 14:00:39 old-k8s-version-318786 kubelet[1527]: I1124 14:00:39.309458 1527 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/72202b02-1ca2-4c69-ad47-3f1ef90ba8ba-config-volume\") pod \"coredns-5dd5756b68-n7s8h\" (UID: \"72202b02-1ca2-4c69-ad47-3f1ef90ba8ba\") " pod="kube-system/coredns-5dd5756b68-n7s8h"
Nov 24 14:00:39 old-k8s-version-318786 kubelet[1527]: I1124 14:00:39.309484 1527 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/2298aa73-9529-42f0-a0ec-22197acfa4ba-tmp\") pod \"storage-provisioner\" (UID: \"2298aa73-9529-42f0-a0ec-22197acfa4ba\") " pod="kube-system/storage-provisioner"
Nov 24 14:00:40 old-k8s-version-318786 kubelet[1527]: I1124 14:00:40.295007 1527 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/coredns-5dd5756b68-n7s8h" podStartSLOduration=15.294930673 podCreationTimestamp="2025-11-24 14:00:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 14:00:40.27945045 +0000 UTC m=+27.418238811" watchObservedRunningTime="2025-11-24 14:00:40.294930673 +0000 UTC m=+27.433719026"
Nov 24 14:00:40 old-k8s-version-318786 kubelet[1527]: I1124 14:00:40.313747 1527 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=13.313703157 podCreationTimestamp="2025-11-24 14:00:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 14:00:40.296336403 +0000 UTC m=+27.435124755" watchObservedRunningTime="2025-11-24 14:00:40.313703157 +0000 UTC m=+27.452491510"
Nov 24 14:00:42 old-k8s-version-318786 kubelet[1527]: I1124 14:00:42.439571 1527 topology_manager.go:215] "Topology Admit Handler" podUID="f566ecf0-6907-464c-9185-0f1cac06d38f" podNamespace="default" podName="busybox"
Nov 24 14:00:42 old-k8s-version-318786 kubelet[1527]: I1124 14:00:42.534626 1527 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t9mqp\" (UniqueName: \"kubernetes.io/projected/f566ecf0-6907-464c-9185-0f1cac06d38f-kube-api-access-t9mqp\") pod \"busybox\" (UID: \"f566ecf0-6907-464c-9185-0f1cac06d38f\") " pod="default/busybox"
==> storage-provisioner [33ca9b6d24a80a1f0470355c5dc5bf87df622a7ffd33dad20b3a66e3d42820fb] <==
I1124 14:00:39.762113 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1124 14:00:39.776081 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1124 14:00:39.776154 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I1124 14:00:39.787120 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I1124 14:00:39.787379 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_old-k8s-version-318786_ab0f5e48-32a3-4e29-9ee1-b1971bc22e35!
I1124 14:00:39.788450 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"fe6ba064-a6c2-4186-b355-eb48ac5eb1d0", APIVersion:"v1", ResourceVersion:"412", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' old-k8s-version-318786_ab0f5e48-32a3-4e29-9ee1-b1971bc22e35 became leader
I1124 14:00:39.888593 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_old-k8s-version-318786_ab0f5e48-32a3-4e29-9ee1-b1971bc22e35!
-- /stdout --
helpers_test.go:262: (dbg) Run: out/minikube-linux-arm64 status --format={{.APIServer}} -p old-k8s-version-318786 -n old-k8s-version-318786
helpers_test.go:269: (dbg) Run: kubectl --context old-k8s-version-318786 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:293: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: end of post-mortem logs <<<
helpers_test.go:294: ---------------------/post-mortem---------------------------------
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: docker inspect <======
helpers_test.go:239: (dbg) Run: docker inspect old-k8s-version-318786
helpers_test.go:243: (dbg) docker inspect old-k8s-version-318786:
-- stdout --
[
{
"Id": "a1a9c211e03d84dc290244440868edd560e068d58cbff839724b36106b46b8b5",
"Created": "2025-11-24T13:59:48.707287298Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 203512,
"ExitCode": 0,
"Error": "",
"StartedAt": "2025-11-24T13:59:48.794762344Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:572c983e466f1f784136812eef5cc59ac623db764bc7704d3676c4643993fd08",
"ResolvConfPath": "/var/lib/docker/containers/a1a9c211e03d84dc290244440868edd560e068d58cbff839724b36106b46b8b5/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/a1a9c211e03d84dc290244440868edd560e068d58cbff839724b36106b46b8b5/hostname",
"HostsPath": "/var/lib/docker/containers/a1a9c211e03d84dc290244440868edd560e068d58cbff839724b36106b46b8b5/hosts",
"LogPath": "/var/lib/docker/containers/a1a9c211e03d84dc290244440868edd560e068d58cbff839724b36106b46b8b5/a1a9c211e03d84dc290244440868edd560e068d58cbff839724b36106b46b8b5-json.log",
"Name": "/old-k8s-version-318786",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"old-k8s-version-318786:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {}
},
"NetworkMode": "old-k8s-version-318786",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 3221225472,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 6442450944,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"ID": "a1a9c211e03d84dc290244440868edd560e068d58cbff839724b36106b46b8b5",
"LowerDir": "/var/lib/docker/overlay2/6d81197f1905508bee65982ae14ace70a0ac50476483b3a6dbe6ee1b71c20126-init/diff:/var/lib/docker/overlay2/f206897dad0d7c6b66379aa7c75402ab98ba158a4fc5aedf84eda3d57da10430/diff",
"MergedDir": "/var/lib/docker/overlay2/6d81197f1905508bee65982ae14ace70a0ac50476483b3a6dbe6ee1b71c20126/merged",
"UpperDir": "/var/lib/docker/overlay2/6d81197f1905508bee65982ae14ace70a0ac50476483b3a6dbe6ee1b71c20126/diff",
"WorkDir": "/var/lib/docker/overlay2/6d81197f1905508bee65982ae14ace70a0ac50476483b3a6dbe6ee1b71c20126/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "old-k8s-version-318786",
"Source": "/var/lib/docker/volumes/old-k8s-version-318786/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "old-k8s-version-318786",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "old-k8s-version-318786",
"name.minikube.sigs.k8s.io": "old-k8s-version-318786",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "afc5451f55c0addfee2faf75046d85ee1aff51cfb29d1330d1b700fc0f910363",
"SandboxKey": "/var/run/docker/netns/afc5451f55c0",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33053"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33054"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33057"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33055"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33056"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"old-k8s-version-318786": {
"IPAMConfig": {
"IPv4Address": "192.168.85.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "aa:35:e5:9c:e1:30",
"DriverOpts": null,
"GwPriority": 0,
"NetworkID": "3c8da78d6dab92e1227f095e0039dcc72885109237746924b800f0f7e07a64d9",
"EndpointID": "c068219706ac0808a20d3010c587a2e59831507d8b6c4030ff3e4a62ce6b15dc",
"Gateway": "192.168.85.1",
"IPAddress": "192.168.85.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"old-k8s-version-318786",
"a1a9c211e03d"
]
}
}
}
}
]
-- /stdout --
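Rather than reading the whole dump, the fields that matter for this post-mortem can be pulled out with docker's Go-template formatter (a minimal sketch against the container shown above; the field paths follow the JSON in this dump):

    docker inspect old-k8s-version-318786 --format '{{json .HostConfig.Ulimits}}'        # "[]" above, i.e. no explicit ulimit override on the container
    docker inspect old-k8s-version-318786 --format '{{.HostConfig.Memory}} {{.HostConfig.NanoCpus}}'
    docker port old-k8s-version-318786                                                   # host-port mappings, cf. NetworkSettings.Ports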
helpers_test.go:247: (dbg) Run: out/minikube-linux-arm64 status --format={{.Host}} -p old-k8s-version-318786 -n old-k8s-version-318786
helpers_test.go:252: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-arm64 -p old-k8s-version-318786 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-arm64 -p old-k8s-version-318786 logs -n 25: (1.244760502s)
helpers_test.go:260: TestStartStop/group/old-k8s-version/serial/DeployApp logs:
-- stdout --
==> Audit <==
┌─────────┬─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬───────────────────────────┬─────────┬─────────┬─────────────────────┬───────────
──────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼───────────────────────────┼─────────┼─────────┼─────────────────────┼───────────
──────────┤
│ ssh │ -p cilium-803934 sudo systemctl status cri-docker --all --full --no-pager │ cilium-803934 │ jenkins │ v1.37.0 │ 24 Nov 25 13:58 UTC │ │
│ ssh │ -p cilium-803934 sudo systemctl cat cri-docker --no-pager │ cilium-803934 │ jenkins │ v1.37.0 │ 24 Nov 25 13:58 UTC │ │
│ ssh │ -p cilium-803934 sudo cat /etc/systemd/system/cri-docker.service.d/10-cni.conf │ cilium-803934 │ jenkins │ v1.37.0 │ 24 Nov 25 13:58 UTC │ │
│ ssh │ -p cilium-803934 sudo cat /usr/lib/systemd/system/cri-docker.service │ cilium-803934 │ jenkins │ v1.37.0 │ 24 Nov 25 13:58 UTC │ │
│ ssh │ -p cilium-803934 sudo cri-dockerd --version │ cilium-803934 │ jenkins │ v1.37.0 │ 24 Nov 25 13:58 UTC │ │
│ ssh │ -p cilium-803934 sudo systemctl status containerd --all --full --no-pager │ cilium-803934 │ jenkins │ v1.37.0 │ 24 Nov 25 13:58 UTC │ │
│ ssh │ -p cilium-803934 sudo systemctl cat containerd --no-pager │ cilium-803934 │ jenkins │ v1.37.0 │ 24 Nov 25 13:58 UTC │ │
│ ssh │ -p cilium-803934 sudo cat /lib/systemd/system/containerd.service │ cilium-803934 │ jenkins │ v1.37.0 │ 24 Nov 25 13:58 UTC │ │
│ ssh │ -p cilium-803934 sudo cat /etc/containerd/config.toml │ cilium-803934 │ jenkins │ v1.37.0 │ 24 Nov 25 13:58 UTC │ │
│ ssh │ -p cilium-803934 sudo containerd config dump │ cilium-803934 │ jenkins │ v1.37.0 │ 24 Nov 25 13:58 UTC │ │
│ ssh │ -p cilium-803934 sudo systemctl status crio --all --full --no-pager │ cilium-803934 │ jenkins │ v1.37.0 │ 24 Nov 25 13:58 UTC │ │
│ ssh │ -p cilium-803934 sudo systemctl cat crio --no-pager │ cilium-803934 │ jenkins │ v1.37.0 │ 24 Nov 25 13:58 UTC │ │
│ ssh │ -p cilium-803934 sudo find /etc/crio -type f -exec sh -c 'echo {}; cat {}' \; │ cilium-803934 │ jenkins │ v1.37.0 │ 24 Nov 25 13:58 UTC │ │
│ ssh │ -p cilium-803934 sudo crio config │ cilium-803934 │ jenkins │ v1.37.0 │ 24 Nov 25 13:58 UTC │ │
│ delete │ -p cilium-803934 │ cilium-803934 │ jenkins │ v1.37.0 │ 24 Nov 25 13:58 UTC │ 24 Nov 25 13:58 UTC │
│ start │ -p force-systemd-env-134839 --memory=3072 --alsologtostderr -v=5 --driver=docker --container-runtime=containerd │ force-systemd-env-134839 │ jenkins │ v1.37.0 │ 24 Nov 25 13:58 UTC │ 24 Nov 25 13:59 UTC │
│ delete │ -p kubernetes-upgrade-758885 │ kubernetes-upgrade-758885 │ jenkins │ v1.37.0 │ 24 Nov 25 13:58 UTC │ 24 Nov 25 13:58 UTC │
│ start │ -p cert-expiration-865605 --memory=3072 --cert-expiration=3m --driver=docker --container-runtime=containerd │ cert-expiration-865605 │ jenkins │ v1.37.0 │ 24 Nov 25 13:58 UTC │ 24 Nov 25 13:59 UTC │
│ ssh │ force-systemd-env-134839 ssh cat /etc/containerd/config.toml │ force-systemd-env-134839 │ jenkins │ v1.37.0 │ 24 Nov 25 13:59 UTC │ 24 Nov 25 13:59 UTC │
│ delete │ -p force-systemd-env-134839 │ force-systemd-env-134839 │ jenkins │ v1.37.0 │ 24 Nov 25 13:59 UTC │ 24 Nov 25 13:59 UTC │
│ start │ -p cert-options-440754 --memory=3072 --apiserver-ips=127.0.0.1 --apiserver-ips=192.168.15.15 --apiserver-names=localhost --apiserver-names=www.google.com --apiserver-port=8555 --driver=docker --container-runtime=containerd │ cert-options-440754 │ jenkins │ v1.37.0 │ 24 Nov 25 13:59 UTC │ 24 Nov 25 13:59 UTC │
│ ssh │ cert-options-440754 ssh openssl x509 -text -noout -in /var/lib/minikube/certs/apiserver.crt │ cert-options-440754 │ jenkins │ v1.37.0 │ 24 Nov 25 13:59 UTC │ 24 Nov 25 13:59 UTC │
│ ssh │ -p cert-options-440754 -- sudo cat /etc/kubernetes/admin.conf │ cert-options-440754 │ jenkins │ v1.37.0 │ 24 Nov 25 13:59 UTC │ 24 Nov 25 13:59 UTC │
│ delete │ -p cert-options-440754 │ cert-options-440754 │ jenkins │ v1.37.0 │ 24 Nov 25 13:59 UTC │ 24 Nov 25 13:59 UTC │
│ start │ -p old-k8s-version-318786 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.28.0 │ old-k8s-version-318786 │ jenkins │ v1.37.0 │ 24 Nov 25 13:59 UTC │ 24 Nov 25 14:00 UTC │
└─────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴───────────────────────────┴─────────┴─────────┴─────────────────────┴───────────
──────────┘
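The Audit table above is minikube's command audit trail for this host, one row per invocation of the minikube binary. If the raw records are needed (for example to see the full, unwrapped ARGS column), they live under the MINIKUBE_HOME printed a few lines below; the logs/audit.json filename here is an assumption from typical minikube layouts, not something printed in this log:

    tail -n 5 /home/jenkins/minikube-integration/21932-2368/.minikube/logs/audit.json    # assumed location of the audit records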
==> Last Start <==
Log file created at: 2025/11/24 13:59:42
Running on machine: ip-172-31-24-2
Binary: Built with gc go1.25.3 for linux/arm64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1124 13:59:42.406479 203121 out.go:360] Setting OutFile to fd 1 ...
I1124 13:59:42.406674 203121 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1124 13:59:42.406701 203121 out.go:374] Setting ErrFile to fd 2...
I1124 13:59:42.406722 203121 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1124 13:59:42.407140 203121 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21932-2368/.minikube/bin
I1124 13:59:42.407724 203121 out.go:368] Setting JSON to false
I1124 13:59:42.409260 203121 start.go:133] hostinfo: {"hostname":"ip-172-31-24-2","uptime":6131,"bootTime":1763986651,"procs":189,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1084-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"6d436adf-771e-4269-b9a3-c25fd4fca4f5"}
I1124 13:59:42.409372 203121 start.go:143] virtualization:
I1124 13:59:42.413282 203121 out.go:179] * [old-k8s-version-318786] minikube v1.37.0 on Ubuntu 20.04 (arm64)
I1124 13:59:42.417925 203121 out.go:179] - MINIKUBE_LOCATION=21932
I1124 13:59:42.418098 203121 notify.go:221] Checking for updates...
I1124 13:59:42.424905 203121 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1124 13:59:42.428148 203121 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21932-2368/kubeconfig
I1124 13:59:42.431322 203121 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21932-2368/.minikube
I1124 13:59:42.434379 203121 out.go:179] - MINIKUBE_BIN=out/minikube-linux-arm64
I1124 13:59:42.438100 203121 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1124 13:59:42.442160 203121 config.go:182] Loaded profile config "cert-expiration-865605": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1124 13:59:42.442285 203121 driver.go:422] Setting default libvirt URI to qemu:///system
I1124 13:59:42.470073 203121 docker.go:124] docker version: linux-28.1.1:Docker Engine - Community
I1124 13:59:42.470195 203121 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1124 13:59:42.532782 203121 info.go:266] docker info: {ID:J4M5:W6MX:GOX4:4LAQ:VI7E:VJNF:J3OP:OPBH:GF7G:PPY4:WQWD:7N4L Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:36 OomKillDisable:true NGoroutines:52 SystemTime:2025-11-24 13:59:42.52123261 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aa
rch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214831104 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-24-2 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path
:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I1124 13:59:42.532892 203121 docker.go:319] overlay module found
I1124 13:59:42.536185 203121 out.go:179] * Using the docker driver based on user configuration
I1124 13:59:42.539175 203121 start.go:309] selected driver: docker
I1124 13:59:42.539208 203121 start.go:927] validating driver "docker" against <nil>
I1124 13:59:42.539232 203121 start.go:938] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1124 13:59:42.540233 203121 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1124 13:59:42.601740 203121 info.go:266] docker info: {ID:J4M5:W6MX:GOX4:4LAQ:VI7E:VJNF:J3OP:OPBH:GF7G:PPY4:WQWD:7N4L Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:36 OomKillDisable:true NGoroutines:52 SystemTime:2025-11-24 13:59:42.592481576 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:a
arch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214831104 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-24-2 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Pat
h:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I1124 13:59:42.601887 203121 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I1124 13:59:42.602115 203121 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1124 13:59:42.605231 203121 out.go:179] * Using Docker driver with root privileges
I1124 13:59:42.608204 203121 cni.go:84] Creating CNI manager for ""
I1124 13:59:42.608281 203121 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1124 13:59:42.608296 203121 start_flags.go:336] Found "CNI" CNI - setting NetworkPlugin=cni
I1124 13:59:42.608380 203121 start.go:353] cluster config:
{Name:old-k8s-version-318786 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-318786 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local
ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSH
AuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1124 13:59:42.611704 203121 out.go:179] * Starting "old-k8s-version-318786" primary control-plane node in "old-k8s-version-318786" cluster
I1124 13:59:42.614615 203121 cache.go:134] Beginning downloading kic base image for docker with containerd
I1124 13:59:42.617691 203121 out.go:179] * Pulling base image v0.0.48-1763789673-21948 ...
I1124 13:59:42.620619 203121 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1124 13:59:42.620699 203121 preload.go:203] Found local preload: /home/jenkins/minikube-integration/21932-2368/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-arm64.tar.lz4
I1124 13:59:42.620716 203121 cache.go:65] Caching tarball of preloaded images
I1124 13:59:42.620714 203121 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f in local docker daemon
I1124 13:59:42.620820 203121 preload.go:238] Found /home/jenkins/minikube-integration/21932-2368/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-arm64.tar.lz4 in cache, skipping download
I1124 13:59:42.620838 203121 cache.go:68] Finished verifying existence of preloaded tar for v1.28.0 on containerd
I1124 13:59:42.620958 203121 profile.go:143] Saving config to /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/config.json ...
I1124 13:59:42.620983 203121 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/config.json: {Name:mkdbbadabe7d23b9f104ff19d81818950111a382 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:59:42.640749 203121 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f in local docker daemon, skipping pull
I1124 13:59:42.640776 203121 cache.go:158] gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f exists in daemon, skipping load
I1124 13:59:42.640802 203121 cache.go:240] Successfully downloaded all kic artifacts
I1124 13:59:42.640833 203121 start.go:360] acquireMachinesLock for old-k8s-version-318786: {Name:mkda208a8325231a646a1a7f876724cc4fca17ee Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 13:59:42.640958 203121 start.go:364] duration metric: took 103.057µs to acquireMachinesLock for "old-k8s-version-318786"
I1124 13:59:42.640986 203121 start.go:93] Provisioning new machine with config: &{Name:old-k8s-version-318786 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-318786 Namespace:default APIServerHAVIP:
APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:f
alse CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1124 13:59:42.641059 203121 start.go:125] createHost starting for "" (driver="docker")
I1124 13:59:42.644471 203121 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I1124 13:59:42.644694 203121 start.go:159] libmachine.API.Create for "old-k8s-version-318786" (driver="docker")
I1124 13:59:42.644747 203121 client.go:173] LocalClient.Create starting
I1124 13:59:42.644827 203121 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21932-2368/.minikube/certs/ca.pem
I1124 13:59:42.644867 203121 main.go:143] libmachine: Decoding PEM data...
I1124 13:59:42.644888 203121 main.go:143] libmachine: Parsing certificate...
I1124 13:59:42.644949 203121 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21932-2368/.minikube/certs/cert.pem
I1124 13:59:42.644971 203121 main.go:143] libmachine: Decoding PEM data...
I1124 13:59:42.644986 203121 main.go:143] libmachine: Parsing certificate...
I1124 13:59:42.645338 203121 cli_runner.go:164] Run: docker network inspect old-k8s-version-318786 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1124 13:59:42.661505 203121 cli_runner.go:211] docker network inspect old-k8s-version-318786 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1124 13:59:42.661596 203121 network_create.go:284] running [docker network inspect old-k8s-version-318786] to gather additional debugging logs...
I1124 13:59:42.661615 203121 cli_runner.go:164] Run: docker network inspect old-k8s-version-318786
W1124 13:59:42.677608 203121 cli_runner.go:211] docker network inspect old-k8s-version-318786 returned with exit code 1
I1124 13:59:42.677643 203121 network_create.go:287] error running [docker network inspect old-k8s-version-318786]: docker network inspect old-k8s-version-318786: exit status 1
stdout:
[]
stderr:
Error response from daemon: network old-k8s-version-318786 not found
I1124 13:59:42.677659 203121 network_create.go:289] output of [docker network inspect old-k8s-version-318786]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network old-k8s-version-318786 not found
** /stderr **
I1124 13:59:42.677758 203121 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1124 13:59:42.694925 203121 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-e5e15b13860d IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:12:3d:37:c4:cc:77} reservation:<nil>}
I1124 13:59:42.695253 203121 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-66593a990bce IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:de:c0:9b:bc:41:ca} reservation:<nil>}
I1124 13:59:42.695642 203121 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-37e9fb0954cf IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:32:0b:6f:6e:b2:8c} reservation:<nil>}
I1124 13:59:42.695904 203121 network.go:211] skipping subnet 192.168.76.0/24 that is taken: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName:br-5977b32dc412 IfaceIPv4:192.168.76.1 IfaceMTU:1500 IfaceMAC:82:75:42:7c:e9:e6} reservation:<nil>}
I1124 13:59:42.696411 203121 network.go:206] using free private subnet 192.168.85.0/24: &{IP:192.168.85.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.85.0/24 Gateway:192.168.85.1 ClientMin:192.168.85.2 ClientMax:192.168.85.254 Broadcast:192.168.85.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0x40019bcfe0}
I1124 13:59:42.696437 203121 network_create.go:124] attempt to create docker network old-k8s-version-318786 192.168.85.0/24 with gateway 192.168.85.1 and MTU of 1500 ...
I1124 13:59:42.696498 203121 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.85.0/24 --gateway=192.168.85.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=old-k8s-version-318786 old-k8s-version-318786
I1124 13:59:42.754268 203121 network_create.go:108] docker network old-k8s-version-318786 192.168.85.0/24 created
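At this point the dedicated bridge network exists; its subnet and gateway can be re-checked without the long template used above (a sketch, standard docker CLI only):

    docker network inspect old-k8s-version-318786 --format '{{range .IPAM.Config}}{{.Subnet}} via {{.Gateway}}{{end}}'    # expect 192.168.85.0/24 via 192.168.85.1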
I1124 13:59:42.754297 203121 kic.go:121] calculated static IP "192.168.85.2" for the "old-k8s-version-318786" container
I1124 13:59:42.754382 203121 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1124 13:59:42.771474 203121 cli_runner.go:164] Run: docker volume create old-k8s-version-318786 --label name.minikube.sigs.k8s.io=old-k8s-version-318786 --label created_by.minikube.sigs.k8s.io=true
I1124 13:59:42.789916 203121 oci.go:103] Successfully created a docker volume old-k8s-version-318786
I1124 13:59:42.790028 203121 cli_runner.go:164] Run: docker run --rm --name old-k8s-version-318786-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=old-k8s-version-318786 --entrypoint /usr/bin/test -v old-k8s-version-318786:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -d /var/lib
I1124 13:59:43.376934 203121 oci.go:107] Successfully prepared a docker volume old-k8s-version-318786
I1124 13:59:43.377002 203121 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1124 13:59:43.377014 203121 kic.go:194] Starting extracting preloaded images to volume ...
I1124 13:59:43.377093 203121 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21932-2368/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v old-k8s-version-318786:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -I lz4 -xf /preloaded.tar -C /extractDir
I1124 13:59:48.629782 203121 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21932-2368/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v old-k8s-version-318786:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -I lz4 -xf /preloaded.tar -C /extractDir: (5.252643456s)
I1124 13:59:48.629823 203121 kic.go:203] duration metric: took 5.252805903s to extract preloaded images to volume ...
W1124 13:59:48.629966 203121 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I1124 13:59:48.630073 203121 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1124 13:59:48.692534 203121 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname old-k8s-version-318786 --name old-k8s-version-318786 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=old-k8s-version-318786 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=old-k8s-version-318786 --network old-k8s-version-318786 --ip 192.168.85.2 --volume old-k8s-version-318786:/var --security-opt apparmor=unconfined --memory=3072mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f
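The --memory=3072mb --cpus=2 flags on this docker run are what show up earlier in the inspect dump as HostConfig.Memory=3221225472 and NanoCpus=2000000000; the conversion is simply MiB to bytes and CPUs to billionths of a CPU:

    echo $((3072 * 1024 * 1024))    # 3221225472 bytes
    echo $((2 * 1000000000))        # 2000000000 NanoCpus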
I1124 13:59:49.023181 203121 cli_runner.go:164] Run: docker container inspect old-k8s-version-318786 --format={{.State.Running}}
I1124 13:59:49.046529 203121 cli_runner.go:164] Run: docker container inspect old-k8s-version-318786 --format={{.State.Status}}
I1124 13:59:49.073693 203121 cli_runner.go:164] Run: docker exec old-k8s-version-318786 stat /var/lib/dpkg/alternatives/iptables
I1124 13:59:49.143680 203121 oci.go:144] the created container "old-k8s-version-318786" has a running status.
I1124 13:59:49.143714 203121 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21932-2368/.minikube/machines/old-k8s-version-318786/id_rsa...
I1124 13:59:49.471341 203121 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21932-2368/.minikube/machines/old-k8s-version-318786/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1124 13:59:49.501921 203121 cli_runner.go:164] Run: docker container inspect old-k8s-version-318786 --format={{.State.Status}}
I1124 13:59:49.532238 203121 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1124 13:59:49.532267 203121 kic_runner.go:114] Args: [docker exec --privileged old-k8s-version-318786 chown docker:docker /home/docker/.ssh/authorized_keys]
I1124 13:59:49.607023 203121 cli_runner.go:164] Run: docker container inspect old-k8s-version-318786 --format={{.State.Status}}
I1124 13:59:49.637450 203121 machine.go:94] provisionDockerMachine start ...
I1124 13:59:49.637558 203121 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-318786
I1124 13:59:49.663172 203121 main.go:143] libmachine: Using SSH client type: native
I1124 13:59:49.663576 203121 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3dad70] 0x3dd270 <nil> [] 0s} 127.0.0.1 33053 <nil> <nil>}
I1124 13:59:49.663586 203121 main.go:143] libmachine: About to run SSH command:
hostname
I1124 13:59:49.666892 203121 main.go:143] libmachine: Error dialing TCP: ssh: handshake failed: EOF
I1124 13:59:52.819647 203121 main.go:143] libmachine: SSH cmd err, output: <nil>: old-k8s-version-318786
I1124 13:59:52.819716 203121 ubuntu.go:182] provisioning hostname "old-k8s-version-318786"
I1124 13:59:52.819805 203121 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-318786
I1124 13:59:52.837381 203121 main.go:143] libmachine: Using SSH client type: native
I1124 13:59:52.837693 203121 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3dad70] 0x3dd270 <nil> [] 0s} 127.0.0.1 33053 <nil> <nil>}
I1124 13:59:52.837710 203121 main.go:143] libmachine: About to run SSH command:
sudo hostname old-k8s-version-318786 && echo "old-k8s-version-318786" | sudo tee /etc/hostname
I1124 13:59:53.001525 203121 main.go:143] libmachine: SSH cmd err, output: <nil>: old-k8s-version-318786
I1124 13:59:53.001631 203121 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-318786
I1124 13:59:53.024082 203121 main.go:143] libmachine: Using SSH client type: native
I1124 13:59:53.024554 203121 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3dad70] 0x3dd270 <nil> [] 0s} 127.0.0.1 33053 <nil> <nil>}
I1124 13:59:53.024610 203121 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sold-k8s-version-318786' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 old-k8s-version-318786/g' /etc/hosts;
else
echo '127.0.1.1 old-k8s-version-318786' | sudo tee -a /etc/hosts;
fi
fi
I1124 13:59:53.180483 203121 main.go:143] libmachine: SSH cmd err, output: <nil>:
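The small shell script above is the usual idempotent /etc/hosts fix-up: it only rewrites or appends the 127.0.1.1 line when the new hostname is not already present. Since this is the docker driver, the result can be checked directly with docker exec, the same mechanism the test already uses against this container:

    docker exec old-k8s-version-318786 grep 127.0.1.1 /etc/hosts    # should map to old-k8s-version-318786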
I1124 13:59:53.180555 203121 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21932-2368/.minikube CaCertPath:/home/jenkins/minikube-integration/21932-2368/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21932-2368/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21932-2368/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21932-2368/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21932-2368/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21932-2368/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21932-2368/.minikube}
I1124 13:59:53.180601 203121 ubuntu.go:190] setting up certificates
I1124 13:59:53.180641 203121 provision.go:84] configureAuth start
I1124 13:59:53.180754 203121 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-318786
I1124 13:59:53.197870 203121 provision.go:143] copyHostCerts
I1124 13:59:53.197937 203121 exec_runner.go:144] found /home/jenkins/minikube-integration/21932-2368/.minikube/key.pem, removing ...
I1124 13:59:53.197947 203121 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21932-2368/.minikube/key.pem
I1124 13:59:53.198026 203121 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21932-2368/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21932-2368/.minikube/key.pem (1679 bytes)
I1124 13:59:53.198115 203121 exec_runner.go:144] found /home/jenkins/minikube-integration/21932-2368/.minikube/ca.pem, removing ...
I1124 13:59:53.198120 203121 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21932-2368/.minikube/ca.pem
I1124 13:59:53.198145 203121 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21932-2368/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21932-2368/.minikube/ca.pem (1082 bytes)
I1124 13:59:53.198195 203121 exec_runner.go:144] found /home/jenkins/minikube-integration/21932-2368/.minikube/cert.pem, removing ...
I1124 13:59:53.198199 203121 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21932-2368/.minikube/cert.pem
I1124 13:59:53.198221 203121 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21932-2368/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21932-2368/.minikube/cert.pem (1123 bytes)
I1124 13:59:53.198264 203121 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21932-2368/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21932-2368/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21932-2368/.minikube/certs/ca-key.pem org=jenkins.old-k8s-version-318786 san=[127.0.0.1 192.168.85.2 localhost minikube old-k8s-version-318786]
I1124 13:59:53.447750 203121 provision.go:177] copyRemoteCerts
I1124 13:59:53.447821 203121 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1124 13:59:53.447859 203121 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-318786
I1124 13:59:53.466989 203121 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33053 SSHKeyPath:/home/jenkins/minikube-integration/21932-2368/.minikube/machines/old-k8s-version-318786/id_rsa Username:docker}
I1124 13:59:53.573838 203121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-2368/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1124 13:59:53.593131 203121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-2368/.minikube/machines/server.pem --> /etc/docker/server.pem (1233 bytes)
I1124 13:59:53.614562 203121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-2368/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I1124 13:59:53.634677 203121 provision.go:87] duration metric: took 453.994052ms to configureAuth
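configureAuth pushed the CA and a freshly generated server certificate into /etc/docker inside the node. If a TLS problem is suspected later, the SANs baked into that cert (listed in the provision.go:117 line above) can be read back; this sketch assumes openssl is available inside the kicbase image, which this log does not confirm:

    docker exec old-k8s-version-318786 openssl x509 -noout -text -in /etc/docker/server.pem | grep -A1 'Subject Alternative Name'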
I1124 13:59:53.634716 203121 ubuntu.go:206] setting minikube options for container-runtime
I1124 13:59:53.634894 203121 config.go:182] Loaded profile config "old-k8s-version-318786": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1124 13:59:53.634916 203121 machine.go:97] duration metric: took 3.997446316s to provisionDockerMachine
I1124 13:59:53.634923 203121 client.go:176] duration metric: took 10.990163165s to LocalClient.Create
I1124 13:59:53.634942 203121 start.go:167] duration metric: took 10.990248318s to libmachine.API.Create "old-k8s-version-318786"
I1124 13:59:53.634951 203121 start.go:293] postStartSetup for "old-k8s-version-318786" (driver="docker")
I1124 13:59:53.634967 203121 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1124 13:59:53.635028 203121 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1124 13:59:53.635072 203121 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-318786
I1124 13:59:53.651615 203121 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33053 SSHKeyPath:/home/jenkins/minikube-integration/21932-2368/.minikube/machines/old-k8s-version-318786/id_rsa Username:docker}
I1124 13:59:53.760351 203121 ssh_runner.go:195] Run: cat /etc/os-release
I1124 13:59:53.763787 203121 main.go:143] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1124 13:59:53.763818 203121 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1124 13:59:53.763831 203121 filesync.go:126] Scanning /home/jenkins/minikube-integration/21932-2368/.minikube/addons for local assets ...
I1124 13:59:53.763886 203121 filesync.go:126] Scanning /home/jenkins/minikube-integration/21932-2368/.minikube/files for local assets ...
I1124 13:59:53.764002 203121 filesync.go:149] local asset: /home/jenkins/minikube-integration/21932-2368/.minikube/files/etc/ssl/certs/41782.pem -> 41782.pem in /etc/ssl/certs
I1124 13:59:53.764116 203121 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1124 13:59:53.771607 203121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-2368/.minikube/files/etc/ssl/certs/41782.pem --> /etc/ssl/certs/41782.pem (1708 bytes)
I1124 13:59:53.790229 203121 start.go:296] duration metric: took 155.256983ms for postStartSetup
I1124 13:59:53.790653 203121 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-318786
I1124 13:59:53.807439 203121 profile.go:143] Saving config to /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/config.json ...
I1124 13:59:53.807757 203121 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1124 13:59:53.807816 203121 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-318786
I1124 13:59:53.825527 203121 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33053 SSHKeyPath:/home/jenkins/minikube-integration/21932-2368/.minikube/machines/old-k8s-version-318786/id_rsa Username:docker}
I1124 13:59:53.928742 203121 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1124 13:59:53.933408 203121 start.go:128] duration metric: took 11.29232535s to createHost
I1124 13:59:53.933433 203121 start.go:83] releasing machines lock for "old-k8s-version-318786", held for 11.292464025s
I1124 13:59:53.933507 203121 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-318786
I1124 13:59:53.950335 203121 ssh_runner.go:195] Run: cat /version.json
I1124 13:59:53.950395 203121 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-318786
I1124 13:59:53.950688 203121 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1124 13:59:53.950748 203121 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-318786
I1124 13:59:53.969960 203121 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33053 SSHKeyPath:/home/jenkins/minikube-integration/21932-2368/.minikube/machines/old-k8s-version-318786/id_rsa Username:docker}
I1124 13:59:53.970283 203121 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33053 SSHKeyPath:/home/jenkins/minikube-integration/21932-2368/.minikube/machines/old-k8s-version-318786/id_rsa Username:docker}
I1124 13:59:54.187220 203121 ssh_runner.go:195] Run: systemctl --version
I1124 13:59:54.193763 203121 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1124 13:59:54.197792 203121 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1124 13:59:54.197862 203121 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1124 13:59:54.225219 203121 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/10-crio-bridge.conflist.disabled] bridge cni config(s)
I1124 13:59:54.225241 203121 start.go:496] detecting cgroup driver to use...
I1124 13:59:54.225273 203121 detect.go:187] detected "cgroupfs" cgroup driver on host os
I1124 13:59:54.225319 203121 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1124 13:59:54.240905 203121 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1124 13:59:54.255129 203121 docker.go:218] disabling cri-docker service (if available) ...
I1124 13:59:54.255221 203121 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1124 13:59:54.274287 203121 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1124 13:59:54.293183 203121 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1124 13:59:54.421827 203121 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1124 13:59:54.546597 203121 docker.go:234] disabling docker service ...
I1124 13:59:54.546687 203121 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1124 13:59:54.569497 203121 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1124 13:59:54.583215 203121 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1124 13:59:54.700724 203121 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1124 13:59:54.819165 203121 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1124 13:59:54.832231 203121 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1124 13:59:54.851866 203121 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.9"|' /etc/containerd/config.toml"
I1124 13:59:54.862178 203121 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1124 13:59:54.871620 203121 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I1124 13:59:54.871738 203121 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I1124 13:59:54.882231 203121 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1124 13:59:54.891717 203121 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1124 13:59:54.901467 203121 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1124 13:59:54.910294 203121 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1124 13:59:54.918660 203121 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1124 13:59:54.927868 203121 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1124 13:59:54.937082 203121 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1124 13:59:54.946216 203121 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1124 13:59:54.954056 203121 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1124 13:59:54.961958 203121 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1124 13:59:55.097492 203121 ssh_runner.go:195] Run: sudo systemctl restart containerd
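containerd has just been restarted after the series of sed edits above (sandbox_image, SystemdCgroup=false, runc v2, conf_dir, enable_unprivileged_ports). A quick way to confirm those edits actually landed in the file the daemon reads (a sketch; the grep pattern just matches the keys edited above):

    docker exec old-k8s-version-318786 grep -E 'SystemdCgroup|sandbox_image|enable_unprivileged_ports' /etc/containerd/config.toml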
I1124 13:59:55.230526 203121 start.go:543] Will wait 60s for socket path /run/containerd/containerd.sock
I1124 13:59:55.230649 203121 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I1124 13:59:55.234998 203121 start.go:564] Will wait 60s for crictl version
I1124 13:59:55.235132 203121 ssh_runner.go:195] Run: which crictl
I1124 13:59:55.238882 203121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1124 13:59:55.268214 203121 start.go:580] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: v2.1.5
RuntimeApiVersion: v1
I1124 13:59:55.268356 203121 ssh_runner.go:195] Run: containerd --version
I1124 13:59:55.288303 203121 ssh_runner.go:195] Run: containerd --version
I1124 13:59:55.314523 203121 out.go:179] * Preparing Kubernetes v1.28.0 on containerd 2.1.5 ...
I1124 13:59:55.317381 203121 cli_runner.go:164] Run: docker network inspect old-k8s-version-318786 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1124 13:59:55.334289 203121 ssh_runner.go:195] Run: grep 192.168.85.1 host.minikube.internal$ /etc/hosts
I1124 13:59:55.338412 203121 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.85.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1124 13:59:55.348875 203121 kubeadm.go:884] updating cluster {Name:old-k8s-version-318786 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-318786 Namespace:default APIServerHAVIP: APIServerName:minik
ubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false Cu
stomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1124 13:59:55.349007 203121 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1124 13:59:55.349078 203121 ssh_runner.go:195] Run: sudo crictl images --output json
I1124 13:59:55.373604 203121 containerd.go:627] all images are preloaded for containerd runtime.
I1124 13:59:55.373629 203121 containerd.go:534] Images already preloaded, skipping extraction
I1124 13:59:55.373693 203121 ssh_runner.go:195] Run: sudo crictl images --output json
I1124 13:59:55.398685 203121 containerd.go:627] all images are preloaded for containerd runtime.
I1124 13:59:55.398711 203121 cache_images.go:86] Images are preloaded, skipping loading
I1124 13:59:55.398719 203121 kubeadm.go:935] updating node { 192.168.85.2 8443 v1.28.0 containerd true true} ...
I1124 13:59:55.398825 203121 kubeadm.go:947] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.28.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=old-k8s-version-318786 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.85.2
[Install]
config:
{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-318786 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
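The unit drop-in rendered above pins the kubelet binary path, node IP and hostname override. Once it has been written out (the scp lines a little further down place it at /etc/systemd/system/kubelet.service.d/10-kubeadm.conf), the effective unit plus drop-ins can be reviewed in one shot:

    docker exec old-k8s-version-318786 systemctl cat kubelet --no-pager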
I1124 13:59:55.398898 203121 ssh_runner.go:195] Run: sudo crictl info
I1124 13:59:55.429304 203121 cni.go:84] Creating CNI manager for ""
I1124 13:59:55.429328 203121 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1124 13:59:55.429372 203121 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1124 13:59:55.429403 203121 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.85.2 APIServerPort:8443 KubernetesVersion:v1.28.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:old-k8s-version-318786 NodeName:old-k8s-version-318786 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.85.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.85.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt
StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1124 13:59:55.429550 203121 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.85.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///run/containerd/containerd.sock
name: "old-k8s-version-318786"
kubeletExtraArgs:
node-ip: 192.168.85.2
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.85.2"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.28.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
I1124 13:59:55.429622 203121 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.28.0
I1124 13:59:55.437772 203121 binaries.go:51] Found k8s binaries, skipping transfer
I1124 13:59:55.437895 203121 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1124 13:59:55.445856 203121 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (326 bytes)
I1124 13:59:55.459167 203121 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1124 13:59:55.473519 203121 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2176 bytes)
I1124 13:59:55.487760 203121 ssh_runner.go:195] Run: grep 192.168.85.2 control-plane.minikube.internal$ /etc/hosts
I1124 13:59:55.491722 203121 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.85.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
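The two runs above first check whether /etc/hosts already maps control-plane.minikube.internal to 192.168.85.2 and, if not, rewrite the file: drop any stale line for that host name and append a fresh record. Below is a minimal Go sketch of the same idea, assuming direct write access to /etc/hosts; the real command goes through a temp file and sudo cp, and this is not minikube's actual code.

// Sketch only: emulate { grep -v ...; echo "192.168.85.2 control-plane.minikube.internal"; } > /etc/hosts
package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	const hostsPath = "/etc/hosts"
	const record = "192.168.85.2\tcontrol-plane.minikube.internal"

	data, err := os.ReadFile(hostsPath)
	if err != nil {
		panic(err)
	}
	var kept []string
	for _, line := range strings.Split(strings.TrimRight(string(data), "\n"), "\n") {
		// Like `grep -v $'\tcontrol-plane.minikube.internal$'`: drop any existing record.
		if strings.HasSuffix(line, "control-plane.minikube.internal") {
			continue
		}
		kept = append(kept, line)
	}
	kept = append(kept, record)
	if err := os.WriteFile(hostsPath, []byte(strings.Join(kept, "\n")+"\n"), 0644); err != nil {
		panic(err)
	}
	fmt.Println("updated", hostsPath)
}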
I1124 13:59:55.502994 203121 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1124 13:59:55.625341 203121 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1124 13:59:55.647018 203121 certs.go:69] Setting up /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786 for IP: 192.168.85.2
I1124 13:59:55.647099 203121 certs.go:195] generating shared ca certs ...
I1124 13:59:55.647130 203121 certs.go:227] acquiring lock for ca certs: {Name:mkcd8707c782acde0e57168c044a3df942dc4ab1 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:59:55.647322 203121 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21932-2368/.minikube/ca.key
I1124 13:59:55.647396 203121 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21932-2368/.minikube/proxy-client-ca.key
I1124 13:59:55.647432 203121 certs.go:257] generating profile certs ...
I1124 13:59:55.647513 203121 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/client.key
I1124 13:59:55.647551 203121 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/client.crt with IP's: []
I1124 13:59:56.033129 203121 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/client.crt ...
I1124 13:59:56.033212 203121 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/client.crt: {Name:mk69bb915606644e0645060fa46449dd65f83095 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:59:56.033449 203121 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/client.key ...
I1124 13:59:56.033488 203121 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/client.key: {Name:mkfa34a7c8b2d69c736fc1cfd2304ae49133ac4b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:59:56.033640 203121 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/apiserver.key.251f69ae
I1124 13:59:56.033684 203121 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/apiserver.crt.251f69ae with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.85.2]
I1124 13:59:56.281567 203121 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/apiserver.crt.251f69ae ...
I1124 13:59:56.281598 203121 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/apiserver.crt.251f69ae: {Name:mk572ed713bf0eec1d0b840d076729a08786aff7 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:59:56.281810 203121 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/apiserver.key.251f69ae ...
I1124 13:59:56.281825 203121 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/apiserver.key.251f69ae: {Name:mk6b112bdb309b7ed87e7e056627f1c30ccc769a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:59:56.281918 203121 certs.go:382] copying /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/apiserver.crt.251f69ae -> /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/apiserver.crt
I1124 13:59:56.281996 203121 certs.go:386] copying /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/apiserver.key.251f69ae -> /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/apiserver.key
I1124 13:59:56.282057 203121 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/proxy-client.key
I1124 13:59:56.282077 203121 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/proxy-client.crt with IP's: []
I1124 13:59:56.404952 203121 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/proxy-client.crt ...
I1124 13:59:56.404984 203121 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/proxy-client.crt: {Name:mk506f5bcd13da36d0e32b27db8471ef560cbc9f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:59:56.405167 203121 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/proxy-client.key ...
I1124 13:59:56.405182 203121 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/proxy-client.key: {Name:mk63bb1c02064c41d85f1d8bf24cb0b4a26d687a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:59:56.405366 203121 certs.go:484] found cert: /home/jenkins/minikube-integration/21932-2368/.minikube/certs/4178.pem (1338 bytes)
W1124 13:59:56.405416 203121 certs.go:480] ignoring /home/jenkins/minikube-integration/21932-2368/.minikube/certs/4178_empty.pem, impossibly tiny 0 bytes
I1124 13:59:56.405425 203121 certs.go:484] found cert: /home/jenkins/minikube-integration/21932-2368/.minikube/certs/ca-key.pem (1679 bytes)
I1124 13:59:56.405454 203121 certs.go:484] found cert: /home/jenkins/minikube-integration/21932-2368/.minikube/certs/ca.pem (1082 bytes)
I1124 13:59:56.405487 203121 certs.go:484] found cert: /home/jenkins/minikube-integration/21932-2368/.minikube/certs/cert.pem (1123 bytes)
I1124 13:59:56.405517 203121 certs.go:484] found cert: /home/jenkins/minikube-integration/21932-2368/.minikube/certs/key.pem (1679 bytes)
I1124 13:59:56.405566 203121 certs.go:484] found cert: /home/jenkins/minikube-integration/21932-2368/.minikube/files/etc/ssl/certs/41782.pem (1708 bytes)
I1124 13:59:56.406138 203121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-2368/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1124 13:59:56.425371 203121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-2368/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1124 13:59:56.445638 203121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-2368/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1124 13:59:56.465263 203121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-2368/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I1124 13:59:56.484806 203121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1432 bytes)
I1124 13:59:56.503229 203121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I1124 13:59:56.526839 203121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1124 13:59:56.546328 203121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-2368/.minikube/profiles/old-k8s-version-318786/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I1124 13:59:56.568330 203121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-2368/.minikube/certs/4178.pem --> /usr/share/ca-certificates/4178.pem (1338 bytes)
I1124 13:59:56.588914 203121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-2368/.minikube/files/etc/ssl/certs/41782.pem --> /usr/share/ca-certificates/41782.pem (1708 bytes)
I1124 13:59:56.609069 203121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-2368/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1124 13:59:56.635519 203121 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1124 13:59:56.649021 203121 ssh_runner.go:195] Run: openssl version
I1124 13:59:56.655362 203121 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/41782.pem && ln -fs /usr/share/ca-certificates/41782.pem /etc/ssl/certs/41782.pem"
I1124 13:59:56.664078 203121 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/41782.pem
I1124 13:59:56.667939 203121 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 24 13:21 /usr/share/ca-certificates/41782.pem
I1124 13:59:56.668018 203121 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/41782.pem
I1124 13:59:56.709276 203121 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/41782.pem /etc/ssl/certs/3ec20f2e.0"
I1124 13:59:56.717713 203121 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1124 13:59:56.725687 203121 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1124 13:59:56.729416 203121 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 24 13:14 /usr/share/ca-certificates/minikubeCA.pem
I1124 13:59:56.729511 203121 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1124 13:59:56.771028 203121 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1124 13:59:56.779345 203121 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/4178.pem && ln -fs /usr/share/ca-certificates/4178.pem /etc/ssl/certs/4178.pem"
I1124 13:59:56.787738 203121 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/4178.pem
I1124 13:59:56.792183 203121 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 24 13:21 /usr/share/ca-certificates/4178.pem
I1124 13:59:56.792289 203121 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/4178.pem
I1124 13:59:56.833374 203121 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/4178.pem /etc/ssl/certs/51391683.0"
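The test/ln pairs above install each extra CA under /etc/ssl/certs twice: once under its own name and once under the lookup name OpenSSL expects, <subject-hash>.0, where the hash is what `openssl x509 -hash -noout` prints (e.g. b5213941 for minikubeCA.pem). A rough Go sketch of that hashing-and-symlinking step follows; it is not the minikube implementation and assumes root plus an openssl binary on PATH.

package main

import (
	"os"
	"os/exec"
	"path/filepath"
	"strings"
)

func main() {
	// Certificates taken from the log above.
	certs := []string{
		"/usr/share/ca-certificates/41782.pem",
		"/usr/share/ca-certificates/minikubeCA.pem",
		"/usr/share/ca-certificates/4178.pem",
	}
	for _, cert := range certs {
		// Subject hash, e.g. "b5213941"; OpenSSL resolves CAs as <hash>.0 in /etc/ssl/certs.
		out, err := exec.Command("openssl", "x509", "-hash", "-noout", "-in", cert).Output()
		if err != nil {
			panic(err)
		}
		link := filepath.Join("/etc/ssl/certs", strings.TrimSpace(string(out))+".0")
		_ = os.Remove(link) // emulate the -f in `ln -fs`
		if err := os.Symlink(cert, link); err != nil {
			panic(err)
		}
	}
}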
I1124 13:59:56.841910 203121 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1124 13:59:56.845538 203121 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1124 13:59:56.845595 203121 kubeadm.go:401] StartCluster: {Name:old-k8s-version-318786 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-318786 Namespace:default APIServerHAVIP: APIServerName:minikube
CA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false Custo
mQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1124 13:59:56.845673 203121 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I1124 13:59:56.845734 203121 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I1124 13:59:56.874154 203121 cri.go:89] found id: ""
I1124 13:59:56.874225 203121 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1124 13:59:56.882169 203121 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1124 13:59:56.890196 203121 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I1124 13:59:56.890264 203121 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1124 13:59:56.898559 203121 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1124 13:59:56.898579 203121 kubeadm.go:158] found existing configuration files:
I1124 13:59:56.898629 203121 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1124 13:59:56.906476 203121 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1124 13:59:56.906616 203121 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1124 13:59:56.914551 203121 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1124 13:59:56.922673 203121 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1124 13:59:56.922748 203121 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1124 13:59:56.931103 203121 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1124 13:59:56.939465 203121 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1124 13:59:56.939567 203121 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1124 13:59:56.947086 203121 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1124 13:59:56.955210 203121 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1124 13:59:56.955302 203121 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1124 13:59:56.963184 203121 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.28.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1124 13:59:57.020344 203121 kubeadm.go:319] [init] Using Kubernetes version: v1.28.0
I1124 13:59:57.020647 203121 kubeadm.go:319] [preflight] Running pre-flight checks
I1124 13:59:57.059971 203121 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I1124 13:59:57.060049  203121 kubeadm.go:319] KERNEL_VERSION: 5.15.0-1084-aws
I1124 13:59:57.060090  203121 kubeadm.go:319] OS: Linux
I1124 13:59:57.060146  203121 kubeadm.go:319] CGROUPS_CPU: enabled
I1124 13:59:57.060199  203121 kubeadm.go:319] CGROUPS_CPUACCT: enabled
I1124 13:59:57.060249  203121 kubeadm.go:319] CGROUPS_CPUSET: enabled
I1124 13:59:57.060302  203121 kubeadm.go:319] CGROUPS_DEVICES: enabled
I1124 13:59:57.060354  203121 kubeadm.go:319] CGROUPS_FREEZER: enabled
I1124 13:59:57.060407  203121 kubeadm.go:319] CGROUPS_MEMORY: enabled
I1124 13:59:57.060457  203121 kubeadm.go:319] CGROUPS_PIDS: enabled
I1124 13:59:57.060509  203121 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I1124 13:59:57.060558  203121 kubeadm.go:319] CGROUPS_BLKIO: enabled
I1124 13:59:57.153578 203121 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1124 13:59:57.153733 203121 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1124 13:59:57.153905  203121 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I1124 13:59:57.330900 203121 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1124 13:59:57.336703 203121 out.go:252] - Generating certificates and keys ...
I1124 13:59:57.336796 203121 kubeadm.go:319] [certs] Using existing ca certificate authority
I1124 13:59:57.336870 203121 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1124 13:59:57.865889 203121 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I1124 13:59:58.185353 203121 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I1124 13:59:59.130735 203121 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1124 13:59:59.642294 203121 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1124 13:59:59.906079 203121 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1124 13:59:59.906451 203121 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [localhost old-k8s-version-318786] and IPs [192.168.85.2 127.0.0.1 ::1]
I1124 14:00:00.123407 203121 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1124 14:00:00.123551 203121 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [localhost old-k8s-version-318786] and IPs [192.168.85.2 127.0.0.1 ::1]
I1124 14:00:00.270762 203121 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1124 14:00:01.217860 203121 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1124 14:00:01.724986 203121 kubeadm.go:319] [certs] Generating "sa" key and public key
I1124 14:00:01.740375 203121 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1124 14:00:02.910438 203121 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1124 14:00:03.183161 203121 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1124 14:00:03.857453 203121 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1124 14:00:04.272263 203121 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1124 14:00:04.273275 203121 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1124 14:00:04.276092 203121 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1124 14:00:04.279552 203121 out.go:252] - Booting up control plane ...
I1124 14:00:04.279655 203121 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1124 14:00:04.279733 203121 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1124 14:00:04.279800 203121 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1124 14:00:04.298211 203121 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1124 14:00:04.298994 203121 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1124 14:00:04.299292 203121 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1124 14:00:04.444292 203121 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
I1124 14:00:11.451050 203121 kubeadm.go:319] [apiclient] All control plane components are healthy after 7.006841 seconds
I1124 14:00:11.451179 203121 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1124 14:00:11.470632 203121 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1124 14:00:12.039593 203121 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1124 14:00:12.039804 203121 kubeadm.go:319] [mark-control-plane] Marking the node old-k8s-version-318786 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1124 14:00:12.553048 203121 kubeadm.go:319] [bootstrap-token] Using token: lzgex3.uugtb4pr04721m2a
I1124 14:00:12.555996 203121 out.go:252] - Configuring RBAC rules ...
I1124 14:00:12.556126 203121 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1124 14:00:12.561765 203121 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1124 14:00:12.571340 203121 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1124 14:00:12.578855  203121 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller to automatically approve CSRs from a Node Bootstrap Token
I1124 14:00:12.583307 203121 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1124 14:00:12.587409 203121 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1124 14:00:12.604367 203121 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1124 14:00:12.927512 203121 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1124 14:00:12.998711 203121 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1124 14:00:13.009044 203121 kubeadm.go:319]
I1124 14:00:13.009136 203121 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1124 14:00:13.009150 203121 kubeadm.go:319]
I1124 14:00:13.009228 203121 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1124 14:00:13.009237 203121 kubeadm.go:319]
I1124 14:00:13.009262 203121 kubeadm.go:319] mkdir -p $HOME/.kube
I1124 14:00:13.009867 203121 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1124 14:00:13.009932 203121 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1124 14:00:13.009946 203121 kubeadm.go:319]
I1124 14:00:13.010001 203121 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1124 14:00:13.010013 203121 kubeadm.go:319]
I1124 14:00:13.010061 203121 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1124 14:00:13.010069 203121 kubeadm.go:319]
I1124 14:00:13.010122 203121 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1124 14:00:13.010202 203121 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1124 14:00:13.010274 203121 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1124 14:00:13.010281 203121 kubeadm.go:319]
I1124 14:00:13.010670 203121 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1124 14:00:13.010761 203121 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1124 14:00:13.010771 203121 kubeadm.go:319]
I1124 14:00:13.011083 203121 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token lzgex3.uugtb4pr04721m2a \
I1124 14:00:13.011197 203121 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:aa948289582a95f47bab77808ca51e5d74f41a914fe1740ab9448815f8011aac \
I1124 14:00:13.011480 203121 kubeadm.go:319] --control-plane
I1124 14:00:13.011502 203121 kubeadm.go:319]
I1124 14:00:13.011780 203121 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1124 14:00:13.011795 203121 kubeadm.go:319]
I1124 14:00:13.012105 203121 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token lzgex3.uugtb4pr04721m2a \
I1124 14:00:13.012432 203121 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:aa948289582a95f47bab77808ca51e5d74f41a914fe1740ab9448815f8011aac
I1124 14:00:13.016246 203121 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1084-aws\n", err: exit status 1
I1124 14:00:13.016372 203121 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
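The join commands printed above carry a --discovery-token-ca-cert-hash value. Per kubeadm's documentation, that value is the SHA-256 of the cluster CA certificate's Subject Public Key Info; the sketch below recomputes it from the CA file used throughout this log. This is an illustration, not part of the test.

package main

import (
	"crypto/sha256"
	"crypto/x509"
	"encoding/hex"
	"encoding/pem"
	"fmt"
	"os"
)

func main() {
	// CA path as used throughout this log.
	data, err := os.ReadFile("/var/lib/minikube/certs/ca.crt")
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		panic("no PEM block in ca.crt")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}
	// Hash over the Subject Public Key Info, as kubeadm documents for --discovery-token-ca-cert-hash.
	sum := sha256.Sum256(cert.RawSubjectPublicKeyInfo)
	fmt.Println("sha256:" + hex.EncodeToString(sum[:]))
}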
I1124 14:00:13.016396 203121 cni.go:84] Creating CNI manager for ""
I1124 14:00:13.016409 203121 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1124 14:00:13.019688 203121 out.go:179] * Configuring CNI (Container Networking Interface) ...
I1124 14:00:13.022683 203121 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I1124 14:00:13.034888 203121 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.28.0/kubectl ...
I1124 14:00:13.034906 203121 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I1124 14:00:13.059514 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I1124 14:00:14.290955 203121 ssh_runner.go:235] Completed: sudo /var/lib/minikube/binaries/v1.28.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml: (1.231358047s)
I1124 14:00:14.291008 203121 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1124 14:00:14.291124 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:14.291189 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes old-k8s-version-318786 minikube.k8s.io/updated_at=2025_11_24T14_00_14_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=b5d1c9f4e75f4e638a533695fd62619949cefcab minikube.k8s.io/name=old-k8s-version-318786 minikube.k8s.io/primary=true
I1124 14:00:14.451653 203121 ops.go:34] apiserver oom_adj: -16
I1124 14:00:14.451772 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:14.952612 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:15.452444 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:15.952508 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:16.452482 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:16.952838 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:17.452425 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:17.951984 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:18.452384 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:18.952884 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:19.452844 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:19.951825 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:20.452041 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:20.954241 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:21.452323 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:21.952432 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:22.451804 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:22.951865 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:23.452374 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:23.952376 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:24.452544 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:24.952573 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:25.451889 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:25.951879 203121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 14:00:26.060844 203121 kubeadm.go:1114] duration metric: took 11.769763814s to wait for elevateKubeSystemPrivileges
I1124 14:00:26.060873 203121 kubeadm.go:403] duration metric: took 29.215284106s to StartCluster
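The burst of `kubectl get sa default` runs above is a poll: minikube retries roughly every half second until the "default" service account exists, which is what the elevateKubeSystemPrivileges duration metric measures. A simplified sketch of such a wait loop, shelling out to the same kubectl binary; the 2-minute deadline is an assumption, not a value from the log.

package main

import (
	"fmt"
	"os/exec"
	"time"
)

func main() {
	const kubectl = "/var/lib/minikube/binaries/v1.28.0/kubectl"
	const kubeconfig = "/var/lib/minikube/kubeconfig"

	deadline := time.Now().Add(2 * time.Minute) // illustrative timeout
	for time.Now().Before(deadline) {
		// Same probe the log shows: does the "default" service account exist yet?
		err := exec.Command(kubectl, "get", "sa", "default", "--kubeconfig="+kubeconfig).Run()
		if err == nil {
			fmt.Println("default service account is present")
			return
		}
		time.Sleep(500 * time.Millisecond)
	}
	fmt.Println("timed out waiting for the default service account")
}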
I1124 14:00:26.060891 203121 settings.go:142] acquiring lock: {Name:mk2b0bbff4d8ced468f457362668d43b813dc062 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 14:00:26.060955 203121 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21932-2368/kubeconfig
I1124 14:00:26.061937 203121 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-2368/kubeconfig: {Name:mk246d21eaffbd8aca2abdc1b2f89d6fcc902f28 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 14:00:26.062157 203121 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1124 14:00:26.062320 203121 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1124 14:00:26.062598 203121 config.go:182] Loaded profile config "old-k8s-version-318786": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1124 14:00:26.062635 203121 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1124 14:00:26.062693 203121 addons.go:70] Setting storage-provisioner=true in profile "old-k8s-version-318786"
I1124 14:00:26.062708 203121 addons.go:239] Setting addon storage-provisioner=true in "old-k8s-version-318786"
I1124 14:00:26.062728 203121 host.go:66] Checking if "old-k8s-version-318786" exists ...
I1124 14:00:26.063138 203121 addons.go:70] Setting default-storageclass=true in profile "old-k8s-version-318786"
I1124 14:00:26.063163 203121 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "old-k8s-version-318786"
I1124 14:00:26.063454 203121 cli_runner.go:164] Run: docker container inspect old-k8s-version-318786 --format={{.State.Status}}
I1124 14:00:26.063514 203121 cli_runner.go:164] Run: docker container inspect old-k8s-version-318786 --format={{.State.Status}}
I1124 14:00:26.066058 203121 out.go:179] * Verifying Kubernetes components...
I1124 14:00:26.069103 203121 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1124 14:00:26.111201 203121 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1124 14:00:26.116253 203121 addons.go:239] Setting addon default-storageclass=true in "old-k8s-version-318786"
I1124 14:00:26.116292 203121 host.go:66] Checking if "old-k8s-version-318786" exists ...
I1124 14:00:26.116709 203121 cli_runner.go:164] Run: docker container inspect old-k8s-version-318786 --format={{.State.Status}}
I1124 14:00:26.116830 203121 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1124 14:00:26.116844 203121 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1124 14:00:26.116892 203121 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-318786
I1124 14:00:26.150658 203121 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33053 SSHKeyPath:/home/jenkins/minikube-integration/21932-2368/.minikube/machines/old-k8s-version-318786/id_rsa Username:docker}
I1124 14:00:26.161222 203121 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1124 14:00:26.161243 203121 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1124 14:00:26.161315 203121 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-318786
I1124 14:00:26.189630 203121 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33053 SSHKeyPath:/home/jenkins/minikube-integration/21932-2368/.minikube/machines/old-k8s-version-318786/id_rsa Username:docker}
I1124 14:00:26.424488 203121 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.85.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I1124 14:00:26.425624 203121 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1124 14:00:26.485066 203121 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1124 14:00:26.513639 203121 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1124 14:00:27.429647 203121 ssh_runner.go:235] Completed: sudo systemctl start kubelet: (1.003904202s)
I1124 14:00:27.430570 203121 node_ready.go:35] waiting up to 6m0s for node "old-k8s-version-318786" to be "Ready" ...
I1124 14:00:27.431403 203121 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.85.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -": (1.005751061s)
I1124 14:00:27.431468 203121 start.go:977] {"host.minikube.internal": 192.168.85.1} host record injected into CoreDNS's ConfigMap
I1124 14:00:27.809398 203121 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (1.324299578s)
I1124 14:00:27.809491 203121 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: (1.295828834s)
I1124 14:00:27.819279 203121 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
I1124 14:00:27.822260 203121 addons.go:530] duration metric: took 1.759614941s for enable addons: enabled=[storage-provisioner default-storageclass]
I1124 14:00:27.936206 203121 kapi.go:214] "coredns" deployment in "kube-system" namespace and "old-k8s-version-318786" context rescaled to 1 replicas
W1124 14:00:29.434630 203121 node_ready.go:57] node "old-k8s-version-318786" has "Ready":"False" status (will retry)
W1124 14:00:31.933623 203121 node_ready.go:57] node "old-k8s-version-318786" has "Ready":"False" status (will retry)
W1124 14:00:33.934195 203121 node_ready.go:57] node "old-k8s-version-318786" has "Ready":"False" status (will retry)
W1124 14:00:36.434086 203121 node_ready.go:57] node "old-k8s-version-318786" has "Ready":"False" status (will retry)
W1124 14:00:38.434475 203121 node_ready.go:57] node "old-k8s-version-318786" has "Ready":"False" status (will retry)
I1124 14:00:39.437064 203121 node_ready.go:49] node "old-k8s-version-318786" is "Ready"
I1124 14:00:39.437091 203121 node_ready.go:38] duration metric: took 12.006466784s for node "old-k8s-version-318786" to be "Ready" ...
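The node_ready wait above polls the node object until its Ready condition turns "True"; here that takes about 12 seconds. A minimal way to express the same check with kubectl and a JSONPath query; the 2-second poll interval and the use of kubectl rather than the Go client are assumptions.

package main

import (
	"fmt"
	"os/exec"
	"strings"
	"time"
)

func main() {
	const kubectl = "/var/lib/minikube/binaries/v1.28.0/kubectl"
	const kubeconfig = "/var/lib/minikube/kubeconfig"
	const node = "old-k8s-version-318786"

	for {
		// Extract just the status of the node's Ready condition ("True"/"False"/"Unknown").
		out, err := exec.Command(kubectl, "get", "node", node,
			"-o", `jsonpath={.status.conditions[?(@.type=="Ready")].status}`,
			"--kubeconfig="+kubeconfig).Output()
		if err == nil && strings.TrimSpace(string(out)) == "True" {
			fmt.Println("node is Ready")
			return
		}
		time.Sleep(2 * time.Second)
	}
}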
I1124 14:00:39.437104 203121 api_server.go:52] waiting for apiserver process to appear ...
I1124 14:00:39.437165 203121 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1124 14:00:39.453227 203121 api_server.go:72] duration metric: took 13.391041621s to wait for apiserver process to appear ...
I1124 14:00:39.453251 203121 api_server.go:88] waiting for apiserver healthz status ...
I1124 14:00:39.453271 203121 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
I1124 14:00:39.462068 203121 api_server.go:279] https://192.168.85.2:8443/healthz returned 200:
ok
I1124 14:00:39.463530 203121 api_server.go:141] control plane version: v1.28.0
I1124 14:00:39.463554 203121 api_server.go:131] duration metric: took 10.295662ms to wait for apiserver health ...
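The healthz wait hits the API server's /healthz endpoint directly over HTTPS, treats the cluster as healthy once it answers 200 "ok", and then reads the control-plane version. A bare-bones probe along those lines; TLS verification is skipped here purely to keep the sketch short, and a real check should trust the cluster CA instead.

package main

import (
	"crypto/tls"
	"fmt"
	"io"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{
		Timeout: 5 * time.Second,
		Transport: &http.Transport{
			// Sketch shortcut only: do NOT skip verification in real code.
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Get("https://192.168.85.2:8443/healthz")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Printf("%d %s\n", resp.StatusCode, body) // the log above shows 200 and "ok"
}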
I1124 14:00:39.463563 203121 system_pods.go:43] waiting for kube-system pods to appear ...
I1124 14:00:39.467352 203121 system_pods.go:59] 8 kube-system pods found
I1124 14:00:39.467391 203121 system_pods.go:61] "coredns-5dd5756b68-n7s8h" [72202b02-1ca2-4c69-ad47-3f1ef90ba8ba] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 14:00:39.467397 203121 system_pods.go:61] "etcd-old-k8s-version-318786" [dd78fff3-f901-4dc0-9c77-907dbb69e36d] Running
I1124 14:00:39.467402 203121 system_pods.go:61] "kindnet-z4rkx" [053d781f-846e-4391-a537-edd057019339] Running
I1124 14:00:39.467406 203121 system_pods.go:61] "kube-apiserver-old-k8s-version-318786" [7f0596ec-97f5-4a70-974d-38c5d9a51273] Running
I1124 14:00:39.467410 203121 system_pods.go:61] "kube-controller-manager-old-k8s-version-318786" [4ae0e32a-b5f2-4e37-82d1-d76bfabbedd5] Running
I1124 14:00:39.467414 203121 system_pods.go:61] "kube-proxy-jwmdg" [11a8b197-dd22-45df-9593-66d16fdefa80] Running
I1124 14:00:39.467418 203121 system_pods.go:61] "kube-scheduler-old-k8s-version-318786" [01641e80-7a9e-48c2-b9e3-d384beab62d7] Running
I1124 14:00:39.467423 203121 system_pods.go:61] "storage-provisioner" [2298aa73-9529-42f0-a0ec-22197acfa4ba] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 14:00:39.467428 203121 system_pods.go:74] duration metric: took 3.859916ms to wait for pod list to return data ...
I1124 14:00:39.467435 203121 default_sa.go:34] waiting for default service account to be created ...
I1124 14:00:39.470040 203121 default_sa.go:45] found service account: "default"
I1124 14:00:39.470060 203121 default_sa.go:55] duration metric: took 2.619768ms for default service account to be created ...
I1124 14:00:39.470070 203121 system_pods.go:116] waiting for k8s-apps to be running ...
I1124 14:00:39.473490 203121 system_pods.go:86] 8 kube-system pods found
I1124 14:00:39.473522 203121 system_pods.go:89] "coredns-5dd5756b68-n7s8h" [72202b02-1ca2-4c69-ad47-3f1ef90ba8ba] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 14:00:39.473528 203121 system_pods.go:89] "etcd-old-k8s-version-318786" [dd78fff3-f901-4dc0-9c77-907dbb69e36d] Running
I1124 14:00:39.473534 203121 system_pods.go:89] "kindnet-z4rkx" [053d781f-846e-4391-a537-edd057019339] Running
I1124 14:00:39.473539 203121 system_pods.go:89] "kube-apiserver-old-k8s-version-318786" [7f0596ec-97f5-4a70-974d-38c5d9a51273] Running
I1124 14:00:39.473543 203121 system_pods.go:89] "kube-controller-manager-old-k8s-version-318786" [4ae0e32a-b5f2-4e37-82d1-d76bfabbedd5] Running
I1124 14:00:39.473547 203121 system_pods.go:89] "kube-proxy-jwmdg" [11a8b197-dd22-45df-9593-66d16fdefa80] Running
I1124 14:00:39.473552 203121 system_pods.go:89] "kube-scheduler-old-k8s-version-318786" [01641e80-7a9e-48c2-b9e3-d384beab62d7] Running
I1124 14:00:39.473558 203121 system_pods.go:89] "storage-provisioner" [2298aa73-9529-42f0-a0ec-22197acfa4ba] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 14:00:39.473585 203121 retry.go:31] will retry after 299.487693ms: missing components: kube-dns
I1124 14:00:39.780995 203121 system_pods.go:86] 8 kube-system pods found
I1124 14:00:39.781029 203121 system_pods.go:89] "coredns-5dd5756b68-n7s8h" [72202b02-1ca2-4c69-ad47-3f1ef90ba8ba] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 14:00:39.781036 203121 system_pods.go:89] "etcd-old-k8s-version-318786" [dd78fff3-f901-4dc0-9c77-907dbb69e36d] Running
I1124 14:00:39.781043 203121 system_pods.go:89] "kindnet-z4rkx" [053d781f-846e-4391-a537-edd057019339] Running
I1124 14:00:39.781047 203121 system_pods.go:89] "kube-apiserver-old-k8s-version-318786" [7f0596ec-97f5-4a70-974d-38c5d9a51273] Running
I1124 14:00:39.781051 203121 system_pods.go:89] "kube-controller-manager-old-k8s-version-318786" [4ae0e32a-b5f2-4e37-82d1-d76bfabbedd5] Running
I1124 14:00:39.781055 203121 system_pods.go:89] "kube-proxy-jwmdg" [11a8b197-dd22-45df-9593-66d16fdefa80] Running
I1124 14:00:39.781061 203121 system_pods.go:89] "kube-scheduler-old-k8s-version-318786" [01641e80-7a9e-48c2-b9e3-d384beab62d7] Running
I1124 14:00:39.781067 203121 system_pods.go:89] "storage-provisioner" [2298aa73-9529-42f0-a0ec-22197acfa4ba] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 14:00:39.781080 203121 retry.go:31] will retry after 249.97776ms: missing components: kube-dns
I1124 14:00:40.063092 203121 system_pods.go:86] 8 kube-system pods found
I1124 14:00:40.063130 203121 system_pods.go:89] "coredns-5dd5756b68-n7s8h" [72202b02-1ca2-4c69-ad47-3f1ef90ba8ba] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 14:00:40.063139 203121 system_pods.go:89] "etcd-old-k8s-version-318786" [dd78fff3-f901-4dc0-9c77-907dbb69e36d] Running
I1124 14:00:40.063145 203121 system_pods.go:89] "kindnet-z4rkx" [053d781f-846e-4391-a537-edd057019339] Running
I1124 14:00:40.063149 203121 system_pods.go:89] "kube-apiserver-old-k8s-version-318786" [7f0596ec-97f5-4a70-974d-38c5d9a51273] Running
I1124 14:00:40.063180 203121 system_pods.go:89] "kube-controller-manager-old-k8s-version-318786" [4ae0e32a-b5f2-4e37-82d1-d76bfabbedd5] Running
I1124 14:00:40.063193 203121 system_pods.go:89] "kube-proxy-jwmdg" [11a8b197-dd22-45df-9593-66d16fdefa80] Running
I1124 14:00:40.063198 203121 system_pods.go:89] "kube-scheduler-old-k8s-version-318786" [01641e80-7a9e-48c2-b9e3-d384beab62d7] Running
I1124 14:00:40.063203 203121 system_pods.go:89] "storage-provisioner" [2298aa73-9529-42f0-a0ec-22197acfa4ba] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 14:00:40.063219 203121 retry.go:31] will retry after 436.268576ms: missing components: kube-dns
I1124 14:00:40.504166 203121 system_pods.go:86] 8 kube-system pods found
I1124 14:00:40.504245 203121 system_pods.go:89] "coredns-5dd5756b68-n7s8h" [72202b02-1ca2-4c69-ad47-3f1ef90ba8ba] Running
I1124 14:00:40.504259 203121 system_pods.go:89] "etcd-old-k8s-version-318786" [dd78fff3-f901-4dc0-9c77-907dbb69e36d] Running
I1124 14:00:40.504264 203121 system_pods.go:89] "kindnet-z4rkx" [053d781f-846e-4391-a537-edd057019339] Running
I1124 14:00:40.504269 203121 system_pods.go:89] "kube-apiserver-old-k8s-version-318786" [7f0596ec-97f5-4a70-974d-38c5d9a51273] Running
I1124 14:00:40.504274 203121 system_pods.go:89] "kube-controller-manager-old-k8s-version-318786" [4ae0e32a-b5f2-4e37-82d1-d76bfabbedd5] Running
I1124 14:00:40.504279 203121 system_pods.go:89] "kube-proxy-jwmdg" [11a8b197-dd22-45df-9593-66d16fdefa80] Running
I1124 14:00:40.504283 203121 system_pods.go:89] "kube-scheduler-old-k8s-version-318786" [01641e80-7a9e-48c2-b9e3-d384beab62d7] Running
I1124 14:00:40.504287 203121 system_pods.go:89] "storage-provisioner" [2298aa73-9529-42f0-a0ec-22197acfa4ba] Running
I1124 14:00:40.504296 203121 system_pods.go:126] duration metric: took 1.034219513s to wait for k8s-apps to be running ...
I1124 14:00:40.504307 203121 system_svc.go:44] waiting for kubelet service to be running ....
I1124 14:00:40.504364 203121 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1124 14:00:40.517880 203121 system_svc.go:56] duration metric: took 13.563315ms WaitForService to wait for kubelet
I1124 14:00:40.517964 203121 kubeadm.go:587] duration metric: took 14.455781279s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1124 14:00:40.517991 203121 node_conditions.go:102] verifying NodePressure condition ...
I1124 14:00:40.520930 203121 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
I1124 14:00:40.520963 203121 node_conditions.go:123] node cpu capacity is 2
I1124 14:00:40.520978 203121 node_conditions.go:105] duration metric: took 2.980003ms to run NodePressure ...
I1124 14:00:40.520990 203121 start.go:242] waiting for startup goroutines ...
I1124 14:00:40.520998 203121 start.go:247] waiting for cluster config update ...
I1124 14:00:40.521010 203121 start.go:256] writing updated cluster config ...
I1124 14:00:40.521298 203121 ssh_runner.go:195] Run: rm -f paused
I1124 14:00:40.525324 203121 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1124 14:00:40.529797 203121 pod_ready.go:83] waiting for pod "coredns-5dd5756b68-n7s8h" in "kube-system" namespace to be "Ready" or be gone ...
I1124 14:00:40.535607 203121 pod_ready.go:94] pod "coredns-5dd5756b68-n7s8h" is "Ready"
I1124 14:00:40.535639 203121 pod_ready.go:86] duration metric: took 5.816258ms for pod "coredns-5dd5756b68-n7s8h" in "kube-system" namespace to be "Ready" or be gone ...
I1124 14:00:40.539181 203121 pod_ready.go:83] waiting for pod "etcd-old-k8s-version-318786" in "kube-system" namespace to be "Ready" or be gone ...
I1124 14:00:40.544657 203121 pod_ready.go:94] pod "etcd-old-k8s-version-318786" is "Ready"
I1124 14:00:40.544685 203121 pod_ready.go:86] duration metric: took 5.478924ms for pod "etcd-old-k8s-version-318786" in "kube-system" namespace to be "Ready" or be gone ...
I1124 14:00:40.548165 203121 pod_ready.go:83] waiting for pod "kube-apiserver-old-k8s-version-318786" in "kube-system" namespace to be "Ready" or be gone ...
I1124 14:00:40.553506 203121 pod_ready.go:94] pod "kube-apiserver-old-k8s-version-318786" is "Ready"
I1124 14:00:40.553538 203121 pod_ready.go:86] duration metric: took 5.343284ms for pod "kube-apiserver-old-k8s-version-318786" in "kube-system" namespace to be "Ready" or be gone ...
I1124 14:00:40.556924 203121 pod_ready.go:83] waiting for pod "kube-controller-manager-old-k8s-version-318786" in "kube-system" namespace to be "Ready" or be gone ...
I1124 14:00:40.929692 203121 pod_ready.go:94] pod "kube-controller-manager-old-k8s-version-318786" is "Ready"
I1124 14:00:40.929725 203121 pod_ready.go:86] duration metric: took 372.7723ms for pod "kube-controller-manager-old-k8s-version-318786" in "kube-system" namespace to be "Ready" or be gone ...
I1124 14:00:41.130990 203121 pod_ready.go:83] waiting for pod "kube-proxy-jwmdg" in "kube-system" namespace to be "Ready" or be gone ...
I1124 14:00:41.530005 203121 pod_ready.go:94] pod "kube-proxy-jwmdg" is "Ready"
I1124 14:00:41.530034 203121 pod_ready.go:86] duration metric: took 399.016962ms for pod "kube-proxy-jwmdg" in "kube-system" namespace to be "Ready" or be gone ...
I1124 14:00:41.730026 203121 pod_ready.go:83] waiting for pod "kube-scheduler-old-k8s-version-318786" in "kube-system" namespace to be "Ready" or be gone ...
I1124 14:00:42.131071 203121 pod_ready.go:94] pod "kube-scheduler-old-k8s-version-318786" is "Ready"
I1124 14:00:42.131114 203121 pod_ready.go:86] duration metric: took 401.061008ms for pod "kube-scheduler-old-k8s-version-318786" in "kube-system" namespace to be "Ready" or be gone ...
I1124 14:00:42.131129 203121 pod_ready.go:40] duration metric: took 1.60575817s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1124 14:00:42.200914 203121 start.go:625] kubectl: 1.33.2, cluster: 1.28.0 (minor skew: 5)
I1124 14:00:42.204172 203121 out.go:203]
W1124 14:00:42.207213 203121 out.go:285] ! /usr/local/bin/kubectl is version 1.33.2, which may have incompatibilities with Kubernetes 1.28.0.
I1124 14:00:42.210285 203121 out.go:179] - Want kubectl v1.28.0? Try 'minikube kubectl -- get pods -A'
I1124 14:00:42.214390 203121 out.go:179] * Done! kubectl is now configured to use "old-k8s-version-318786" cluster and "default" namespace by default
==> container status <==
CONTAINER       IMAGE           CREATED           STATE     NAME                       ATTEMPT   POD ID          POD                                               NAMESPACE
70e558ad037eb   1611cd07b61d5   9 seconds ago     Running   busybox                    0         f472329e9fd63   busybox                                           default
33ca9b6d24a80   ba04bb24b9575   15 seconds ago    Running   storage-provisioner        0         9de766e43deb4   storage-provisioner                               kube-system
d1e4297a18de5   97e04611ad434   15 seconds ago    Running   coredns                    0         1b5cb0ca09af2   coredns-5dd5756b68-n7s8h                          kube-system
8a5ceb46ea7cb   b1a8c6f707935   26 seconds ago    Running   kindnet-cni                0         e9b4fd516b97c   kindnet-z4rkx                                     kube-system
e431b25999ece   940f54a5bcae9   28 seconds ago    Running   kube-proxy                 0         68384e9c54fe8   kube-proxy-jwmdg                                  kube-system
64ea1db6adeec   00543d2fe5d71   49 seconds ago    Running   kube-apiserver             0         6e10952c6964b   kube-apiserver-old-k8s-version-318786             kube-system
d422fb0577ca7   46cc66ccc7c19   49 seconds ago    Running   kube-controller-manager    0         ede8e07dcdc74   kube-controller-manager-old-k8s-version-318786    kube-system
0769df21ce83c   762dce4090c5f   49 seconds ago    Running   kube-scheduler             0         6729e51d9cdf6   kube-scheduler-old-k8s-version-318786             kube-system
a96dcde7b48e2   9cdd6470f48c8   49 seconds ago    Running   etcd                       0         388ca052bc258   etcd-old-k8s-version-318786                       kube-system
==> containerd <==
Nov 24 14:00:39 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:39.610323957Z" level=info msg="connecting to shim d1e4297a18de5a35eef1e955a0f6b73d8881ba2296e59d8acaed4614dce5de51" address="unix:///run/containerd/s/f62f275e67577be37030e893196dc98d73b2044e58d241d1a7f99ccee4904d24" protocol=ttrpc version=3
Nov 24 14:00:39 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:39.620434869Z" level=info msg="CreateContainer within sandbox \"9de766e43deb416449962bc7301bab891c72b0af9fb329bb4d8e4ff8ef66bff4\" for container &ContainerMetadata{Name:storage-provisioner,Attempt:0,}"
Nov 24 14:00:39 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:39.644253353Z" level=info msg="Container 33ca9b6d24a80a1f0470355c5dc5bf87df622a7ffd33dad20b3a66e3d42820fb: CDI devices from CRI Config.CDIDevices: []"
Nov 24 14:00:39 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:39.663968611Z" level=info msg="CreateContainer within sandbox \"9de766e43deb416449962bc7301bab891c72b0af9fb329bb4d8e4ff8ef66bff4\" for &ContainerMetadata{Name:storage-provisioner,Attempt:0,} returns container id \"33ca9b6d24a80a1f0470355c5dc5bf87df622a7ffd33dad20b3a66e3d42820fb\""
Nov 24 14:00:39 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:39.665502745Z" level=info msg="StartContainer for \"33ca9b6d24a80a1f0470355c5dc5bf87df622a7ffd33dad20b3a66e3d42820fb\""
Nov 24 14:00:39 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:39.666531739Z" level=info msg="connecting to shim 33ca9b6d24a80a1f0470355c5dc5bf87df622a7ffd33dad20b3a66e3d42820fb" address="unix:///run/containerd/s/25a7b18f3f0941131e8c32d45d1f9f3bcee38bf8a73b1e3195d36d7532fce44f" protocol=ttrpc version=3
Nov 24 14:00:39 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:39.711141089Z" level=info msg="StartContainer for \"d1e4297a18de5a35eef1e955a0f6b73d8881ba2296e59d8acaed4614dce5de51\" returns successfully"
Nov 24 14:00:39 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:39.756705480Z" level=info msg="StartContainer for \"33ca9b6d24a80a1f0470355c5dc5bf87df622a7ffd33dad20b3a66e3d42820fb\" returns successfully"
Nov 24 14:00:42 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:42.744539553Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:f566ecf0-6907-464c-9185-0f1cac06d38f,Namespace:default,Attempt:0,}"
Nov 24 14:00:42 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:42.796555926Z" level=info msg="connecting to shim f472329e9fd635f4d2ecb8d02d86100f8c593bf1ea6b1e68f6aab8b27bbcb144" address="unix:///run/containerd/s/e47b16e174c686888228b35f0ff63c9e1e5e13d47c7f7c2e532fdeedd0981c84" namespace=k8s.io protocol=ttrpc version=3
Nov 24 14:00:42 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:42.853864201Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:f566ecf0-6907-464c-9185-0f1cac06d38f,Namespace:default,Attempt:0,} returns sandbox id \"f472329e9fd635f4d2ecb8d02d86100f8c593bf1ea6b1e68f6aab8b27bbcb144\""
Nov 24 14:00:42 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:42.855634629Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\""
Nov 24 14:00:45 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:45.151334885Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 24 14:00:45 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:45.153450408Z" level=info msg="stop pulling image gcr.io/k8s-minikube/busybox:1.28.4-glibc: active requests=0, bytes read=1937184"
Nov 24 14:00:45 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:45.156363448Z" level=info msg="ImageCreate event name:\"sha256:1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 24 14:00:45 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:45.161551496Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 24 14:00:45 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:45.162193515Z" level=info msg="Pulled image \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" with image id \"sha256:1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c\", repo tag \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\", repo digest \"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\", size \"1935750\" in 2.306509548s"
Nov 24 14:00:45 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:45.162249565Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" returns image reference \"sha256:1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c\""
Nov 24 14:00:45 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:45.179990125Z" level=info msg="CreateContainer within sandbox \"f472329e9fd635f4d2ecb8d02d86100f8c593bf1ea6b1e68f6aab8b27bbcb144\" for container &ContainerMetadata{Name:busybox,Attempt:0,}"
Nov 24 14:00:45 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:45.248189267Z" level=info msg="Container 70e558ad037eb593fa44b07e4fd36f48454dee00712743ce51a58d742a33605b: CDI devices from CRI Config.CDIDevices: []"
Nov 24 14:00:45 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:45.262062616Z" level=info msg="CreateContainer within sandbox \"f472329e9fd635f4d2ecb8d02d86100f8c593bf1ea6b1e68f6aab8b27bbcb144\" for &ContainerMetadata{Name:busybox,Attempt:0,} returns container id \"70e558ad037eb593fa44b07e4fd36f48454dee00712743ce51a58d742a33605b\""
Nov 24 14:00:45 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:45.263385826Z" level=info msg="StartContainer for \"70e558ad037eb593fa44b07e4fd36f48454dee00712743ce51a58d742a33605b\""
Nov 24 14:00:45 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:45.265058480Z" level=info msg="connecting to shim 70e558ad037eb593fa44b07e4fd36f48454dee00712743ce51a58d742a33605b" address="unix:///run/containerd/s/e47b16e174c686888228b35f0ff63c9e1e5e13d47c7f7c2e532fdeedd0981c84" protocol=ttrpc version=3
Nov 24 14:00:45 old-k8s-version-318786 containerd[755]: time="2025-11-24T14:00:45.370550827Z" level=info msg="StartContainer for \"70e558ad037eb593fa44b07e4fd36f48454dee00712743ce51a58d742a33605b\" returns successfully"
Nov 24 14:00:51 old-k8s-version-318786 containerd[755]: E1124 14:00:51.571973 755 websocket.go:100] "Unhandled Error" err="unable to upgrade websocket connection: websocket server finished before becoming ready" logger="UnhandledError"
==> coredns [d1e4297a18de5a35eef1e955a0f6b73d8881ba2296e59d8acaed4614dce5de51] <==
.:53
[INFO] plugin/reload: Running configuration SHA512 = 8aa94104b4dae56b00431f7362ac05b997af2246775de35dc2eb361b0707b2fa7199f9ddfdba27fdef1331b76d09c41700f6cb5d00836dabab7c0df8e651283f
CoreDNS-1.10.1
linux/arm64, go1.20, 055b2c3
[INFO] 127.0.0.1:60396 - 50045 "HINFO IN 8149976766644082851.319243235608499577. udp 56 false 512" NXDOMAIN qr,rd,ra 56 0.006788489s
==> describe nodes <==
Name: old-k8s-version-318786
Roles: control-plane
Labels:      beta.kubernetes.io/arch=arm64
             beta.kubernetes.io/os=linux
             kubernetes.io/arch=arm64
             kubernetes.io/hostname=old-k8s-version-318786
             kubernetes.io/os=linux
             minikube.k8s.io/commit=b5d1c9f4e75f4e638a533695fd62619949cefcab
             minikube.k8s.io/name=old-k8s-version-318786
             minikube.k8s.io/primary=true
             minikube.k8s.io/updated_at=2025_11_24T14_00_14_0700
             minikube.k8s.io/version=v1.37.0
             node-role.kubernetes.io/control-plane=
             node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock
             node.alpha.kubernetes.io/ttl: 0
             volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Mon, 24 Nov 2025 14:00:09 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: old-k8s-version-318786
AcquireTime: <unset>
RenewTime: Mon, 24 Nov 2025 14:00:53 +0000
Conditions:
Type              Status  LastHeartbeatTime                  LastTransitionTime                 Reason                        Message
----              ------  -----------------                  ------------------                 ------                        -------
MemoryPressure    False   Mon, 24 Nov 2025 14:00:43 +0000    Mon, 24 Nov 2025 14:00:06 +0000    KubeletHasSufficientMemory    kubelet has sufficient memory available
DiskPressure      False   Mon, 24 Nov 2025 14:00:43 +0000    Mon, 24 Nov 2025 14:00:06 +0000    KubeletHasNoDiskPressure      kubelet has no disk pressure
PIDPressure       False   Mon, 24 Nov 2025 14:00:43 +0000    Mon, 24 Nov 2025 14:00:06 +0000    KubeletHasSufficientPID       kubelet has sufficient PID available
Ready             True    Mon, 24 Nov 2025 14:00:43 +0000    Mon, 24 Nov 2025 14:00:39 +0000    KubeletReady                  kubelet is posting ready status
Addresses:
InternalIP: 192.168.85.2
Hostname: old-k8s-version-318786
Capacity:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022296Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022296Ki
pods: 110
System Info:
Machine ID: 7283ea1857f18f20a875c29069214c9d
System UUID: 259561de-786f-47f9-8e4d-12bddad03b80
Boot ID: dd480c26-e101-4930-b98c-54c06b430fdc
Kernel Version: 5.15.0-1084-aws
OS Image: Debian GNU/Linux 12 (bookworm)
Operating System: linux
Architecture: arm64
Container Runtime Version: containerd://2.1.5
Kubelet Version: v1.28.0
Kube-Proxy Version: v1.28.0
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (9 in total)
Namespace      Name                                               CPU Requests   CPU Limits   Memory Requests   Memory Limits   Age
---------      ----                                               ------------   ----------   ---------------   -------------   ---
default        busybox                                            0 (0%)         0 (0%)       0 (0%)            0 (0%)          13s
kube-system    coredns-5dd5756b68-n7s8h                           100m (5%)      0 (0%)       70Mi (0%)         170Mi (2%)      30s
kube-system    etcd-old-k8s-version-318786                        100m (5%)      0 (0%)       100Mi (1%)        0 (0%)          42s
kube-system    kindnet-z4rkx                                      100m (5%)      100m (5%)    50Mi (0%)         50Mi (0%)       30s
kube-system    kube-apiserver-old-k8s-version-318786              250m (12%)     0 (0%)       0 (0%)            0 (0%)          42s
kube-system    kube-controller-manager-old-k8s-version-318786     200m (10%)     0 (0%)       0 (0%)            0 (0%)          42s
kube-system    kube-proxy-jwmdg                                   0 (0%)         0 (0%)       0 (0%)            0 (0%)          30s
kube-system    kube-scheduler-old-k8s-version-318786              100m (5%)      0 (0%)       0 (0%)            0 (0%)          42s
kube-system    storage-provisioner                                0 (0%)         0 (0%)       0 (0%)            0 (0%)          28s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource            Requests      Limits
--------            --------      ------
cpu                 850m (42%)    100m (5%)
memory              220Mi (2%)    220Mi (2%)
ephemeral-storage   0 (0%)        0 (0%)
hugepages-1Gi       0 (0%)        0 (0%)
hugepages-2Mi       0 (0%)        0 (0%)
hugepages-32Mi      0 (0%)        0 (0%)
hugepages-64Ki      0 (0%)        0 (0%)
Events:
Type     Reason                     Age    From              Message
----     ------                     ----   ----              -------
Normal   Starting                   28s    kube-proxy
Normal   Starting                   43s    kubelet           Starting kubelet.
Normal   NodeHasSufficientMemory    42s    kubelet           Node old-k8s-version-318786 status is now: NodeHasSufficientMemory
Normal   NodeHasNoDiskPressure      42s    kubelet           Node old-k8s-version-318786 status is now: NodeHasNoDiskPressure
Normal   NodeHasSufficientPID       42s    kubelet           Node old-k8s-version-318786 status is now: NodeHasSufficientPID
Normal   NodeAllocatableEnforced    42s    kubelet           Updated Node Allocatable limit across pods
Normal   RegisteredNode             30s    node-controller   Node old-k8s-version-318786 event: Registered Node old-k8s-version-318786 in Controller
Normal   NodeReady                  16s    kubelet           Node old-k8s-version-318786 status is now: NodeReady
==> dmesg <==
[Nov24 12:17] ACPI: SRAT not present
[ +0.000000] ACPI: SRAT not present
[ +0.000000] SPI driver altr_a10sr has no spi_device_id for altr,a10sr
[ +0.014697] device-mapper: core: CONFIG_IMA_DISABLE_HTABLE is disabled. Duplicate IMA measurements will not be recorded in the IMA log.
[ +0.497291] systemd[1]: Configuration file /run/systemd/system/netplan-ovs-cleanup.service is marked world-inaccessible. This has no effect as configuration data is accessible via APIs without restrictions. Proceeding anyway.
[ +0.033884] systemd[1]: /lib/systemd/system/snapd.service:23: Unknown key name 'RestartMode' in section 'Service', ignoring.
[ +0.804993] ena 0000:00:05.0: LLQ is not supported Fallback to host mode policy.
[ +6.476130] kauditd_printk_skb: 36 callbacks suppressed
==> etcd [a96dcde7b48e2020162f86ef991d82171cf903dc40c2588013e878e07607a6eb] <==
{"level":"info","ts":"2025-11-24T14:00:05.836588Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"9f0758e1c58a86ed switched to configuration voters=(11459225503572592365)"}
{"level":"info","ts":"2025-11-24T14:00:05.836695Z","caller":"membership/cluster.go:421","msg":"added member","cluster-id":"68eaea490fab4e05","local-member-id":"9f0758e1c58a86ed","added-peer-id":"9f0758e1c58a86ed","added-peer-peer-urls":["https://192.168.85.2:2380"]}
{"level":"info","ts":"2025-11-24T14:00:05.836985Z","caller":"embed/etcd.go:726","msg":"starting with client TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/server.crt, key = /var/lib/minikube/certs/etcd/server.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
{"level":"info","ts":"2025-11-24T14:00:05.83715Z","caller":"embed/etcd.go:278","msg":"now serving peer/client/metrics","local-member-id":"9f0758e1c58a86ed","initial-advertise-peer-urls":["https://192.168.85.2:2380"],"listen-peer-urls":["https://192.168.85.2:2380"],"advertise-client-urls":["https://192.168.85.2:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.85.2:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
{"level":"info","ts":"2025-11-24T14:00:05.837189Z","caller":"embed/etcd.go:855","msg":"serving metrics","address":"http://127.0.0.1:2381"}
{"level":"info","ts":"2025-11-24T14:00:05.837186Z","caller":"embed/etcd.go:597","msg":"serving peer traffic","address":"192.168.85.2:2380"}
{"level":"info","ts":"2025-11-24T14:00:05.837211Z","caller":"embed/etcd.go:569","msg":"cmux::serve","address":"192.168.85.2:2380"}
{"level":"info","ts":"2025-11-24T14:00:06.715956Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"9f0758e1c58a86ed is starting a new election at term 1"}
{"level":"info","ts":"2025-11-24T14:00:06.716187Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"9f0758e1c58a86ed became pre-candidate at term 1"}
{"level":"info","ts":"2025-11-24T14:00:06.716278Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"9f0758e1c58a86ed received MsgPreVoteResp from 9f0758e1c58a86ed at term 1"}
{"level":"info","ts":"2025-11-24T14:00:06.716408Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"9f0758e1c58a86ed became candidate at term 2"}
{"level":"info","ts":"2025-11-24T14:00:06.716497Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"9f0758e1c58a86ed received MsgVoteResp from 9f0758e1c58a86ed at term 2"}
{"level":"info","ts":"2025-11-24T14:00:06.716591Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"9f0758e1c58a86ed became leader at term 2"}
{"level":"info","ts":"2025-11-24T14:00:06.716663Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: 9f0758e1c58a86ed elected leader 9f0758e1c58a86ed at term 2"}
{"level":"info","ts":"2025-11-24T14:00:06.719119Z","caller":"etcdserver/server.go:2571","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-24T14:00:06.724173Z","caller":"etcdserver/server.go:2062","msg":"published local member to cluster through raft","local-member-id":"9f0758e1c58a86ed","local-member-attributes":"{Name:old-k8s-version-318786 ClientURLs:[https://192.168.85.2:2379]}","request-path":"/0/members/9f0758e1c58a86ed/attributes","cluster-id":"68eaea490fab4e05","publish-timeout":"7s"}
{"level":"info","ts":"2025-11-24T14:00:06.727971Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"68eaea490fab4e05","local-member-id":"9f0758e1c58a86ed","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-24T14:00:06.728194Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-24T14:00:06.728301Z","caller":"etcdserver/server.go:2595","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-24T14:00:06.728041Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-24T14:00:06.732297Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.85.2:2379"}
{"level":"info","ts":"2025-11-24T14:00:06.728075Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-24T14:00:06.73389Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2025-11-24T14:00:06.739971Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2025-11-24T14:00:06.747818Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
==> kernel <==
14:00:55 up 1:43, 0 user, load average: 3.35, 3.70, 3.04
Linux old-k8s-version-318786 5.15.0-1084-aws #91~20.04.1-Ubuntu SMP Fri May 2 07:00:04 UTC 2025 aarch64 GNU/Linux
PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"
==> kindnet [8a5ceb46ea7cbcd9a345bdf9ba11d0c7a3a990148842c5c44246730c76d8948d] <==
I1124 14:00:28.769606 1 main.go:109] connected to apiserver: https://10.96.0.1:443
I1124 14:00:28.860713 1 main.go:139] hostIP = 192.168.85.2
podIP = 192.168.85.2
I1124 14:00:28.860851 1 main.go:148] setting mtu 1500 for CNI
I1124 14:00:28.860870 1 main.go:178] kindnetd IP family: "ipv4"
I1124 14:00:28.860885 1 main.go:182] noMask IPv4 subnets: [10.244.0.0/16]
time="2025-11-24T14:00:29Z" level=info msg="Created plugin 10-kube-network-policies (kindnetd, handles RunPodSandbox,RemovePodSandbox)"
I1124 14:00:29.062756 1 controller.go:377] "Starting controller" name="kube-network-policies"
I1124 14:00:29.064202 1 controller.go:381] "Waiting for informer caches to sync"
I1124 14:00:29.064283 1 shared_informer.go:350] "Waiting for caches to sync" controller="kube-network-policies"
I1124 14:00:29.064439 1 controller.go:390] nri plugin exited: failed to connect to NRI service: dial unix /var/run/nri/nri.sock: connect: no such file or directory
I1124 14:00:29.264984 1 shared_informer.go:357] "Caches are synced" controller="kube-network-policies"
I1124 14:00:29.265101 1 metrics.go:72] Registering metrics
I1124 14:00:29.265206 1 controller.go:711] "Syncing nftables rules"
I1124 14:00:39.066105 1 main.go:297] Handling node with IPs: map[192.168.85.2:{}]
I1124 14:00:39.066164 1 main.go:301] handling current node
I1124 14:00:49.064077 1 main.go:297] Handling node with IPs: map[192.168.85.2:{}]
I1124 14:00:49.064224 1 main.go:301] handling current node
==> kube-apiserver [64ea1db6adeecccf4211992b471a4088bba1825d5764c029cd41c736f16d8131] <==
I1124 14:00:09.559574 1 shared_informer.go:318] Caches are synced for cluster_authentication_trust_controller
I1124 14:00:09.559602 1 shared_informer.go:318] Caches are synced for crd-autoregister
I1124 14:00:09.567371 1 aggregator.go:166] initial CRD sync complete...
I1124 14:00:09.567396 1 autoregister_controller.go:141] Starting autoregister controller
I1124 14:00:09.567404 1 cache.go:32] Waiting for caches to sync for autoregister controller
I1124 14:00:09.567413 1 cache.go:39] Caches are synced for autoregister controller
I1124 14:00:09.570195 1 controller.go:624] quota admission added evaluator for: namespaces
E1124 14:00:09.602455 1 controller.go:146] "Failed to ensure lease exists, will retry" err="namespaces \"kube-system\" not found" interval="200ms"
I1124 14:00:09.654324 1 shared_informer.go:318] Caches are synced for node_authorizer
I1124 14:00:09.818311 1 controller.go:624] quota admission added evaluator for: leases.coordination.k8s.io
I1124 14:00:10.356017 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I1124 14:00:10.369141 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I1124 14:00:10.369180 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I1124 14:00:11.220927 1 controller.go:624] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1124 14:00:11.271999 1 controller.go:624] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1124 14:00:11.406464 1 alloc.go:330] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W1124 14:00:11.418391 1 lease.go:263] Resetting endpoints for master service "kubernetes" to [192.168.85.2]
I1124 14:00:11.420227 1 controller.go:624] quota admission added evaluator for: endpoints
I1124 14:00:11.426883 1 controller.go:624] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1124 14:00:11.578646 1 controller.go:624] quota admission added evaluator for: serviceaccounts
I1124 14:00:12.895802 1 controller.go:624] quota admission added evaluator for: deployments.apps
I1124 14:00:12.925996 1 alloc.go:330] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I1124 14:00:12.938109 1 controller.go:624] quota admission added evaluator for: daemonsets.apps
I1124 14:00:25.666171 1 controller.go:624] quota admission added evaluator for: replicasets.apps
I1124 14:00:25.763116 1 controller.go:624] quota admission added evaluator for: controllerrevisions.apps
==> kube-controller-manager [d422fb0577ca71bb502e21fc4c5afd81d722a57cf4424a6d0acafef3ae4afb9a] <==
I1124 14:00:25.810858 1 range_allocator.go:380] "Set node PodCIDR" node="old-k8s-version-318786" podCIDRs=["10.244.0.0/24"]
I1124 14:00:25.820650 1 event.go:307] "Event occurred" object="kube-system/kube-scheduler-old-k8s-version-318786" fieldPath="" kind="Pod" apiVersion="v1" type="Warning" reason="NodeNotReady" message="Node is not ready"
I1124 14:00:25.832097 1 event.go:307] "Event occurred" object="kube-system/kube-controller-manager-old-k8s-version-318786" fieldPath="" kind="Pod" apiVersion="v1" type="Warning" reason="NodeNotReady" message="Node is not ready"
I1124 14:00:25.835948 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-n7s8h"
I1124 14:00:25.836226 1 event.go:307] "Event occurred" object="kube-system/kindnet" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kindnet-z4rkx"
I1124 14:00:25.844347 1 event.go:307] "Event occurred" object="kube-system/kube-proxy" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-jwmdg"
I1124 14:00:25.872136 1 shared_informer.go:318] Caches are synced for HPA
I1124 14:00:25.873361 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-nhcwg"
I1124 14:00:25.905108 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="206.561387ms"
I1124 14:00:25.943326 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="37.986992ms"
I1124 14:00:25.943650 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="78.392µs"
I1124 14:00:26.225808 1 shared_informer.go:318] Caches are synced for garbage collector
I1124 14:00:26.225842 1 garbagecollector.go:166] "All resource monitors have synced. Proceeding to collect garbage"
I1124 14:00:26.240729 1 shared_informer.go:318] Caches are synced for garbage collector
I1124 14:00:27.499329 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set coredns-5dd5756b68 to 1 from 2"
I1124 14:00:27.521996 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: coredns-5dd5756b68-nhcwg"
I1124 14:00:27.537841 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="38.305665ms"
I1124 14:00:27.559719 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="21.827601ms"
I1124 14:00:27.559805 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="51.644µs"
I1124 14:00:39.122848 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="83.538µs"
I1124 14:00:39.150933 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="85.22µs"
I1124 14:00:40.276969 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="184.922µs"
I1124 14:00:40.328812 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="12.750431ms"
I1124 14:00:40.330201 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="64.312µs"
I1124 14:00:40.747463 1 node_lifecycle_controller.go:1048] "Controller detected that some Nodes are Ready. Exiting master disruption mode"
==> kube-proxy [e431b25999ece5eb3499ee68f2c85868448494e4787845d9737ad20b4a20f2f8] <==
I1124 14:00:26.865991 1 server_others.go:69] "Using iptables proxy"
I1124 14:00:26.884883 1 node.go:141] Successfully retrieved node IP: 192.168.85.2
I1124 14:00:26.934067 1 server.go:632] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I1124 14:00:26.935893 1 server_others.go:152] "Using iptables Proxier"
I1124 14:00:26.936119 1 server_others.go:421] "Detect-local-mode set to ClusterCIDR, but no cluster CIDR for family" ipFamily="IPv6"
I1124 14:00:26.936132 1 server_others.go:438] "Defaulting to no-op detect-local"
I1124 14:00:26.936170 1 proxier.go:251] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
I1124 14:00:26.936420 1 server.go:846] "Version info" version="v1.28.0"
I1124 14:00:26.936439 1 server.go:848] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1124 14:00:26.937512 1 config.go:188] "Starting service config controller"
I1124 14:00:26.937582 1 shared_informer.go:311] Waiting for caches to sync for service config
I1124 14:00:26.937602 1 config.go:97] "Starting endpoint slice config controller"
I1124 14:00:26.937606 1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
I1124 14:00:26.938430 1 config.go:315] "Starting node config controller"
I1124 14:00:26.938440 1 shared_informer.go:311] Waiting for caches to sync for node config
I1124 14:00:27.039179 1 shared_informer.go:318] Caches are synced for node config
I1124 14:00:27.039222 1 shared_informer.go:318] Caches are synced for service config
I1124 14:00:27.039271 1 shared_informer.go:318] Caches are synced for endpoint slice config
==> kube-scheduler [0769df21ce83c4995a35d15a4e7ae3000b8a5d86168fda1bff6738b8943c92ef] <==
W1124 14:00:10.860716 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E1124 14:00:10.860734 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
W1124 14:00:10.861473 1 reflector.go:535] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E1124 14:00:10.861503 1 reflector.go:147] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
W1124 14:00:10.866658 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E1124 14:00:10.866694 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
W1124 14:00:10.866737 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E1124 14:00:10.866752 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
W1124 14:00:10.867029 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E1124 14:00:10.867053 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
W1124 14:00:10.867116 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E1124 14:00:10.867134 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
W1124 14:00:10.867194 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E1124 14:00:10.867211 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
W1124 14:00:10.867277 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E1124 14:00:10.867299 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
W1124 14:00:10.869201 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E1124 14:00:10.869232 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
W1124 14:00:10.869290 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E1124 14:00:10.869420 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
W1124 14:00:10.869379 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E1124 14:00:10.869453 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
W1124 14:00:10.870338 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E1124 14:00:10.870513 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
I1124 14:00:11.746244 1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Nov 24 14:00:25 old-k8s-version-318786 kubelet[1527]: I1124 14:00:25.889706 1527 kuberuntime_manager.go:1463] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/24"
Nov 24 14:00:25 old-k8s-version-318786 kubelet[1527]: I1124 14:00:25.891508 1527 kubelet_network.go:61] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24"
Nov 24 14:00:25 old-k8s-version-318786 kubelet[1527]: I1124 14:00:25.894804 1527 topology_manager.go:215] "Topology Admit Handler" podUID="11a8b197-dd22-45df-9593-66d16fdefa80" podNamespace="kube-system" podName="kube-proxy-jwmdg"
Nov 24 14:00:25 old-k8s-version-318786 kubelet[1527]: I1124 14:00:25.914677 1527 topology_manager.go:215] "Topology Admit Handler" podUID="053d781f-846e-4391-a537-edd057019339" podNamespace="kube-system" podName="kindnet-z4rkx"
Nov 24 14:00:26 old-k8s-version-318786 kubelet[1527]: I1124 14:00:26.018048 1527 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/053d781f-846e-4391-a537-edd057019339-lib-modules\") pod \"kindnet-z4rkx\" (UID: \"053d781f-846e-4391-a537-edd057019339\") " pod="kube-system/kindnet-z4rkx"
Nov 24 14:00:26 old-k8s-version-318786 kubelet[1527]: I1124 14:00:26.018107 1527 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/11a8b197-dd22-45df-9593-66d16fdefa80-kube-proxy\") pod \"kube-proxy-jwmdg\" (UID: \"11a8b197-dd22-45df-9593-66d16fdefa80\") " pod="kube-system/kube-proxy-jwmdg"
Nov 24 14:00:26 old-k8s-version-318786 kubelet[1527]: I1124 14:00:26.018131 1527 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/11a8b197-dd22-45df-9593-66d16fdefa80-lib-modules\") pod \"kube-proxy-jwmdg\" (UID: \"11a8b197-dd22-45df-9593-66d16fdefa80\") " pod="kube-system/kube-proxy-jwmdg"
Nov 24 14:00:26 old-k8s-version-318786 kubelet[1527]: I1124 14:00:26.018158 1527 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/11a8b197-dd22-45df-9593-66d16fdefa80-xtables-lock\") pod \"kube-proxy-jwmdg\" (UID: \"11a8b197-dd22-45df-9593-66d16fdefa80\") " pod="kube-system/kube-proxy-jwmdg"
Nov 24 14:00:26 old-k8s-version-318786 kubelet[1527]: I1124 14:00:26.018212 1527 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2wldf\" (UniqueName: \"kubernetes.io/projected/053d781f-846e-4391-a537-edd057019339-kube-api-access-2wldf\") pod \"kindnet-z4rkx\" (UID: \"053d781f-846e-4391-a537-edd057019339\") " pod="kube-system/kindnet-z4rkx"
Nov 24 14:00:26 old-k8s-version-318786 kubelet[1527]: I1124 14:00:26.018240 1527 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-cfg\" (UniqueName: \"kubernetes.io/host-path/053d781f-846e-4391-a537-edd057019339-cni-cfg\") pod \"kindnet-z4rkx\" (UID: \"053d781f-846e-4391-a537-edd057019339\") " pod="kube-system/kindnet-z4rkx"
Nov 24 14:00:26 old-k8s-version-318786 kubelet[1527]: I1124 14:00:26.018265 1527 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/053d781f-846e-4391-a537-edd057019339-xtables-lock\") pod \"kindnet-z4rkx\" (UID: \"053d781f-846e-4391-a537-edd057019339\") " pod="kube-system/kindnet-z4rkx"
Nov 24 14:00:26 old-k8s-version-318786 kubelet[1527]: I1124 14:00:26.018289 1527 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zj98c\" (UniqueName: \"kubernetes.io/projected/11a8b197-dd22-45df-9593-66d16fdefa80-kube-api-access-zj98c\") pod \"kube-proxy-jwmdg\" (UID: \"11a8b197-dd22-45df-9593-66d16fdefa80\") " pod="kube-system/kube-proxy-jwmdg"
Nov 24 14:00:27 old-k8s-version-318786 kubelet[1527]: I1124 14:00:27.246948 1527 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kube-proxy-jwmdg" podStartSLOduration=2.246903083 podCreationTimestamp="2025-11-24 14:00:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 14:00:27.246446757 +0000 UTC m=+14.385235109" watchObservedRunningTime="2025-11-24 14:00:27.246903083 +0000 UTC m=+14.385691436"
Nov 24 14:00:33 old-k8s-version-318786 kubelet[1527]: I1124 14:00:33.074010 1527 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kindnet-z4rkx" podStartSLOduration=5.998729082 podCreationTimestamp="2025-11-24 14:00:25 +0000 UTC" firstStartedPulling="2025-11-24 14:00:26.522078288 +0000 UTC m=+13.660866641" lastFinishedPulling="2025-11-24 14:00:28.597316912 +0000 UTC m=+15.736105264" observedRunningTime="2025-11-24 14:00:29.252063076 +0000 UTC m=+16.390851428" watchObservedRunningTime="2025-11-24 14:00:33.073967705 +0000 UTC m=+20.212756058"
Nov 24 14:00:39 old-k8s-version-318786 kubelet[1527]: I1124 14:00:39.082518 1527 kubelet_node_status.go:493] "Fast updating node status as it just became ready"
Nov 24 14:00:39 old-k8s-version-318786 kubelet[1527]: I1124 14:00:39.121702 1527 topology_manager.go:215] "Topology Admit Handler" podUID="72202b02-1ca2-4c69-ad47-3f1ef90ba8ba" podNamespace="kube-system" podName="coredns-5dd5756b68-n7s8h"
Nov 24 14:00:39 old-k8s-version-318786 kubelet[1527]: I1124 14:00:39.132093 1527 topology_manager.go:215] "Topology Admit Handler" podUID="2298aa73-9529-42f0-a0ec-22197acfa4ba" podNamespace="kube-system" podName="storage-provisioner"
Nov 24 14:00:39 old-k8s-version-318786 kubelet[1527]: I1124 14:00:39.309362 1527 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-68nxx\" (UniqueName: \"kubernetes.io/projected/72202b02-1ca2-4c69-ad47-3f1ef90ba8ba-kube-api-access-68nxx\") pod \"coredns-5dd5756b68-n7s8h\" (UID: \"72202b02-1ca2-4c69-ad47-3f1ef90ba8ba\") " pod="kube-system/coredns-5dd5756b68-n7s8h"
Nov 24 14:00:39 old-k8s-version-318786 kubelet[1527]: I1124 14:00:39.309430 1527 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z64wd\" (UniqueName: \"kubernetes.io/projected/2298aa73-9529-42f0-a0ec-22197acfa4ba-kube-api-access-z64wd\") pod \"storage-provisioner\" (UID: \"2298aa73-9529-42f0-a0ec-22197acfa4ba\") " pod="kube-system/storage-provisioner"
Nov 24 14:00:39 old-k8s-version-318786 kubelet[1527]: I1124 14:00:39.309458 1527 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/72202b02-1ca2-4c69-ad47-3f1ef90ba8ba-config-volume\") pod \"coredns-5dd5756b68-n7s8h\" (UID: \"72202b02-1ca2-4c69-ad47-3f1ef90ba8ba\") " pod="kube-system/coredns-5dd5756b68-n7s8h"
Nov 24 14:00:39 old-k8s-version-318786 kubelet[1527]: I1124 14:00:39.309484 1527 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/2298aa73-9529-42f0-a0ec-22197acfa4ba-tmp\") pod \"storage-provisioner\" (UID: \"2298aa73-9529-42f0-a0ec-22197acfa4ba\") " pod="kube-system/storage-provisioner"
Nov 24 14:00:40 old-k8s-version-318786 kubelet[1527]: I1124 14:00:40.295007 1527 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/coredns-5dd5756b68-n7s8h" podStartSLOduration=15.294930673 podCreationTimestamp="2025-11-24 14:00:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 14:00:40.27945045 +0000 UTC m=+27.418238811" watchObservedRunningTime="2025-11-24 14:00:40.294930673 +0000 UTC m=+27.433719026"
Nov 24 14:00:40 old-k8s-version-318786 kubelet[1527]: I1124 14:00:40.313747 1527 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=13.313703157 podCreationTimestamp="2025-11-24 14:00:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 14:00:40.296336403 +0000 UTC m=+27.435124755" watchObservedRunningTime="2025-11-24 14:00:40.313703157 +0000 UTC m=+27.452491510"
Nov 24 14:00:42 old-k8s-version-318786 kubelet[1527]: I1124 14:00:42.439571 1527 topology_manager.go:215] "Topology Admit Handler" podUID="f566ecf0-6907-464c-9185-0f1cac06d38f" podNamespace="default" podName="busybox"
Nov 24 14:00:42 old-k8s-version-318786 kubelet[1527]: I1124 14:00:42.534626 1527 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t9mqp\" (UniqueName: \"kubernetes.io/projected/f566ecf0-6907-464c-9185-0f1cac06d38f-kube-api-access-t9mqp\") pod \"busybox\" (UID: \"f566ecf0-6907-464c-9185-0f1cac06d38f\") " pod="default/busybox"
==> storage-provisioner [33ca9b6d24a80a1f0470355c5dc5bf87df622a7ffd33dad20b3a66e3d42820fb] <==
I1124 14:00:39.762113 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1124 14:00:39.776081 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1124 14:00:39.776154 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I1124 14:00:39.787120 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I1124 14:00:39.787379 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_old-k8s-version-318786_ab0f5e48-32a3-4e29-9ee1-b1971bc22e35!
I1124 14:00:39.788450 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"fe6ba064-a6c2-4186-b355-eb48ac5eb1d0", APIVersion:"v1", ResourceVersion:"412", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' old-k8s-version-318786_ab0f5e48-32a3-4e29-9ee1-b1971bc22e35 became leader
I1124 14:00:39.888593 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_old-k8s-version-318786_ab0f5e48-32a3-4e29-9ee1-b1971bc22e35!
-- /stdout --
helpers_test.go:262: (dbg) Run: out/minikube-linux-arm64 status --format={{.APIServer}} -p old-k8s-version-318786 -n old-k8s-version-318786
helpers_test.go:269: (dbg) Run: kubectl --context old-k8s-version-318786 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:293: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: end of post-mortem logs <<<
helpers_test.go:294: ---------------------/post-mortem---------------------------------
--- FAIL: TestStartStop/group/old-k8s-version/serial/DeployApp (13.81s)