=== RUN TestStartStop/group/old-k8s-version/serial/DeployApp
start_stop_delete_test.go:194: (dbg) Run: kubectl --context old-k8s-version-098965 create -f testdata/busybox.yaml
start_stop_delete_test.go:194: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:352: "busybox" [b377806c-ae20-44d2-9d0f-07b097026328] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
helpers_test.go:352: "busybox" [b377806c-ae20-44d2-9d0f-07b097026328] Running
start_stop_delete_test.go:194: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: integration-test=busybox healthy within 8.003571415s
start_stop_delete_test.go:194: (dbg) Run: kubectl --context old-k8s-version-098965 exec busybox -- /bin/sh -c "ulimit -n"
start_stop_delete_test.go:194: 'ulimit -n' returned 1024, expected 1048576
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: docker inspect <======
helpers_test.go:239: (dbg) Run: docker inspect old-k8s-version-098965
helpers_test.go:243: (dbg) docker inspect old-k8s-version-098965:
-- stdout --
[
{
"Id": "51b62bc50b581270fcb4bc2e1c574a9a6681d89c3887762aa06dd29ac0c65022",
"Created": "2025-11-24T03:37:06.167962609Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 457210,
"ExitCode": 0,
"Error": "",
"StartedAt": "2025-11-24T03:37:06.24041942Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:fbb44bc62521f331457dff002aaa5e1e27856f9e53853b3b3ee62969be454028",
"ResolvConfPath": "/var/lib/docker/containers/51b62bc50b581270fcb4bc2e1c574a9a6681d89c3887762aa06dd29ac0c65022/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/51b62bc50b581270fcb4bc2e1c574a9a6681d89c3887762aa06dd29ac0c65022/hostname",
"HostsPath": "/var/lib/docker/containers/51b62bc50b581270fcb4bc2e1c574a9a6681d89c3887762aa06dd29ac0c65022/hosts",
"LogPath": "/var/lib/docker/containers/51b62bc50b581270fcb4bc2e1c574a9a6681d89c3887762aa06dd29ac0c65022/51b62bc50b581270fcb4bc2e1c574a9a6681d89c3887762aa06dd29ac0c65022-json.log",
"Name": "/old-k8s-version-098965",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"old-k8s-version-098965:/var",
"/lib/modules:/lib/modules:ro"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {}
},
"NetworkMode": "old-k8s-version-098965",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 3221225472,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 6442450944,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"ID": "51b62bc50b581270fcb4bc2e1c574a9a6681d89c3887762aa06dd29ac0c65022",
"LowerDir": "/var/lib/docker/overlay2/8effb39b7e48dc2e06628c564f9eb8d7a6134b67b474f4243a9f92d81eed72e6-init/diff:/var/lib/docker/overlay2/11b197f530f0d571f61892814d8d4c774f7d3e5a97abdd8c5aa182cc99b2d856/diff",
"MergedDir": "/var/lib/docker/overlay2/8effb39b7e48dc2e06628c564f9eb8d7a6134b67b474f4243a9f92d81eed72e6/merged",
"UpperDir": "/var/lib/docker/overlay2/8effb39b7e48dc2e06628c564f9eb8d7a6134b67b474f4243a9f92d81eed72e6/diff",
"WorkDir": "/var/lib/docker/overlay2/8effb39b7e48dc2e06628c564f9eb8d7a6134b67b474f4243a9f92d81eed72e6/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "volume",
"Name": "old-k8s-version-098965",
"Source": "/var/lib/docker/volumes/old-k8s-version-098965/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
},
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
}
],
"Config": {
"Hostname": "old-k8s-version-098965",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763935653-21975@sha256:5273d148037cfb860f8152fbd08072e6c1f4b37ff9a51956a3c12965f5f2d787",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "old-k8s-version-098965",
"name.minikube.sigs.k8s.io": "old-k8s-version-098965",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "1fdc3bd4111da77a7219abec40237713d3aafb5294361ea9ac940f031b5e9874",
"SandboxKey": "/var/run/docker/netns/1fdc3bd4111d",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33418"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33419"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33422"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33420"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33421"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"old-k8s-version-098965": {
"IPAMConfig": {
"IPv4Address": "192.168.85.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "4e:8b:8f:f7:48:e2",
"DriverOpts": null,
"GwPriority": 0,
"NetworkID": "a787be3020cdc92e0572d92f4bf90ce3f3c7948fc2d2deef82cd4a5f099c319a",
"EndpointID": "0c39f7b7035a14f48a48394a60897ac0eb2db5edb711c6ca54097ce4804ab54d",
"Gateway": "192.168.85.1",
"IPAddress": "192.168.85.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"old-k8s-version-098965",
"51b62bc50b58"
]
}
}
}
}
]
-- /stdout --
helpers_test.go:247: (dbg) Run: out/minikube-linux-arm64 status --format={{.Host}} -p old-k8s-version-098965 -n old-k8s-version-098965
helpers_test.go:252: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-arm64 -p old-k8s-version-098965 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-arm64 -p old-k8s-version-098965 logs -n 25: (1.215717345s)
helpers_test.go:260: TestStartStop/group/old-k8s-version/serial/DeployApp logs:
-- stdout --
==> Audit <==
┌─────────┬─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬───────────────────────────┬─────────┬─────────┬─────────────────────┬───────────
──────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼───────────────────────────┼─────────┼─────────┼─────────────────────┼───────────
──────────┤
│ ssh │ -p cilium-842431 sudo cat /etc/systemd/system/cri-docker.service.d/10-cni.conf │ cilium-842431 │ jenkins │ v1.37.0 │ 24 Nov 25 03:35 UTC │ │
│ ssh │ -p cilium-842431 sudo cat /usr/lib/systemd/system/cri-docker.service │ cilium-842431 │ jenkins │ v1.37.0 │ 24 Nov 25 03:35 UTC │ │
│ ssh │ -p cilium-842431 sudo cri-dockerd --version │ cilium-842431 │ jenkins │ v1.37.0 │ 24 Nov 25 03:35 UTC │ │
│ ssh │ -p cilium-842431 sudo systemctl status containerd --all --full --no-pager │ cilium-842431 │ jenkins │ v1.37.0 │ 24 Nov 25 03:35 UTC │ │
│ ssh │ -p cilium-842431 sudo systemctl cat containerd --no-pager │ cilium-842431 │ jenkins │ v1.37.0 │ 24 Nov 25 03:35 UTC │ │
│ ssh │ -p cilium-842431 sudo cat /lib/systemd/system/containerd.service │ cilium-842431 │ jenkins │ v1.37.0 │ 24 Nov 25 03:35 UTC │ │
│ ssh │ -p cilium-842431 sudo cat /etc/containerd/config.toml │ cilium-842431 │ jenkins │ v1.37.0 │ 24 Nov 25 03:35 UTC │ │
│ ssh │ -p cilium-842431 sudo containerd config dump │ cilium-842431 │ jenkins │ v1.37.0 │ 24 Nov 25 03:35 UTC │ │
│ ssh │ -p cilium-842431 sudo systemctl status crio --all --full --no-pager │ cilium-842431 │ jenkins │ v1.37.0 │ 24 Nov 25 03:35 UTC │ │
│ ssh │ -p cilium-842431 sudo systemctl cat crio --no-pager │ cilium-842431 │ jenkins │ v1.37.0 │ 24 Nov 25 03:35 UTC │ │
│ ssh │ -p cilium-842431 sudo find /etc/crio -type f -exec sh -c 'echo {}; cat {}' \; │ cilium-842431 │ jenkins │ v1.37.0 │ 24 Nov 25 03:35 UTC │ │
│ ssh │ -p cilium-842431 sudo crio config │ cilium-842431 │ jenkins │ v1.37.0 │ 24 Nov 25 03:35 UTC │ │
│ delete │ -p cilium-842431 │ cilium-842431 │ jenkins │ v1.37.0 │ 24 Nov 25 03:35 UTC │ 24 Nov 25 03:35 UTC │
│ start │ -p force-systemd-env-574539 --memory=3072 --alsologtostderr -v=5 --driver=docker --container-runtime=containerd │ force-systemd-env-574539 │ jenkins │ v1.37.0 │ 24 Nov 25 03:35 UTC │ 24 Nov 25 03:36 UTC │
│ start │ -p kubernetes-upgrade-850960 --memory=3072 --kubernetes-version=v1.28.0 --driver=docker --container-runtime=containerd │ kubernetes-upgrade-850960 │ jenkins │ v1.37.0 │ 24 Nov 25 03:35 UTC │ │
│ start │ -p kubernetes-upgrade-850960 --memory=3072 --kubernetes-version=v1.34.1 --alsologtostderr -v=1 --driver=docker --container-runtime=containerd │ kubernetes-upgrade-850960 │ jenkins │ v1.37.0 │ 24 Nov 25 03:35 UTC │ 24 Nov 25 03:35 UTC │
│ delete │ -p kubernetes-upgrade-850960 │ kubernetes-upgrade-850960 │ jenkins │ v1.37.0 │ 24 Nov 25 03:35 UTC │ 24 Nov 25 03:36 UTC │
│ start │ -p cert-expiration-846384 --memory=3072 --cert-expiration=3m --driver=docker --container-runtime=containerd │ cert-expiration-846384 │ jenkins │ v1.37.0 │ 24 Nov 25 03:36 UTC │ 24 Nov 25 03:36 UTC │
│ ssh │ force-systemd-env-574539 ssh cat /etc/containerd/config.toml │ force-systemd-env-574539 │ jenkins │ v1.37.0 │ 24 Nov 25 03:36 UTC │ 24 Nov 25 03:36 UTC │
│ delete │ -p force-systemd-env-574539 │ force-systemd-env-574539 │ jenkins │ v1.37.0 │ 24 Nov 25 03:36 UTC │ 24 Nov 25 03:36 UTC │
│ start │ -p cert-options-216763 --memory=3072 --apiserver-ips=127.0.0.1 --apiserver-ips=192.168.15.15 --apiserver-names=localhost --apiserver-names=www.google.com --apiserver-port=8555 --driver=docker --container-runtime=containerd │ cert-options-216763 │ jenkins │ v1.37.0 │ 24 Nov 25 03:36 UTC │ 24 Nov 25 03:36 UTC │
│ ssh │ cert-options-216763 ssh openssl x509 -text -noout -in /var/lib/minikube/certs/apiserver.crt │ cert-options-216763 │ jenkins │ v1.37.0 │ 24 Nov 25 03:36 UTC │ 24 Nov 25 03:36 UTC │
│ ssh │ -p cert-options-216763 -- sudo cat /etc/kubernetes/admin.conf │ cert-options-216763 │ jenkins │ v1.37.0 │ 24 Nov 25 03:36 UTC │ 24 Nov 25 03:36 UTC │
│ delete │ -p cert-options-216763 │ cert-options-216763 │ jenkins │ v1.37.0 │ 24 Nov 25 03:36 UTC │ 24 Nov 25 03:36 UTC │
│ start │ -p old-k8s-version-098965 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.28.0 │ old-k8s-version-098965 │ jenkins │ v1.37.0 │ 24 Nov 25 03:36 UTC │ 24 Nov 25 03:38 UTC │
└─────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴───────────────────────────┴─────────┴─────────┴─────────────────────┴───────────
──────────┘
==> Last Start <==
Log file created at: 2025/11/24 03:36:59
Running on machine: ip-172-31-24-2
Binary: Built with gc go1.25.3 for linux/arm64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1124 03:36:59.219087 456828 out.go:360] Setting OutFile to fd 1 ...
I1124 03:36:59.219245 456828 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1124 03:36:59.219258 456828 out.go:374] Setting ErrFile to fd 2...
I1124 03:36:59.219263 456828 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1124 03:36:59.219545 456828 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21975-255205/.minikube/bin
I1124 03:36:59.220001 456828 out.go:368] Setting JSON to false
I1124 03:36:59.221277 456828 start.go:133] hostinfo: {"hostname":"ip-172-31-24-2","uptime":8348,"bootTime":1763947072,"procs":184,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1084-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"6d436adf-771e-4269-b9a3-c25fd4fca4f5"}
I1124 03:36:59.221357 456828 start.go:143] virtualization:
I1124 03:36:59.227637 456828 out.go:179] * [old-k8s-version-098965] minikube v1.37.0 on Ubuntu 20.04 (arm64)
I1124 03:36:59.231151 456828 out.go:179] - MINIKUBE_LOCATION=21975
I1124 03:36:59.231327 456828 notify.go:221] Checking for updates...
I1124 03:36:59.238122 456828 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1124 03:36:59.241423 456828 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21975-255205/kubeconfig
I1124 03:36:59.244671 456828 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21975-255205/.minikube
I1124 03:36:59.247794 456828 out.go:179] - MINIKUBE_BIN=out/minikube-linux-arm64
I1124 03:36:59.250835 456828 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1124 03:36:59.254529 456828 config.go:182] Loaded profile config "cert-expiration-846384": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1124 03:36:59.254702 456828 driver.go:422] Setting default libvirt URI to qemu:///system
I1124 03:36:59.294116 456828 docker.go:124] docker version: linux-28.1.1:Docker Engine - Community
I1124 03:36:59.294240 456828 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1124 03:36:59.357118 456828 info.go:266] docker info: {ID:J4M5:W6MX:GOX4:4LAQ:VI7E:VJNF:J3OP:OPBH:GF7G:PPY4:WQWD:7N4L Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:36 OomKillDisable:true NGoroutines:52 SystemTime:2025-11-24 03:36:59.346945612 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214831104 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-24-2 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I1124 03:36:59.357230 456828 docker.go:319] overlay module found
I1124 03:36:59.360704 456828 out.go:179] * Using the docker driver based on user configuration
I1124 03:36:59.363700 456828 start.go:309] selected driver: docker
I1124 03:36:59.363727 456828 start.go:927] validating driver "docker" against <nil>
I1124 03:36:59.363759 456828 start.go:938] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1124 03:36:59.364561 456828 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1124 03:36:59.415697 456828 info.go:266] docker info: {ID:J4M5:W6MX:GOX4:4LAQ:VI7E:VJNF:J3OP:OPBH:GF7G:PPY4:WQWD:7N4L Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:36 OomKillDisable:true NGoroutines:52 SystemTime:2025-11-24 03:36:59.406291614 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214831104 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-24-2 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I1124 03:36:59.415854 456828 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I1124 03:36:59.416110 456828 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1124 03:36:59.419249 456828 out.go:179] * Using Docker driver with root privileges
I1124 03:36:59.422257 456828 cni.go:84] Creating CNI manager for ""
I1124 03:36:59.422344 456828 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1124 03:36:59.422359 456828 start_flags.go:336] Found "CNI" CNI - setting NetworkPlugin=cni
I1124 03:36:59.422448 456828 start.go:353] cluster config:
{Name:old-k8s-version-098965 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763935653-21975@sha256:5273d148037cfb860f8152fbd08072e6c1f4b37ff9a51956a3c12965f5f2d787 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-098965 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1124 03:36:59.425538 456828 out.go:179] * Starting "old-k8s-version-098965" primary control-plane node in "old-k8s-version-098965" cluster
I1124 03:36:59.428289 456828 cache.go:134] Beginning downloading kic base image for docker with containerd
I1124 03:36:59.431323 456828 out.go:179] * Pulling base image v0.0.48-1763935653-21975 ...
I1124 03:36:59.434113 456828 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1124 03:36:59.434165 456828 preload.go:203] Found local preload: /home/jenkins/minikube-integration/21975-255205/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-arm64.tar.lz4
I1124 03:36:59.434176 456828 cache.go:65] Caching tarball of preloaded images
I1124 03:36:59.434207 456828 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763935653-21975@sha256:5273d148037cfb860f8152fbd08072e6c1f4b37ff9a51956a3c12965f5f2d787 in local docker daemon
I1124 03:36:59.434261 456828 preload.go:238] Found /home/jenkins/minikube-integration/21975-255205/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-arm64.tar.lz4 in cache, skipping download
I1124 03:36:59.434272 456828 cache.go:68] Finished verifying existence of preloaded tar for v1.28.0 on containerd
I1124 03:36:59.434383 456828 profile.go:143] Saving config to /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/config.json ...
I1124 03:36:59.434401 456828 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/config.json: {Name:mk69515acb07727840b36c87604cba4bd531db8a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 03:36:59.454350 456828 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763935653-21975@sha256:5273d148037cfb860f8152fbd08072e6c1f4b37ff9a51956a3c12965f5f2d787 in local docker daemon, skipping pull
I1124 03:36:59.454376 456828 cache.go:158] gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763935653-21975@sha256:5273d148037cfb860f8152fbd08072e6c1f4b37ff9a51956a3c12965f5f2d787 exists in daemon, skipping load
I1124 03:36:59.454396 456828 cache.go:243] Successfully downloaded all kic artifacts
I1124 03:36:59.454427 456828 start.go:360] acquireMachinesLock for old-k8s-version-098965: {Name:mkfaf6c0e20ffd0f03bcaf5e2568b90f1af41e0c Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 03:36:59.454522 456828 start.go:364] duration metric: took 80.46µs to acquireMachinesLock for "old-k8s-version-098965"
I1124 03:36:59.454546 456828 start.go:93] Provisioning new machine with config: &{Name:old-k8s-version-098965 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763935653-21975@sha256:5273d148037cfb860f8152fbd08072e6c1f4b37ff9a51956a3c12965f5f2d787 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-098965 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1124 03:36:59.454620 456828 start.go:125] createHost starting for "" (driver="docker")
I1124 03:36:59.460555 456828 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I1124 03:36:59.460925 456828 start.go:159] libmachine.API.Create for "old-k8s-version-098965" (driver="docker")
I1124 03:36:59.460965 456828 client.go:173] LocalClient.Create starting
I1124 03:36:59.461101 456828 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21975-255205/.minikube/certs/ca.pem
I1124 03:36:59.461190 456828 main.go:143] libmachine: Decoding PEM data...
I1124 03:36:59.461213 456828 main.go:143] libmachine: Parsing certificate...
I1124 03:36:59.461265 456828 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21975-255205/.minikube/certs/cert.pem
I1124 03:36:59.461289 456828 main.go:143] libmachine: Decoding PEM data...
I1124 03:36:59.461301 456828 main.go:143] libmachine: Parsing certificate...
I1124 03:36:59.461680 456828 cli_runner.go:164] Run: docker network inspect old-k8s-version-098965 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1124 03:36:59.477952 456828 cli_runner.go:211] docker network inspect old-k8s-version-098965 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1124 03:36:59.478060 456828 network_create.go:284] running [docker network inspect old-k8s-version-098965] to gather additional debugging logs...
I1124 03:36:59.478084 456828 cli_runner.go:164] Run: docker network inspect old-k8s-version-098965
W1124 03:36:59.501264 456828 cli_runner.go:211] docker network inspect old-k8s-version-098965 returned with exit code 1
I1124 03:36:59.501311 456828 network_create.go:287] error running [docker network inspect old-k8s-version-098965]: docker network inspect old-k8s-version-098965: exit status 1
stdout:
[]
stderr:
Error response from daemon: network old-k8s-version-098965 not found
I1124 03:36:59.501326 456828 network_create.go:289] output of [docker network inspect old-k8s-version-098965]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network old-k8s-version-098965 not found
** /stderr **
I1124 03:36:59.501444 456828 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1124 03:36:59.520261 456828 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-752aaa40bb3d IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:3a:00:20:e4:71:15} reservation:<nil>}
I1124 03:36:59.520804 456828 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-cbb0dee281db IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:76:ff:07:3e:91:0f} reservation:<nil>}
I1124 03:36:59.521086 456828 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-d95ffec60547 IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:fe:b5:f2:ed:07:1e} reservation:<nil>}
I1124 03:36:59.521451 456828 network.go:211] skipping subnet 192.168.76.0/24 that is taken: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName:br-1b3e5c8c3c27 IfaceIPv4:192.168.76.1 IfaceMTU:1500 IfaceMAC:7e:8b:7b:bd:23:4e} reservation:<nil>}
I1124 03:36:59.521977 456828 network.go:206] using free private subnet 192.168.85.0/24: &{IP:192.168.85.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.85.0/24 Gateway:192.168.85.1 ClientMin:192.168.85.2 ClientMax:192.168.85.254 Broadcast:192.168.85.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0x4001a3e7d0}
I1124 03:36:59.522000 456828 network_create.go:124] attempt to create docker network old-k8s-version-098965 192.168.85.0/24 with gateway 192.168.85.1 and MTU of 1500 ...
I1124 03:36:59.522073 456828 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.85.0/24 --gateway=192.168.85.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=old-k8s-version-098965 old-k8s-version-098965
I1124 03:36:59.585194 456828 network_create.go:108] docker network old-k8s-version-098965 192.168.85.0/24 created
I1124 03:36:59.585224 456828 kic.go:121] calculated static IP "192.168.85.2" for the "old-k8s-version-098965" container
I1124 03:36:59.585319 456828 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1124 03:36:59.601540 456828 cli_runner.go:164] Run: docker volume create old-k8s-version-098965 --label name.minikube.sigs.k8s.io=old-k8s-version-098965 --label created_by.minikube.sigs.k8s.io=true
I1124 03:36:59.619479 456828 oci.go:103] Successfully created a docker volume old-k8s-version-098965
I1124 03:36:59.619575 456828 cli_runner.go:164] Run: docker run --rm --name old-k8s-version-098965-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=old-k8s-version-098965 --entrypoint /usr/bin/test -v old-k8s-version-098965:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763935653-21975@sha256:5273d148037cfb860f8152fbd08072e6c1f4b37ff9a51956a3c12965f5f2d787 -d /var/lib
I1124 03:37:00.531368 456828 oci.go:107] Successfully prepared a docker volume old-k8s-version-098965
I1124 03:37:00.531432 456828 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1124 03:37:00.531457 456828 kic.go:194] Starting extracting preloaded images to volume ...
I1124 03:37:00.531528 456828 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21975-255205/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v old-k8s-version-098965:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763935653-21975@sha256:5273d148037cfb860f8152fbd08072e6c1f4b37ff9a51956a3c12965f5f2d787 -I lz4 -xf /preloaded.tar -C /extractDir
I1124 03:37:06.094759 456828 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21975-255205/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v old-k8s-version-098965:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763935653-21975@sha256:5273d148037cfb860f8152fbd08072e6c1f4b37ff9a51956a3c12965f5f2d787 -I lz4 -xf /preloaded.tar -C /extractDir: (5.56319468s)
I1124 03:37:06.094794 456828 kic.go:203] duration metric: took 5.563348412s to extract preloaded images to volume ...
W1124 03:37:06.094942 456828 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I1124 03:37:06.095054 456828 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1124 03:37:06.151713 456828 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname old-k8s-version-098965 --name old-k8s-version-098965 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=old-k8s-version-098965 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=old-k8s-version-098965 --network old-k8s-version-098965 --ip 192.168.85.2 --volume old-k8s-version-098965:/var --security-opt apparmor=unconfined --memory=3072mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763935653-21975@sha256:5273d148037cfb860f8152fbd08072e6c1f4b37ff9a51956a3c12965f5f2d787
I1124 03:37:06.468910 456828 cli_runner.go:164] Run: docker container inspect old-k8s-version-098965 --format={{.State.Running}}
I1124 03:37:06.491115 456828 cli_runner.go:164] Run: docker container inspect old-k8s-version-098965 --format={{.State.Status}}
I1124 03:37:06.525628 456828 cli_runner.go:164] Run: docker exec old-k8s-version-098965 stat /var/lib/dpkg/alternatives/iptables
I1124 03:37:06.579576 456828 oci.go:144] the created container "old-k8s-version-098965" has a running status.
I1124 03:37:06.579609 456828 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21975-255205/.minikube/machines/old-k8s-version-098965/id_rsa...
I1124 03:37:06.729919 456828 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21975-255205/.minikube/machines/old-k8s-version-098965/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1124 03:37:06.749775 456828 cli_runner.go:164] Run: docker container inspect old-k8s-version-098965 --format={{.State.Status}}
I1124 03:37:06.775133 456828 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1124 03:37:06.775157 456828 kic_runner.go:114] Args: [docker exec --privileged old-k8s-version-098965 chown docker:docker /home/docker/.ssh/authorized_keys]
I1124 03:37:06.826229 456828 cli_runner.go:164] Run: docker container inspect old-k8s-version-098965 --format={{.State.Status}}
I1124 03:37:06.849606 456828 machine.go:94] provisionDockerMachine start ...
I1124 03:37:06.849724 456828 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-098965
I1124 03:37:06.876665 456828 main.go:143] libmachine: Using SSH client type: native
I1124 03:37:06.877012 456828 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3dad70] 0x3dd270 <nil> [] 0s} 127.0.0.1 33418 <nil> <nil>}
I1124 03:37:06.877028 456828 main.go:143] libmachine: About to run SSH command:
hostname
I1124 03:37:06.877699 456828 main.go:143] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:43434->127.0.0.1:33418: read: connection reset by peer
I1124 03:37:10.029639 456828 main.go:143] libmachine: SSH cmd err, output: <nil>: old-k8s-version-098965
I1124 03:37:10.029683 456828 ubuntu.go:182] provisioning hostname "old-k8s-version-098965"
I1124 03:37:10.029771 456828 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-098965
I1124 03:37:10.053348 456828 main.go:143] libmachine: Using SSH client type: native
I1124 03:37:10.053702 456828 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3dad70] 0x3dd270 <nil> [] 0s} 127.0.0.1 33418 <nil> <nil>}
I1124 03:37:10.053721 456828 main.go:143] libmachine: About to run SSH command:
sudo hostname old-k8s-version-098965 && echo "old-k8s-version-098965" | sudo tee /etc/hostname
I1124 03:37:10.214587 456828 main.go:143] libmachine: SSH cmd err, output: <nil>: old-k8s-version-098965
I1124 03:37:10.214746 456828 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-098965
I1124 03:37:10.234062 456828 main.go:143] libmachine: Using SSH client type: native
I1124 03:37:10.234384 456828 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3dad70] 0x3dd270 <nil> [] 0s} 127.0.0.1 33418 <nil> <nil>}
I1124 03:37:10.234408 456828 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sold-k8s-version-098965' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 old-k8s-version-098965/g' /etc/hosts;
else
echo '127.0.1.1 old-k8s-version-098965' | sudo tee -a /etc/hosts;
fi
fi
I1124 03:37:10.384740 456828 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1124 03:37:10.384768 456828 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21975-255205/.minikube CaCertPath:/home/jenkins/minikube-integration/21975-255205/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21975-255205/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21975-255205/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21975-255205/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21975-255205/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21975-255205/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21975-255205/.minikube}
I1124 03:37:10.384789 456828 ubuntu.go:190] setting up certificates
I1124 03:37:10.384814 456828 provision.go:84] configureAuth start
I1124 03:37:10.384887 456828 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-098965
I1124 03:37:10.402644 456828 provision.go:143] copyHostCerts
I1124 03:37:10.402723 456828 exec_runner.go:144] found /home/jenkins/minikube-integration/21975-255205/.minikube/ca.pem, removing ...
I1124 03:37:10.402738 456828 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21975-255205/.minikube/ca.pem
I1124 03:37:10.402815 456828 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21975-255205/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21975-255205/.minikube/ca.pem (1078 bytes)
I1124 03:37:10.402908 456828 exec_runner.go:144] found /home/jenkins/minikube-integration/21975-255205/.minikube/cert.pem, removing ...
I1124 03:37:10.402916 456828 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21975-255205/.minikube/cert.pem
I1124 03:37:10.402943 456828 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21975-255205/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21975-255205/.minikube/cert.pem (1123 bytes)
I1124 03:37:10.403038 456828 exec_runner.go:144] found /home/jenkins/minikube-integration/21975-255205/.minikube/key.pem, removing ...
I1124 03:37:10.403049 456828 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21975-255205/.minikube/key.pem
I1124 03:37:10.403076 456828 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21975-255205/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21975-255205/.minikube/key.pem (1675 bytes)
I1124 03:37:10.403135 456828 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21975-255205/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21975-255205/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21975-255205/.minikube/certs/ca-key.pem org=jenkins.old-k8s-version-098965 san=[127.0.0.1 192.168.85.2 localhost minikube old-k8s-version-098965]
I1124 03:37:10.629214 456828 provision.go:177] copyRemoteCerts
I1124 03:37:10.629287 456828 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1124 03:37:10.629356 456828 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-098965
I1124 03:37:10.650240 456828 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33418 SSHKeyPath:/home/jenkins/minikube-integration/21975-255205/.minikube/machines/old-k8s-version-098965/id_rsa Username:docker}
I1124 03:37:10.757128 456828 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-255205/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I1124 03:37:10.776341 456828 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-255205/.minikube/machines/server.pem --> /etc/docker/server.pem (1233 bytes)
I1124 03:37:10.795643 456828 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-255205/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1124 03:37:10.814914 456828 provision.go:87] duration metric: took 430.069497ms to configureAuth
I1124 03:37:10.814945 456828 ubuntu.go:206] setting minikube options for container-runtime
I1124 03:37:10.815151 456828 config.go:182] Loaded profile config "old-k8s-version-098965": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1124 03:37:10.815164 456828 machine.go:97] duration metric: took 3.965526971s to provisionDockerMachine
I1124 03:37:10.815172 456828 client.go:176] duration metric: took 11.35420095s to LocalClient.Create
I1124 03:37:10.815193 456828 start.go:167] duration metric: took 11.354269562s to libmachine.API.Create "old-k8s-version-098965"
I1124 03:37:10.815206 456828 start.go:293] postStartSetup for "old-k8s-version-098965" (driver="docker")
I1124 03:37:10.815216 456828 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1124 03:37:10.815268 456828 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1124 03:37:10.815313 456828 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-098965
I1124 03:37:10.835952 456828 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33418 SSHKeyPath:/home/jenkins/minikube-integration/21975-255205/.minikube/machines/old-k8s-version-098965/id_rsa Username:docker}
I1124 03:37:10.940940 456828 ssh_runner.go:195] Run: cat /etc/os-release
I1124 03:37:10.944392 456828 main.go:143] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1124 03:37:10.944420 456828 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1124 03:37:10.944432 456828 filesync.go:126] Scanning /home/jenkins/minikube-integration/21975-255205/.minikube/addons for local assets ...
I1124 03:37:10.944531 456828 filesync.go:126] Scanning /home/jenkins/minikube-integration/21975-255205/.minikube/files for local assets ...
I1124 03:37:10.944626 456828 filesync.go:149] local asset: /home/jenkins/minikube-integration/21975-255205/.minikube/files/etc/ssl/certs/2570692.pem -> 2570692.pem in /etc/ssl/certs
I1124 03:37:10.944739 456828 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1124 03:37:10.953432 456828 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-255205/.minikube/files/etc/ssl/certs/2570692.pem --> /etc/ssl/certs/2570692.pem (1708 bytes)
I1124 03:37:10.971988 456828 start.go:296] duration metric: took 156.749021ms for postStartSetup
I1124 03:37:10.972390 456828 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-098965
I1124 03:37:10.990273 456828 profile.go:143] Saving config to /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/config.json ...
I1124 03:37:10.990577 456828 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1124 03:37:10.990655 456828 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-098965
I1124 03:37:11.011483 456828 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33418 SSHKeyPath:/home/jenkins/minikube-integration/21975-255205/.minikube/machines/old-k8s-version-098965/id_rsa Username:docker}
I1124 03:37:11.114126 456828 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1124 03:37:11.120113 456828 start.go:128] duration metric: took 11.665472167s to createHost
I1124 03:37:11.120148 456828 start.go:83] releasing machines lock for "old-k8s-version-098965", held for 11.665617423s
I1124 03:37:11.120263 456828 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-098965
I1124 03:37:11.139454 456828 ssh_runner.go:195] Run: cat /version.json
I1124 03:37:11.139475 456828 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1124 03:37:11.139509 456828 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-098965
I1124 03:37:11.139546 456828 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-098965
I1124 03:37:11.159657 456828 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33418 SSHKeyPath:/home/jenkins/minikube-integration/21975-255205/.minikube/machines/old-k8s-version-098965/id_rsa Username:docker}
I1124 03:37:11.178004 456828 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33418 SSHKeyPath:/home/jenkins/minikube-integration/21975-255205/.minikube/machines/old-k8s-version-098965/id_rsa Username:docker}
I1124 03:37:11.355366 456828 ssh_runner.go:195] Run: systemctl --version
I1124 03:37:11.362316 456828 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1124 03:37:11.366848 456828 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1124 03:37:11.366938 456828 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1124 03:37:11.395828 456828 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/10-crio-bridge.conflist.disabled] bridge cni config(s)
I1124 03:37:11.395905 456828 start.go:496] detecting cgroup driver to use...
I1124 03:37:11.395958 456828 detect.go:187] detected "cgroupfs" cgroup driver on host os
I1124 03:37:11.396051 456828 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1124 03:37:11.412427 456828 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1124 03:37:11.425664 456828 docker.go:218] disabling cri-docker service (if available) ...
I1124 03:37:11.425739 456828 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1124 03:37:11.443137 456828 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1124 03:37:11.466719 456828 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1124 03:37:11.592922 456828 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1124 03:37:11.733496 456828 docker.go:234] disabling docker service ...
I1124 03:37:11.733625 456828 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1124 03:37:11.756653 456828 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1124 03:37:11.773475 456828 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1124 03:37:11.921229 456828 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1124 03:37:12.062701 456828 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1124 03:37:12.076946 456828 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1124 03:37:12.092118 456828 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.9"|' /etc/containerd/config.toml"
I1124 03:37:12.101736 456828 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1124 03:37:12.111290 456828 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I1124 03:37:12.111365 456828 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I1124 03:37:12.120980 456828 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1124 03:37:12.130335 456828 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1124 03:37:12.139831 456828 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1124 03:37:12.149028 456828 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1124 03:37:12.158976 456828 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1124 03:37:12.168289 456828 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1124 03:37:12.179157 456828 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1124 03:37:12.189127 456828 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1124 03:37:12.196909 456828 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1124 03:37:12.204634 456828 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1124 03:37:12.341578 456828 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1124 03:37:12.476923 456828 start.go:543] Will wait 60s for socket path /run/containerd/containerd.sock
I1124 03:37:12.477008 456828 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I1124 03:37:12.481370 456828 start.go:564] Will wait 60s for crictl version
I1124 03:37:12.481445 456828 ssh_runner.go:195] Run: which crictl
I1124 03:37:12.485391 456828 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1124 03:37:12.516194 456828 start.go:580] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: v2.1.5
RuntimeApiVersion: v1
I1124 03:37:12.516279 456828 ssh_runner.go:195] Run: containerd --version
I1124 03:37:12.539070 456828 ssh_runner.go:195] Run: containerd --version
I1124 03:37:12.568276 456828 out.go:179] * Preparing Kubernetes v1.28.0 on containerd 2.1.5 ...
I1124 03:37:12.571173 456828 cli_runner.go:164] Run: docker network inspect old-k8s-version-098965 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1124 03:37:12.589022 456828 ssh_runner.go:195] Run: grep 192.168.85.1 host.minikube.internal$ /etc/hosts
I1124 03:37:12.593373 456828 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.85.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
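The /etc/hosts update above is a drop-then-append pattern: filter out any stale host.minikube.internal line, append the fresh record, then copy the temp file back with sudo. The same pattern as a reusable sketch (the helper name is hypothetical, not minikube's):
  # Hypothetical helper mirroring the hosts-injection command above.
  add_host_record() {
    local ip="$1" name="$2" tmp="/tmp/hosts.$$"
    { grep -v $'\t'"${name}"'$' /etc/hosts; printf '%s\t%s\n' "$ip" "$name"; } > "$tmp"
    sudo cp "$tmp" /etc/hosts && rm -f "$tmp"
  }
  add_host_record 192.168.85.1 host.minikube.internal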
I1124 03:37:12.603275 456828 kubeadm.go:884] updating cluster {Name:old-k8s-version-098965 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763935653-21975@sha256:5273d148037cfb860f8152fbd08072e6c1f4b37ff9a51956a3c12965f5f2d787 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-098965 Namespace:default APIServerHAVIP: APIServerName:minik
ubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false Cu
stomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1124 03:37:12.603400 456828 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1124 03:37:12.603468 456828 ssh_runner.go:195] Run: sudo crictl images --output json
I1124 03:37:12.632602 456828 containerd.go:627] all images are preloaded for containerd runtime.
I1124 03:37:12.632625 456828 containerd.go:534] Images already preloaded, skipping extraction
I1124 03:37:12.632687 456828 ssh_runner.go:195] Run: sudo crictl images --output json
I1124 03:37:12.658408 456828 containerd.go:627] all images are preloaded for containerd runtime.
I1124 03:37:12.658429 456828 cache_images.go:86] Images are preloaded, skipping loading
I1124 03:37:12.658437 456828 kubeadm.go:935] updating node { 192.168.85.2 8443 v1.28.0 containerd true true} ...
I1124 03:37:12.658540 456828 kubeadm.go:947] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.28.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=old-k8s-version-098965 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.85.2
[Install]
config:
{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-098965 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
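The kubelet fragment above is written a few steps later as the systemd drop-in 10-kubeadm.conf (326 bytes). Assuming the paths shown later in this log, checking that it landed and that kubelet runs with the expected --node-ip looks roughly like:
  # Inspect the drop-in and the effective kubelet flags.
  cat /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
  systemctl show kubelet -p ExecStart --no-pager | tr ' ' '\n' | grep -- '--node-ip'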
I1124 03:37:12.658617 456828 ssh_runner.go:195] Run: sudo crictl info
I1124 03:37:12.684758 456828 cni.go:84] Creating CNI manager for ""
I1124 03:37:12.684785 456828 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1124 03:37:12.684799 456828 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1124 03:37:12.684823 456828 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.85.2 APIServerPort:8443 KubernetesVersion:v1.28.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:old-k8s-version-098965 NodeName:old-k8s-version-098965 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.85.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.85.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt
StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1124 03:37:12.684947 456828 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.85.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///run/containerd/containerd.sock
name: "old-k8s-version-098965"
kubeletExtraArgs:
node-ip: 192.168.85.2
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.85.2"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.28.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
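The four documents above (InitConfiguration, ClusterConfiguration, KubeletConfiguration, KubeProxyConfiguration) are written to /var/tmp/minikube/kubeadm.yaml.new below and copied to kubeadm.yaml before init. If the rendered config ever needs checking by hand, a dry run against that file is one option (a sketch; kubeadm lives in the versioned binaries directory used throughout this log):
  # Dry-run the generated kubeadm config without touching cluster state.
  sudo /var/lib/minikube/binaries/v1.28.0/kubeadm init --config /var/tmp/minikube/kubeadm.yaml --dry-run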
I1124 03:37:12.685018 456828 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.28.0
I1124 03:37:12.693523 456828 binaries.go:51] Found k8s binaries, skipping transfer
I1124 03:37:12.693645 456828 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1124 03:37:12.702042 456828 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (326 bytes)
I1124 03:37:12.715991 456828 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1124 03:37:12.729770 456828 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2176 bytes)
I1124 03:37:12.743578 456828 ssh_runner.go:195] Run: grep 192.168.85.2 control-plane.minikube.internal$ /etc/hosts
I1124 03:37:12.747534 456828 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.85.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1124 03:37:12.757715 456828 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1124 03:37:12.884405 456828 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1124 03:37:12.905132 456828 certs.go:69] Setting up /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965 for IP: 192.168.85.2
I1124 03:37:12.905158 456828 certs.go:195] generating shared ca certs ...
I1124 03:37:12.905175 456828 certs.go:227] acquiring lock for ca certs: {Name:mk7774f5066ddc2da4b4108ade01c52c4ed6acef Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 03:37:12.905388 456828 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21975-255205/.minikube/ca.key
I1124 03:37:12.905463 456828 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21975-255205/.minikube/proxy-client-ca.key
I1124 03:37:12.905478 456828 certs.go:257] generating profile certs ...
I1124 03:37:12.905558 456828 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/client.key
I1124 03:37:12.905577 456828 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/client.crt with IP's: []
I1124 03:37:13.092952 456828 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/client.crt ...
I1124 03:37:13.092989 456828 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/client.crt: {Name:mkdd0fa6209ccf6aa2aa41557354bcbc75868f78 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 03:37:13.093227 456828 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/client.key ...
I1124 03:37:13.093245 456828 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/client.key: {Name:mk9ee935a6f1a8dd6673b97d66ec46cca5ad1664 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 03:37:13.093351 456828 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/apiserver.key.56338243
I1124 03:37:13.093373 456828 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/apiserver.crt.56338243 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.85.2]
I1124 03:37:13.449033 456828 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/apiserver.crt.56338243 ...
I1124 03:37:13.449067 456828 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/apiserver.crt.56338243: {Name:mk65bad85814a0a12971d39286d0e5c451efbbb6 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 03:37:13.449251 456828 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/apiserver.key.56338243 ...
I1124 03:37:13.449269 456828 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/apiserver.key.56338243: {Name:mk64b57d615101ac92823627ae52dbd8c44bfea8 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 03:37:13.449353 456828 certs.go:382] copying /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/apiserver.crt.56338243 -> /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/apiserver.crt
I1124 03:37:13.449436 456828 certs.go:386] copying /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/apiserver.key.56338243 -> /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/apiserver.key
I1124 03:37:13.449500 456828 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/proxy-client.key
I1124 03:37:13.449520 456828 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/proxy-client.crt with IP's: []
I1124 03:37:13.614481 456828 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/proxy-client.crt ...
I1124 03:37:13.614513 456828 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/proxy-client.crt: {Name:mk7045112c74be0d05a12bbf47e455d86596546e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 03:37:13.614698 456828 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/proxy-client.key ...
I1124 03:37:13.614719 456828 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/proxy-client.key: {Name:mkbadb2ce2b4c7ecb9f7755942cb7ff8139714e8 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 03:37:13.614923 456828 certs.go:484] found cert: /home/jenkins/minikube-integration/21975-255205/.minikube/certs/257069.pem (1338 bytes)
W1124 03:37:13.614972 456828 certs.go:480] ignoring /home/jenkins/minikube-integration/21975-255205/.minikube/certs/257069_empty.pem, impossibly tiny 0 bytes
I1124 03:37:13.614987 456828 certs.go:484] found cert: /home/jenkins/minikube-integration/21975-255205/.minikube/certs/ca-key.pem (1675 bytes)
I1124 03:37:13.615022 456828 certs.go:484] found cert: /home/jenkins/minikube-integration/21975-255205/.minikube/certs/ca.pem (1078 bytes)
I1124 03:37:13.615052 456828 certs.go:484] found cert: /home/jenkins/minikube-integration/21975-255205/.minikube/certs/cert.pem (1123 bytes)
I1124 03:37:13.615080 456828 certs.go:484] found cert: /home/jenkins/minikube-integration/21975-255205/.minikube/certs/key.pem (1675 bytes)
I1124 03:37:13.615129 456828 certs.go:484] found cert: /home/jenkins/minikube-integration/21975-255205/.minikube/files/etc/ssl/certs/2570692.pem (1708 bytes)
I1124 03:37:13.615732 456828 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-255205/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1124 03:37:13.637058 456828 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-255205/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1124 03:37:13.656196 456828 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-255205/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1124 03:37:13.675513 456828 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-255205/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I1124 03:37:13.694987 456828 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1432 bytes)
I1124 03:37:13.714921 456828 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I1124 03:37:13.733764 456828 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1124 03:37:13.753359 456828 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I1124 03:37:13.772856 456828 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-255205/.minikube/certs/257069.pem --> /usr/share/ca-certificates/257069.pem (1338 bytes)
I1124 03:37:13.791271 456828 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-255205/.minikube/files/etc/ssl/certs/2570692.pem --> /usr/share/ca-certificates/2570692.pem (1708 bytes)
I1124 03:37:13.810674 456828 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-255205/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1124 03:37:13.830260 456828 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1124 03:37:13.845031 456828 ssh_runner.go:195] Run: openssl version
I1124 03:37:13.854200 456828 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/257069.pem && ln -fs /usr/share/ca-certificates/257069.pem /etc/ssl/certs/257069.pem"
I1124 03:37:13.864090 456828 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/257069.pem
I1124 03:37:13.868253 456828 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 24 02:58 /usr/share/ca-certificates/257069.pem
I1124 03:37:13.868333 456828 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/257069.pem
I1124 03:37:13.910904 456828 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/257069.pem /etc/ssl/certs/51391683.0"
I1124 03:37:13.920025 456828 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/2570692.pem && ln -fs /usr/share/ca-certificates/2570692.pem /etc/ssl/certs/2570692.pem"
I1124 03:37:13.928734 456828 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/2570692.pem
I1124 03:37:13.932666 456828 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 24 02:58 /usr/share/ca-certificates/2570692.pem
I1124 03:37:13.932765 456828 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/2570692.pem
I1124 03:37:13.979663 456828 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/2570692.pem /etc/ssl/certs/3ec20f2e.0"
I1124 03:37:13.988918 456828 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1124 03:37:13.998028 456828 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1124 03:37:14.003766 456828 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 24 02:51 /usr/share/ca-certificates/minikubeCA.pem
I1124 03:37:14.003942 456828 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1124 03:37:14.050814 456828 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
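The 8-hex-digit symlink names above (51391683.0, 3ec20f2e.0, b5213941.0) are OpenSSL subject-name hashes, which is why each certificate is passed through openssl x509 -hash right before its symlink is created. Doing the same linkage by hand, for the minikubeCA cert from this log, is a two-liner:
  # Derive the /etc/ssl/certs symlink name from the cert's subject hash.
  hash=$(openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem)
  sudo ln -fs /etc/ssl/certs/minikubeCA.pem "/etc/ssl/certs/${hash}.0"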
I1124 03:37:14.059590 456828 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1124 03:37:14.063378 456828 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1124 03:37:14.063446 456828 kubeadm.go:401] StartCluster: {Name:old-k8s-version-098965 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763935653-21975@sha256:5273d148037cfb860f8152fbd08072e6c1f4b37ff9a51956a3c12965f5f2d787 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-098965 Namespace:default APIServerHAVIP: APIServerName:minikube
CA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false Custo
mQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1124 03:37:14.063519 456828 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I1124 03:37:14.063584 456828 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I1124 03:37:14.095036 456828 cri.go:89] found id: ""
I1124 03:37:14.095112 456828 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1124 03:37:14.103077 456828 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1124 03:37:14.111415 456828 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I1124 03:37:14.111511 456828 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1124 03:37:14.120533 456828 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1124 03:37:14.120557 456828 kubeadm.go:158] found existing configuration files:
I1124 03:37:14.120636 456828 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1124 03:37:14.129223 456828 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1124 03:37:14.129299 456828 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1124 03:37:14.137169 456828 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1124 03:37:14.145264 456828 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1124 03:37:14.145326 456828 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1124 03:37:14.153440 456828 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1124 03:37:14.161802 456828 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1124 03:37:14.161868 456828 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1124 03:37:14.169170 456828 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1124 03:37:14.176729 456828 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1124 03:37:14.176794 456828 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1124 03:37:14.184164 456828 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.28.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1124 03:37:14.235215 456828 kubeadm.go:319] [init] Using Kubernetes version: v1.28.0
I1124 03:37:14.235279 456828 kubeadm.go:319] [preflight] Running pre-flight checks
I1124 03:37:14.275786 456828 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I1124 03:37:14.275863 456828 kubeadm.go:319] KERNEL_VERSION: 5.15.0-1084-aws
I1124 03:37:14.275904 456828 kubeadm.go:319] OS: Linux
I1124 03:37:14.275954 456828 kubeadm.go:319] CGROUPS_CPU: enabled
I1124 03:37:14.276007 456828 kubeadm.go:319] CGROUPS_CPUACCT: enabled
I1124 03:37:14.276058 456828 kubeadm.go:319] CGROUPS_CPUSET: enabled
I1124 03:37:14.276111 456828 kubeadm.go:319] CGROUPS_DEVICES: enabled
I1124 03:37:14.276161 456828 kubeadm.go:319] CGROUPS_FREEZER: enabled
I1124 03:37:14.276213 456828 kubeadm.go:319] CGROUPS_MEMORY: enabled
I1124 03:37:14.276262 456828 kubeadm.go:319] CGROUPS_PIDS: enabled
I1124 03:37:14.276314 456828 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I1124 03:37:14.276364 456828 kubeadm.go:319] CGROUPS_BLKIO: enabled
I1124 03:37:14.361031 456828 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1124 03:37:14.361200 456828 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1124 03:37:14.361333 456828 kubeadm.go:319] [preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
I1124 03:37:14.534299 456828 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1124 03:37:14.537587 456828 out.go:252] - Generating certificates and keys ...
I1124 03:37:14.537751 456828 kubeadm.go:319] [certs] Using existing ca certificate authority
I1124 03:37:14.537876 456828 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1124 03:37:15.136064 456828 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I1124 03:37:15.790461 456828 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I1124 03:37:16.745198 456828 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1124 03:37:17.101081 456828 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1124 03:37:17.816844 456828 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1124 03:37:17.817225 456828 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [localhost old-k8s-version-098965] and IPs [192.168.85.2 127.0.0.1 ::1]
I1124 03:37:18.708622 456828 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1124 03:37:18.708941 456828 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [localhost old-k8s-version-098965] and IPs [192.168.85.2 127.0.0.1 ::1]
I1124 03:37:19.626997 456828 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1124 03:37:20.013744 456828 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1124 03:37:21.332223 456828 kubeadm.go:319] [certs] Generating "sa" key and public key
I1124 03:37:21.333010 456828 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1124 03:37:21.538924 456828 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1124 03:37:21.950934 456828 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1124 03:37:23.178695 456828 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1124 03:37:23.307692 456828 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1124 03:37:23.308662 456828 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1124 03:37:23.312055 456828 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1124 03:37:23.317674 456828 out.go:252] - Booting up control plane ...
I1124 03:37:23.317788 456828 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1124 03:37:23.317867 456828 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1124 03:37:23.317934 456828 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1124 03:37:23.338190 456828 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1124 03:37:23.339603 456828 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1124 03:37:23.339662 456828 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1124 03:37:23.480314 456828 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
I1124 03:37:31.483609 456828 kubeadm.go:319] [apiclient] All control plane components are healthy after 8.003694 seconds
I1124 03:37:31.483744 456828 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1124 03:37:31.502417 456828 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1124 03:37:32.033208 456828 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1124 03:37:32.033430 456828 kubeadm.go:319] [mark-control-plane] Marking the node old-k8s-version-098965 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1124 03:37:32.546541 456828 kubeadm.go:319] [bootstrap-token] Using token: ycw9qc.7i65x4n1zr1z1k2d
I1124 03:37:32.549515 456828 out.go:252] - Configuring RBAC rules ...
I1124 03:37:32.549646 456828 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1124 03:37:32.555568 456828 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1124 03:37:32.568444 456828 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1124 03:37:32.576351 456828 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1124 03:37:32.581974 456828 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1124 03:37:32.586465 456828 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1124 03:37:32.604043 456828 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1124 03:37:32.913255 456828 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1124 03:37:32.963682 456828 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1124 03:37:32.981293 456828 kubeadm.go:319]
I1124 03:37:32.981375 456828 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1124 03:37:32.981391 456828 kubeadm.go:319]
I1124 03:37:32.981470 456828 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1124 03:37:32.981490 456828 kubeadm.go:319]
I1124 03:37:32.981515 456828 kubeadm.go:319] mkdir -p $HOME/.kube
I1124 03:37:32.983497 456828 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1124 03:37:32.983565 456828 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1124 03:37:32.983572 456828 kubeadm.go:319]
I1124 03:37:32.983653 456828 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1124 03:37:32.983663 456828 kubeadm.go:319]
I1124 03:37:32.983715 456828 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1124 03:37:32.983723 456828 kubeadm.go:319]
I1124 03:37:32.983775 456828 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1124 03:37:32.983853 456828 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1124 03:37:32.983929 456828 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1124 03:37:32.983938 456828 kubeadm.go:319]
I1124 03:37:32.984037 456828 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1124 03:37:32.984117 456828 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1124 03:37:32.984123 456828 kubeadm.go:319]
I1124 03:37:32.984216 456828 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token ycw9qc.7i65x4n1zr1z1k2d \
I1124 03:37:32.984336 456828 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:7c8724c9df7bddf0d2f355149f7d996f734006ccfb255d81436a9364083c5f40 \
I1124 03:37:32.984390 456828 kubeadm.go:319] --control-plane
I1124 03:37:32.984397 456828 kubeadm.go:319]
I1124 03:37:32.984704 456828 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1124 03:37:32.984716 456828 kubeadm.go:319]
I1124 03:37:32.984916 456828 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token ycw9qc.7i65x4n1zr1z1k2d \
I1124 03:37:32.985043 456828 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:7c8724c9df7bddf0d2f355149f7d996f734006ccfb255d81436a9364083c5f40
I1124 03:37:32.991944 456828 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1084-aws\n", err: exit status 1
I1124 03:37:32.992068 456828 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1124 03:37:32.992094 456828 cni.go:84] Creating CNI manager for ""
I1124 03:37:32.992102 456828 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1124 03:37:32.995225 456828 out.go:179] * Configuring CNI (Container Networking Interface) ...
I1124 03:37:32.998096 456828 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I1124 03:37:33.004093 456828 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.28.0/kubectl ...
I1124 03:37:33.004119 456828 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I1124 03:37:33.036441 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I1124 03:37:34.155879 456828 ssh_runner.go:235] Completed: sudo /var/lib/minikube/binaries/v1.28.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml: (1.119390679s)
I1124 03:37:34.155921 456828 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1124 03:37:34.156043 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes old-k8s-version-098965 minikube.k8s.io/updated_at=2025_11_24T03_37_34_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=525fef2394fe4854b27b3c3385e33403fd802864 minikube.k8s.io/name=old-k8s-version-098965 minikube.k8s.io/primary=true
I1124 03:37:34.156059 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:34.370014 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:34.370081 456828 ops.go:34] apiserver oom_adj: -16
I1124 03:37:34.870621 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:35.370425 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:35.870744 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:36.370591 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:36.870102 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:37.370755 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:37.870481 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:38.370729 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:38.870716 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:39.370861 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:39.870112 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:40.370985 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:40.870131 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:41.370224 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:41.870910 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:42.370129 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:42.870708 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:43.370299 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:43.870132 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:44.370373 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:44.870148 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:45.370873 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:45.870208 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:46.370930 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:46.870103 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:46.965988 456828 kubeadm.go:1114] duration metric: took 12.810009639s to wait for elevateKubeSystemPrivileges
I1124 03:37:46.966014 456828 kubeadm.go:403] duration metric: took 32.902577839s to StartCluster
I1124 03:37:46.966033 456828 settings.go:142] acquiring lock: {Name:mk06b563e5bc383cd64ed92ea3d8ac6aac195923 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 03:37:46.966096 456828 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21975-255205/kubeconfig
I1124 03:37:46.967091 456828 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21975-255205/kubeconfig: {Name:mk59b88a9b5c6c93f7412b3f64976d4efe64bdb2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 03:37:46.967316 456828 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1124 03:37:46.967431 456828 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1124 03:37:46.967681 456828 config.go:182] Loaded profile config "old-k8s-version-098965": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1124 03:37:46.967714 456828 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1124 03:37:46.967774 456828 addons.go:70] Setting storage-provisioner=true in profile "old-k8s-version-098965"
I1124 03:37:46.967788 456828 addons.go:239] Setting addon storage-provisioner=true in "old-k8s-version-098965"
I1124 03:37:46.967809 456828 host.go:66] Checking if "old-k8s-version-098965" exists ...
I1124 03:37:46.968568 456828 cli_runner.go:164] Run: docker container inspect old-k8s-version-098965 --format={{.State.Status}}
I1124 03:37:46.969265 456828 addons.go:70] Setting default-storageclass=true in profile "old-k8s-version-098965"
I1124 03:37:46.969293 456828 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "old-k8s-version-098965"
I1124 03:37:46.969620 456828 cli_runner.go:164] Run: docker container inspect old-k8s-version-098965 --format={{.State.Status}}
I1124 03:37:46.976026 456828 out.go:179] * Verifying Kubernetes components...
I1124 03:37:46.980332 456828 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1124 03:37:47.005475 456828 addons.go:239] Setting addon default-storageclass=true in "old-k8s-version-098965"
I1124 03:37:47.005521 456828 host.go:66] Checking if "old-k8s-version-098965" exists ...
I1124 03:37:47.006016 456828 cli_runner.go:164] Run: docker container inspect old-k8s-version-098965 --format={{.State.Status}}
I1124 03:37:47.021223 456828 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1124 03:37:47.025797 456828 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1124 03:37:47.025822 456828 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1124 03:37:47.025899 456828 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports \"22/tcp\") 0).HostPort}}'" old-k8s-version-098965
I1124 03:37:47.043575 456828 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1124 03:37:47.043596 456828 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1124 03:37:47.043662 456828 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports \"22/tcp\") 0).HostPort}}'" old-k8s-version-098965
I1124 03:37:47.067444 456828 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33418 SSHKeyPath:/home/jenkins/minikube-integration/21975-255205/.minikube/machines/old-k8s-version-098965/id_rsa Username:docker}
I1124 03:37:47.085937 456828 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33418 SSHKeyPath:/home/jenkins/minikube-integration/21975-255205/.minikube/machines/old-k8s-version-098965/id_rsa Username:docker}
I1124 03:37:47.279804 456828 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.85.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I1124 03:37:47.286358 456828 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1124 03:37:47.448103 456828 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1124 03:37:47.467412 456828 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1124 03:37:48.289350 456828 ssh_runner.go:235] Completed: sudo systemctl start kubelet: (1.002895345s)
I1124 03:37:48.290385 456828 node_ready.go:35] waiting up to 6m0s for node "old-k8s-version-098965" to be "Ready" ...
I1124 03:37:48.311715 456828 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.85.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -": (1.031826007s)
I1124 03:37:48.311750 456828 start.go:977] {"host.minikube.internal": 192.168.85.1} host record injected into CoreDNS's ConfigMap
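The pipeline above rewrites the coredns ConfigMap in place: sed injects a hosts { ... fallthrough } stanza ahead of the forward line (plus a log directive), and the result is piped back through kubectl replace. A quick check that the record took, using the same kubeconfig and binary paths as this log:
  # Confirm the host.minikube.internal record is present in the Corefile.
  sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig \
    -n kube-system get configmap coredns -o yaml | grep -A3 'hosts {'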
I1124 03:37:48.801783 456828 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (1.334276635s)
I1124 03:37:48.805329 456828 out.go:179] * Enabled addons: default-storageclass, storage-provisioner
I1124 03:37:48.808579 456828 addons.go:530] duration metric: took 1.840852722s for enable addons: enabled=[default-storageclass storage-provisioner]
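Both addons were applied with the cluster's own kubectl against /var/lib/minikube/kubeconfig. Seeing what they created amounts to (a sketch; the storage-provisioner pod name appears further down in this log):
  # List the default StorageClass and the provisioner pod created by the addons.
  sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig get storageclass
  sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get pod storage-provisioner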
I1124 03:37:48.816214 456828 kapi.go:214] "coredns" deployment in "kube-system" namespace and "old-k8s-version-098965" context rescaled to 1 replicas
W1124 03:37:50.294251 456828 node_ready.go:57] node "old-k8s-version-098965" has "Ready":"False" status (will retry)
W1124 03:37:52.793560 456828 node_ready.go:57] node "old-k8s-version-098965" has "Ready":"False" status (will retry)
W1124 03:37:54.793903 456828 node_ready.go:57] node "old-k8s-version-098965" has "Ready":"False" status (will retry)
W1124 03:37:56.794298 456828 node_ready.go:57] node "old-k8s-version-098965" has "Ready":"False" status (will retry)
W1124 03:37:59.293536 456828 node_ready.go:57] node "old-k8s-version-098965" has "Ready":"False" status (will retry)
I1124 03:38:00.344278 456828 node_ready.go:49] node "old-k8s-version-098965" is "Ready"
I1124 03:38:00.344383 456828 node_ready.go:38] duration metric: took 12.053923317s for node "old-k8s-version-098965" to be "Ready" ...
I1124 03:38:00.344417 456828 api_server.go:52] waiting for apiserver process to appear ...
I1124 03:38:00.344536 456828 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1124 03:38:00.402952 456828 api_server.go:72] duration metric: took 13.435606359s to wait for apiserver process to appear ...
I1124 03:38:00.402979 456828 api_server.go:88] waiting for apiserver healthz status ...
I1124 03:38:00.403000 456828 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
I1124 03:38:00.414315 456828 api_server.go:279] https://192.168.85.2:8443/healthz returned 200:
ok
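The healthz probe above hits the apiserver directly on the node IP. The same probe by hand is a single request (sketch; -k because the serving cert is signed by the cluster CA, not a public one):
  # Reproduce the readiness probe.
  curl -sk https://192.168.85.2:8443/healthz
  # expected output: ok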
I1124 03:38:00.416457 456828 api_server.go:141] control plane version: v1.28.0
I1124 03:38:00.416601 456828 api_server.go:131] duration metric: took 13.613451ms to wait for apiserver health ...
I1124 03:38:00.416631 456828 system_pods.go:43] waiting for kube-system pods to appear ...
I1124 03:38:00.425631 456828 system_pods.go:59] 8 kube-system pods found
I1124 03:38:00.425724 456828 system_pods.go:61] "coredns-5dd5756b68-2kmf2" [9c6642fb-17b7-4199-b927-eb63b9a58260] Pending
I1124 03:38:00.425749 456828 system_pods.go:61] "etcd-old-k8s-version-098965" [994c486f-9839-4407-bc6d-d7c52c9dcfe7] Running
I1124 03:38:00.425783 456828 system_pods.go:61] "kindnet-mctv9" [0f0d91cd-7d64-482e-b33c-383b20f5bd79] Running
I1124 03:38:00.425810 456828 system_pods.go:61] "kube-apiserver-old-k8s-version-098965" [777b36fe-0c46-4427-90b9-ef48ae1cc287] Running
I1124 03:38:00.425830 456828 system_pods.go:61] "kube-controller-manager-old-k8s-version-098965" [3be22a1a-db9f-446f-9b0a-e61ce5482e12] Running
I1124 03:38:00.425851 456828 system_pods.go:61] "kube-proxy-5t7nq" [6050bdb0-6390-48c7-863f-520ef6277ad8] Running
I1124 03:38:00.425879 456828 system_pods.go:61] "kube-scheduler-old-k8s-version-098965" [ff509e4b-4fde-4ea0-8261-5f4463c5be01] Running
I1124 03:38:00.425909 456828 system_pods.go:61] "storage-provisioner" [9ede1da5-704c-4aab-93e0-77ce93158129] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 03:38:00.425943 456828 system_pods.go:74] duration metric: took 9.290401ms to wait for pod list to return data ...
I1124 03:38:00.425969 456828 default_sa.go:34] waiting for default service account to be created ...
I1124 03:38:00.429247 456828 default_sa.go:45] found service account: "default"
I1124 03:38:00.429360 456828 default_sa.go:55] duration metric: took 3.356866ms for default service account to be created ...
I1124 03:38:00.429393 456828 system_pods.go:116] waiting for k8s-apps to be running ...
I1124 03:38:00.435199 456828 system_pods.go:86] 8 kube-system pods found
I1124 03:38:00.435313 456828 system_pods.go:89] "coredns-5dd5756b68-2kmf2" [9c6642fb-17b7-4199-b927-eb63b9a58260] Pending
I1124 03:38:00.435337 456828 system_pods.go:89] "etcd-old-k8s-version-098965" [994c486f-9839-4407-bc6d-d7c52c9dcfe7] Running
I1124 03:38:00.435376 456828 system_pods.go:89] "kindnet-mctv9" [0f0d91cd-7d64-482e-b33c-383b20f5bd79] Running
I1124 03:38:00.435403 456828 system_pods.go:89] "kube-apiserver-old-k8s-version-098965" [777b36fe-0c46-4427-90b9-ef48ae1cc287] Running
I1124 03:38:00.435426 456828 system_pods.go:89] "kube-controller-manager-old-k8s-version-098965" [3be22a1a-db9f-446f-9b0a-e61ce5482e12] Running
I1124 03:38:00.435460 456828 system_pods.go:89] "kube-proxy-5t7nq" [6050bdb0-6390-48c7-863f-520ef6277ad8] Running
I1124 03:38:00.435482 456828 system_pods.go:89] "kube-scheduler-old-k8s-version-098965" [ff509e4b-4fde-4ea0-8261-5f4463c5be01] Running
I1124 03:38:00.435514 456828 system_pods.go:89] "storage-provisioner" [9ede1da5-704c-4aab-93e0-77ce93158129] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 03:38:00.435576 456828 retry.go:31] will retry after 251.537949ms: missing components: kube-dns
I1124 03:38:00.691897 456828 system_pods.go:86] 8 kube-system pods found
I1124 03:38:00.691936 456828 system_pods.go:89] "coredns-5dd5756b68-2kmf2" [9c6642fb-17b7-4199-b927-eb63b9a58260] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 03:38:00.691943 456828 system_pods.go:89] "etcd-old-k8s-version-098965" [994c486f-9839-4407-bc6d-d7c52c9dcfe7] Running
I1124 03:38:00.691949 456828 system_pods.go:89] "kindnet-mctv9" [0f0d91cd-7d64-482e-b33c-383b20f5bd79] Running
I1124 03:38:00.691954 456828 system_pods.go:89] "kube-apiserver-old-k8s-version-098965" [777b36fe-0c46-4427-90b9-ef48ae1cc287] Running
I1124 03:38:00.691959 456828 system_pods.go:89] "kube-controller-manager-old-k8s-version-098965" [3be22a1a-db9f-446f-9b0a-e61ce5482e12] Running
I1124 03:38:00.691968 456828 system_pods.go:89] "kube-proxy-5t7nq" [6050bdb0-6390-48c7-863f-520ef6277ad8] Running
I1124 03:38:00.691976 456828 system_pods.go:89] "kube-scheduler-old-k8s-version-098965" [ff509e4b-4fde-4ea0-8261-5f4463c5be01] Running
I1124 03:38:00.691981 456828 system_pods.go:89] "storage-provisioner" [9ede1da5-704c-4aab-93e0-77ce93158129] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 03:38:00.691999 456828 retry.go:31] will retry after 269.359214ms: missing components: kube-dns
I1124 03:38:00.970909 456828 system_pods.go:86] 8 kube-system pods found
I1124 03:38:00.970944 456828 system_pods.go:89] "coredns-5dd5756b68-2kmf2" [9c6642fb-17b7-4199-b927-eb63b9a58260] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 03:38:00.970951 456828 system_pods.go:89] "etcd-old-k8s-version-098965" [994c486f-9839-4407-bc6d-d7c52c9dcfe7] Running
I1124 03:38:00.970957 456828 system_pods.go:89] "kindnet-mctv9" [0f0d91cd-7d64-482e-b33c-383b20f5bd79] Running
I1124 03:38:00.970961 456828 system_pods.go:89] "kube-apiserver-old-k8s-version-098965" [777b36fe-0c46-4427-90b9-ef48ae1cc287] Running
I1124 03:38:00.970966 456828 system_pods.go:89] "kube-controller-manager-old-k8s-version-098965" [3be22a1a-db9f-446f-9b0a-e61ce5482e12] Running
I1124 03:38:00.970969 456828 system_pods.go:89] "kube-proxy-5t7nq" [6050bdb0-6390-48c7-863f-520ef6277ad8] Running
I1124 03:38:00.970973 456828 system_pods.go:89] "kube-scheduler-old-k8s-version-098965" [ff509e4b-4fde-4ea0-8261-5f4463c5be01] Running
I1124 03:38:00.970978 456828 system_pods.go:89] "storage-provisioner" [9ede1da5-704c-4aab-93e0-77ce93158129] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 03:38:00.970996 456828 retry.go:31] will retry after 426.462867ms: missing components: kube-dns
I1124 03:38:01.403286 456828 system_pods.go:86] 8 kube-system pods found
I1124 03:38:01.403315 456828 system_pods.go:89] "coredns-5dd5756b68-2kmf2" [9c6642fb-17b7-4199-b927-eb63b9a58260] Running
I1124 03:38:01.403322 456828 system_pods.go:89] "etcd-old-k8s-version-098965" [994c486f-9839-4407-bc6d-d7c52c9dcfe7] Running
I1124 03:38:01.403330 456828 system_pods.go:89] "kindnet-mctv9" [0f0d91cd-7d64-482e-b33c-383b20f5bd79] Running
I1124 03:38:01.403335 456828 system_pods.go:89] "kube-apiserver-old-k8s-version-098965" [777b36fe-0c46-4427-90b9-ef48ae1cc287] Running
I1124 03:38:01.403341 456828 system_pods.go:89] "kube-controller-manager-old-k8s-version-098965" [3be22a1a-db9f-446f-9b0a-e61ce5482e12] Running
I1124 03:38:01.403345 456828 system_pods.go:89] "kube-proxy-5t7nq" [6050bdb0-6390-48c7-863f-520ef6277ad8] Running
I1124 03:38:01.403349 456828 system_pods.go:89] "kube-scheduler-old-k8s-version-098965" [ff509e4b-4fde-4ea0-8261-5f4463c5be01] Running
I1124 03:38:01.403353 456828 system_pods.go:89] "storage-provisioner" [9ede1da5-704c-4aab-93e0-77ce93158129] Running
I1124 03:38:01.403362 456828 system_pods.go:126] duration metric: took 973.897592ms to wait for k8s-apps to be running ...
I1124 03:38:01.403373 456828 system_svc.go:44] waiting for kubelet service to be running ....
I1124 03:38:01.403427 456828 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1124 03:38:01.421738 456828 system_svc.go:56] duration metric: took 18.35448ms WaitForService to wait for kubelet
I1124 03:38:01.421765 456828 kubeadm.go:587] duration metric: took 14.454425317s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1124 03:38:01.421786 456828 node_conditions.go:102] verifying NodePressure condition ...
I1124 03:38:01.425010 456828 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
I1124 03:38:01.425044 456828 node_conditions.go:123] node cpu capacity is 2
I1124 03:38:01.425059 456828 node_conditions.go:105] duration metric: took 3.267233ms to run NodePressure ...
I1124 03:38:01.425099 456828 start.go:242] waiting for startup goroutines ...
I1124 03:38:01.425108 456828 start.go:247] waiting for cluster config update ...
I1124 03:38:01.425124 456828 start.go:256] writing updated cluster config ...
I1124 03:38:01.425448 456828 ssh_runner.go:195] Run: rm -f paused
I1124 03:38:01.429212 456828 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1124 03:38:01.435249 456828 pod_ready.go:83] waiting for pod "coredns-5dd5756b68-2kmf2" in "kube-system" namespace to be "Ready" or be gone ...
I1124 03:38:01.447218 456828 pod_ready.go:94] pod "coredns-5dd5756b68-2kmf2" is "Ready"
I1124 03:38:01.447254 456828 pod_ready.go:86] duration metric: took 11.97007ms for pod "coredns-5dd5756b68-2kmf2" in "kube-system" namespace to be "Ready" or be gone ...
I1124 03:38:01.452465 456828 pod_ready.go:83] waiting for pod "etcd-old-k8s-version-098965" in "kube-system" namespace to be "Ready" or be gone ...
I1124 03:38:01.459099 456828 pod_ready.go:94] pod "etcd-old-k8s-version-098965" is "Ready"
I1124 03:38:01.459128 456828 pod_ready.go:86] duration metric: took 6.576599ms for pod "etcd-old-k8s-version-098965" in "kube-system" namespace to be "Ready" or be gone ...
I1124 03:38:01.471032 456828 pod_ready.go:83] waiting for pod "kube-apiserver-old-k8s-version-098965" in "kube-system" namespace to be "Ready" or be gone ...
I1124 03:38:01.476662 456828 pod_ready.go:94] pod "kube-apiserver-old-k8s-version-098965" is "Ready"
I1124 03:38:01.476688 456828 pod_ready.go:86] duration metric: took 5.56861ms for pod "kube-apiserver-old-k8s-version-098965" in "kube-system" namespace to be "Ready" or be gone ...
I1124 03:38:01.480096 456828 pod_ready.go:83] waiting for pod "kube-controller-manager-old-k8s-version-098965" in "kube-system" namespace to be "Ready" or be gone ...
I1124 03:38:01.833649 456828 pod_ready.go:94] pod "kube-controller-manager-old-k8s-version-098965" is "Ready"
I1124 03:38:01.833715 456828 pod_ready.go:86] duration metric: took 353.588012ms for pod "kube-controller-manager-old-k8s-version-098965" in "kube-system" namespace to be "Ready" or be gone ...
I1124 03:38:02.035335 456828 pod_ready.go:83] waiting for pod "kube-proxy-5t7nq" in "kube-system" namespace to be "Ready" or be gone ...
I1124 03:38:02.433941 456828 pod_ready.go:94] pod "kube-proxy-5t7nq" is "Ready"
I1124 03:38:02.433973 456828 pod_ready.go:86] duration metric: took 398.560828ms for pod "kube-proxy-5t7nq" in "kube-system" namespace to be "Ready" or be gone ...
I1124 03:38:02.633735 456828 pod_ready.go:83] waiting for pod "kube-scheduler-old-k8s-version-098965" in "kube-system" namespace to be "Ready" or be gone ...
I1124 03:38:03.033530 456828 pod_ready.go:94] pod "kube-scheduler-old-k8s-version-098965" is "Ready"
I1124 03:38:03.033561 456828 pod_ready.go:86] duration metric: took 399.801466ms for pod "kube-scheduler-old-k8s-version-098965" in "kube-system" namespace to be "Ready" or be gone ...
I1124 03:38:03.033575 456828 pod_ready.go:40] duration metric: took 1.604321281s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1124 03:38:03.103182 456828 start.go:625] kubectl: 1.33.2, cluster: 1.28.0 (minor skew: 5)
I1124 03:38:03.106579 456828 out.go:203]
W1124 03:38:03.109581 456828 out.go:285] ! /usr/local/bin/kubectl is version 1.33.2, which may have incompatibilities with Kubernetes 1.28.0.
I1124 03:38:03.112629 456828 out.go:179] - Want kubectl v1.28.0? Try 'minikube kubectl -- get pods -A'
I1124 03:38:03.116685 456828 out.go:179] * Done! kubectl is now configured to use "old-k8s-version-098965" cluster and "default" namespace by default
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD NAMESPACE
2473c87591ead 1611cd07b61d5 6 seconds ago Running busybox 0 0780548608168 busybox default
32ab776c7affb ba04bb24b9575 11 seconds ago Running storage-provisioner 0 ddcd48a171630 storage-provisioner kube-system
28a52e8d1e9e4 97e04611ad434 11 seconds ago Running coredns 0 aa01d3a3f7cba coredns-5dd5756b68-2kmf2 kube-system
37f20e76ffbc2 b1a8c6f707935 23 seconds ago Running kindnet-cni 0 2a6bd814ac01e kindnet-mctv9 kube-system
4baa8c107b38c 940f54a5bcae9 25 seconds ago Running kube-proxy 0 b85e6b6d514cc kube-proxy-5t7nq kube-system
8fb25b361e023 9cdd6470f48c8 47 seconds ago Running etcd 0 b669262c23763 etcd-old-k8s-version-098965 kube-system
666ad3b5bbcc5 00543d2fe5d71 47 seconds ago Running kube-apiserver 0 9edcf3c3e4d9e kube-apiserver-old-k8s-version-098965 kube-system
95905c97af2e4 762dce4090c5f 47 seconds ago Running kube-scheduler 0 d6f0d280dee01 kube-scheduler-old-k8s-version-098965 kube-system
94d7bde87dab5 46cc66ccc7c19 47 seconds ago Running kube-controller-manager 0 8eb2c9f965876 kube-controller-manager-old-k8s-version-098965 kube-system
==> containerd <==
Nov 24 03:38:00 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:00.893826021Z" level=info msg="Container 32ab776c7affb85bd5965dee0104d1470d0553d2b7a80e479ea0fc030ea67388: CDI devices from CRI Config.CDIDevices: []"
Nov 24 03:38:00 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:00.901529017Z" level=info msg="StartContainer for \"28a52e8d1e9e4c99322bf7f4a542d09e22eed502ede9105bfd3867fff8b743ae\""
Nov 24 03:38:00 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:00.902763625Z" level=info msg="connecting to shim 28a52e8d1e9e4c99322bf7f4a542d09e22eed502ede9105bfd3867fff8b743ae" address="unix:///run/containerd/s/70d70892534976c42f017b6a57c07c5f882e60cfc509cf351b04e5c63883f9c6" protocol=ttrpc version=3
Nov 24 03:38:00 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:00.923930845Z" level=info msg="CreateContainer within sandbox \"ddcd48a171630d558701e23e8b84d43ca3b433b204586da5fd73071e2c73cf02\" for &ContainerMetadata{Name:storage-provisioner,Attempt:0,} returns container id \"32ab776c7affb85bd5965dee0104d1470d0553d2b7a80e479ea0fc030ea67388\""
Nov 24 03:38:00 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:00.927472048Z" level=info msg="StartContainer for \"32ab776c7affb85bd5965dee0104d1470d0553d2b7a80e479ea0fc030ea67388\""
Nov 24 03:38:00 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:00.929854567Z" level=info msg="connecting to shim 32ab776c7affb85bd5965dee0104d1470d0553d2b7a80e479ea0fc030ea67388" address="unix:///run/containerd/s/5ddd01c5f051ac256aede9694ac052a9c600e13f3e3f44d833556ac361f844c9" protocol=ttrpc version=3
Nov 24 03:38:00 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:00.986416771Z" level=info msg="StartContainer for \"28a52e8d1e9e4c99322bf7f4a542d09e22eed502ede9105bfd3867fff8b743ae\" returns successfully"
Nov 24 03:38:01 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:01.032852607Z" level=info msg="StartContainer for \"32ab776c7affb85bd5965dee0104d1470d0553d2b7a80e479ea0fc030ea67388\" returns successfully"
Nov 24 03:38:03 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:03.633430300Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:b377806c-ae20-44d2-9d0f-07b097026328,Namespace:default,Attempt:0,}"
Nov 24 03:38:03 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:03.690035484Z" level=info msg="connecting to shim 07805486081686e75b51f404a8d192120c8e44f1df35435e82a18cd840b250a6" address="unix:///run/containerd/s/62c9570c9e36a3dfb4b0454e8ff44f8873d73aec0247dc7c06a4c63bdd606e84" namespace=k8s.io protocol=ttrpc version=3
Nov 24 03:38:03 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:03.757467931Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:b377806c-ae20-44d2-9d0f-07b097026328,Namespace:default,Attempt:0,} returns sandbox id \"07805486081686e75b51f404a8d192120c8e44f1df35435e82a18cd840b250a6\""
Nov 24 03:38:03 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:03.759222049Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\""
Nov 24 03:38:05 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:05.797829117Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 24 03:38:05 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:05.799579082Z" level=info msg="stop pulling image gcr.io/k8s-minikube/busybox:1.28.4-glibc: active requests=0, bytes read=1937183"
Nov 24 03:38:05 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:05.802353497Z" level=info msg="ImageCreate event name:\"sha256:1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 24 03:38:05 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:05.809134257Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 24 03:38:05 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:05.810394309Z" level=info msg="Pulled image \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" with image id \"sha256:1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c\", repo tag \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\", repo digest \"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\", size \"1935750\" in 2.051134434s"
Nov 24 03:38:05 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:05.810432586Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" returns image reference \"sha256:1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c\""
Nov 24 03:38:05 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:05.813949961Z" level=info msg="CreateContainer within sandbox \"07805486081686e75b51f404a8d192120c8e44f1df35435e82a18cd840b250a6\" for container &ContainerMetadata{Name:busybox,Attempt:0,}"
Nov 24 03:38:05 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:05.827500676Z" level=info msg="Container 2473c87591ead98e23e27a6582c8fc6bfb2afc235a7786ab166b053a67742968: CDI devices from CRI Config.CDIDevices: []"
Nov 24 03:38:05 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:05.839960013Z" level=info msg="CreateContainer within sandbox \"07805486081686e75b51f404a8d192120c8e44f1df35435e82a18cd840b250a6\" for &ContainerMetadata{Name:busybox,Attempt:0,} returns container id \"2473c87591ead98e23e27a6582c8fc6bfb2afc235a7786ab166b053a67742968\""
Nov 24 03:38:05 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:05.841216955Z" level=info msg="StartContainer for \"2473c87591ead98e23e27a6582c8fc6bfb2afc235a7786ab166b053a67742968\""
Nov 24 03:38:05 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:05.842449240Z" level=info msg="connecting to shim 2473c87591ead98e23e27a6582c8fc6bfb2afc235a7786ab166b053a67742968" address="unix:///run/containerd/s/62c9570c9e36a3dfb4b0454e8ff44f8873d73aec0247dc7c06a4c63bdd606e84" protocol=ttrpc version=3
Nov 24 03:38:05 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:05.911309396Z" level=info msg="StartContainer for \"2473c87591ead98e23e27a6582c8fc6bfb2afc235a7786ab166b053a67742968\" returns successfully"
Nov 24 03:38:11 old-k8s-version-098965 containerd[757]: E1124 03:38:11.465754 757 websocket.go:100] "Unhandled Error" err="unable to upgrade websocket connection: websocket server finished before becoming ready" logger="UnhandledError"
==> coredns [28a52e8d1e9e4c99322bf7f4a542d09e22eed502ede9105bfd3867fff8b743ae] <==
.:53
[INFO] plugin/reload: Running configuration SHA512 = 8aa94104b4dae56b00431f7362ac05b997af2246775de35dc2eb361b0707b2fa7199f9ddfdba27fdef1331b76d09c41700f6cb5d00836dabab7c0df8e651283f
CoreDNS-1.10.1
linux/arm64, go1.20, 055b2c3
[INFO] 127.0.0.1:36019 - 46965 "HINFO IN 101273306430571101.3418018538030985896. udp 56 false 512" NXDOMAIN qr,rd,ra 56 0.022963225s
==> describe nodes <==
Name: old-k8s-version-098965
Roles: control-plane
Labels: beta.kubernetes.io/arch=arm64
beta.kubernetes.io/os=linux
kubernetes.io/arch=arm64
kubernetes.io/hostname=old-k8s-version-098965
kubernetes.io/os=linux
minikube.k8s.io/commit=525fef2394fe4854b27b3c3385e33403fd802864
minikube.k8s.io/name=old-k8s-version-098965
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_11_24T03_37_34_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Mon, 24 Nov 2025 03:37:29 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: old-k8s-version-098965
AcquireTime: <unset>
RenewTime: Mon, 24 Nov 2025 03:38:03 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Mon, 24 Nov 2025 03:38:03 +0000 Mon, 24 Nov 2025 03:37:25 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Mon, 24 Nov 2025 03:38:03 +0000 Mon, 24 Nov 2025 03:37:25 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Mon, 24 Nov 2025 03:38:03 +0000 Mon, 24 Nov 2025 03:37:25 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Mon, 24 Nov 2025 03:38:03 +0000 Mon, 24 Nov 2025 03:38:00 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.85.2
Hostname: old-k8s-version-098965
Capacity:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022296Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022296Ki
pods: 110
System Info:
Machine ID: 304a86241bf1bbb85bd31db5692386d7
System UUID: 016e6bb7-0740-4efc-ad46-1814703763df
Boot ID: 63a8a852-1462-44b1-9d6f-f77d26e8568f
Kernel Version: 5.15.0-1084-aws
OS Image: Debian GNU/Linux 12 (bookworm)
Operating System: linux
Architecture: arm64
Container Runtime Version: containerd://2.1.5
Kubelet Version: v1.28.0
Kube-Proxy Version: v1.28.0
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (9 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
default busybox 0 (0%) 0 (0%) 0 (0%) 0 (0%) 9s
kube-system coredns-5dd5756b68-2kmf2 100m (5%) 0 (0%) 70Mi (0%) 170Mi (2%) 26s
kube-system etcd-old-k8s-version-098965 100m (5%) 0 (0%) 100Mi (1%) 0 (0%) 39s
kube-system kindnet-mctv9 100m (5%) 100m (5%) 50Mi (0%) 50Mi (0%) 26s
kube-system kube-apiserver-old-k8s-version-098965 250m (12%) 0 (0%) 0 (0%) 0 (0%) 39s
kube-system kube-controller-manager-old-k8s-version-098965 200m (10%) 0 (0%) 0 (0%) 0 (0%) 42s
kube-system kube-proxy-5t7nq 0 (0%) 0 (0%) 0 (0%) 0 (0%) 26s
kube-system kube-scheduler-old-k8s-version-098965 100m (5%) 0 (0%) 0 (0%) 0 (0%) 39s
kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 24s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 850m (42%) 100m (5%)
memory 220Mi (2%) 220Mi (2%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
hugepages-32Mi 0 (0%) 0 (0%)
hugepages-64Ki 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 24s kube-proxy
Normal NodeHasSufficientMemory 48s (x8 over 48s) kubelet Node old-k8s-version-098965 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 48s (x8 over 48s) kubelet Node old-k8s-version-098965 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 48s (x7 over 48s) kubelet Node old-k8s-version-098965 status is now: NodeHasSufficientPID
Normal NodeAllocatableEnforced 48s kubelet Updated Node Allocatable limit across pods
Normal Starting 40s kubelet Starting kubelet.
Normal NodeHasSufficientMemory 39s kubelet Node old-k8s-version-098965 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 39s kubelet Node old-k8s-version-098965 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 39s kubelet Node old-k8s-version-098965 status is now: NodeHasSufficientPID
Normal NodeAllocatableEnforced 39s kubelet Updated Node Allocatable limit across pods
Normal RegisteredNode 27s node-controller Node old-k8s-version-098965 event: Registered Node old-k8s-version-098965 in Controller
Normal NodeReady 12s kubelet Node old-k8s-version-098965 status is now: NodeReady
==> dmesg <==
[Nov24 02:27] overlayfs: idmapped layers are currently not supported
[Nov24 02:28] overlayfs: idmapped layers are currently not supported
[Nov24 02:30] overlayfs: idmapped layers are currently not supported
[ +9.824160] overlayfs: idmapped layers are currently not supported
[Nov24 02:31] overlayfs: idmapped layers are currently not supported
[Nov24 02:32] overlayfs: idmapped layers are currently not supported
[ +27.981383] overlayfs: idmapped layers are currently not supported
[Nov24 02:33] overlayfs: idmapped layers are currently not supported
[Nov24 02:34] overlayfs: idmapped layers are currently not supported
[Nov24 02:35] overlayfs: idmapped layers are currently not supported
[Nov24 02:36] overlayfs: idmapped layers are currently not supported
[Nov24 02:37] overlayfs: idmapped layers are currently not supported
[Nov24 02:38] overlayfs: idmapped layers are currently not supported
[Nov24 02:39] overlayfs: idmapped layers are currently not supported
[ +24.837346] overlayfs: idmapped layers are currently not supported
[Nov24 02:40] overlayfs: idmapped layers are currently not supported
[ +40.823948] overlayfs: idmapped layers are currently not supported
[ +1.705989] overlayfs: idmapped layers are currently not supported
[Nov24 02:42] overlayfs: idmapped layers are currently not supported
[ +21.661904] overlayfs: idmapped layers are currently not supported
[Nov24 02:44] overlayfs: idmapped layers are currently not supported
[ +1.074777] overlayfs: idmapped layers are currently not supported
[Nov24 02:46] overlayfs: idmapped layers are currently not supported
[ +19.120392] overlayfs: idmapped layers are currently not supported
[Nov24 02:48] kauditd_printk_skb: 8 callbacks suppressed
==> etcd [8fb25b361e0239913db0778bdfb64d93fee6d1a16be3fd7f4f316e46a892bbde] <==
{"level":"info","ts":"2025-11-24T03:37:25.43693Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"9f0758e1c58a86ed switched to configuration voters=(11459225503572592365)"}
{"level":"info","ts":"2025-11-24T03:37:25.437104Z","caller":"membership/cluster.go:421","msg":"added member","cluster-id":"68eaea490fab4e05","local-member-id":"9f0758e1c58a86ed","added-peer-id":"9f0758e1c58a86ed","added-peer-peer-urls":["https://192.168.85.2:2380"]}
{"level":"info","ts":"2025-11-24T03:37:25.441421Z","caller":"embed/etcd.go:726","msg":"starting with client TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/server.crt, key = /var/lib/minikube/certs/etcd/server.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
{"level":"info","ts":"2025-11-24T03:37:25.44164Z","caller":"embed/etcd.go:597","msg":"serving peer traffic","address":"192.168.85.2:2380"}
{"level":"info","ts":"2025-11-24T03:37:25.441821Z","caller":"embed/etcd.go:569","msg":"cmux::serve","address":"192.168.85.2:2380"}
{"level":"info","ts":"2025-11-24T03:37:25.445077Z","caller":"embed/etcd.go:278","msg":"now serving peer/client/metrics","local-member-id":"9f0758e1c58a86ed","initial-advertise-peer-urls":["https://192.168.85.2:2380"],"listen-peer-urls":["https://192.168.85.2:2380"],"advertise-client-urls":["https://192.168.85.2:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.85.2:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
{"level":"info","ts":"2025-11-24T03:37:25.445165Z","caller":"embed/etcd.go:855","msg":"serving metrics","address":"http://127.0.0.1:2381"}
{"level":"info","ts":"2025-11-24T03:37:25.495636Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"9f0758e1c58a86ed is starting a new election at term 1"}
{"level":"info","ts":"2025-11-24T03:37:25.495889Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"9f0758e1c58a86ed became pre-candidate at term 1"}
{"level":"info","ts":"2025-11-24T03:37:25.496002Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"9f0758e1c58a86ed received MsgPreVoteResp from 9f0758e1c58a86ed at term 1"}
{"level":"info","ts":"2025-11-24T03:37:25.496143Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"9f0758e1c58a86ed became candidate at term 2"}
{"level":"info","ts":"2025-11-24T03:37:25.496408Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"9f0758e1c58a86ed received MsgVoteResp from 9f0758e1c58a86ed at term 2"}
{"level":"info","ts":"2025-11-24T03:37:25.497335Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"9f0758e1c58a86ed became leader at term 2"}
{"level":"info","ts":"2025-11-24T03:37:25.497479Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: 9f0758e1c58a86ed elected leader 9f0758e1c58a86ed at term 2"}
{"level":"info","ts":"2025-11-24T03:37:25.498979Z","caller":"etcdserver/server.go:2062","msg":"published local member to cluster through raft","local-member-id":"9f0758e1c58a86ed","local-member-attributes":"{Name:old-k8s-version-098965 ClientURLs:[https://192.168.85.2:2379]}","request-path":"/0/members/9f0758e1c58a86ed/attributes","cluster-id":"68eaea490fab4e05","publish-timeout":"7s"}
{"level":"info","ts":"2025-11-24T03:37:25.499256Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-24T03:37:25.50078Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.85.2:2379"}
{"level":"info","ts":"2025-11-24T03:37:25.500939Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-24T03:37:25.501272Z","caller":"etcdserver/server.go:2571","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-24T03:37:25.503825Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2025-11-24T03:37:25.502905Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2025-11-24T03:37:25.504027Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2025-11-24T03:37:25.506444Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"68eaea490fab4e05","local-member-id":"9f0758e1c58a86ed","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-24T03:37:25.506667Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-24T03:37:25.506735Z","caller":"etcdserver/server.go:2595","msg":"cluster version is updated","cluster-version":"3.5"}
==> kernel <==
03:38:12 up 2:20, 0 user, load average: 1.98, 3.10, 2.74
Linux old-k8s-version-098965 5.15.0-1084-aws #91~20.04.1-Ubuntu SMP Fri May 2 07:00:04 UTC 2025 aarch64 GNU/Linux
PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"
==> kindnet [37f20e76ffbc24c2b929d70181ec4667f979dd10e9528ae0a376dca755a608bd] <==
I1124 03:37:49.827895 1 main.go:109] connected to apiserver: https://10.96.0.1:443
I1124 03:37:49.828142 1 main.go:139] hostIP = 192.168.85.2
podIP = 192.168.85.2
I1124 03:37:49.828290 1 main.go:148] setting mtu 1500 for CNI
I1124 03:37:49.828302 1 main.go:178] kindnetd IP family: "ipv4"
I1124 03:37:49.828312 1 main.go:182] noMask IPv4 subnets: [10.244.0.0/16]
time="2025-11-24T03:37:50Z" level=info msg="Created plugin 10-kube-network-policies (kindnetd, handles RunPodSandbox,RemovePodSandbox)"
I1124 03:37:50.033133 1 controller.go:377] "Starting controller" name="kube-network-policies"
I1124 03:37:50.033241 1 controller.go:381] "Waiting for informer caches to sync"
I1124 03:37:50.033288 1 shared_informer.go:350] "Waiting for caches to sync" controller="kube-network-policies"
I1124 03:37:50.034681 1 controller.go:390] nri plugin exited: failed to connect to NRI service: dial unix /var/run/nri/nri.sock: connect: no such file or directory
I1124 03:37:50.324571 1 shared_informer.go:357] "Caches are synced" controller="kube-network-policies"
I1124 03:37:50.324658 1 metrics.go:72] Registering metrics
I1124 03:37:50.324749 1 controller.go:711] "Syncing nftables rules"
I1124 03:38:00.040225 1 main.go:297] Handling node with IPs: map[192.168.85.2:{}]
I1124 03:38:00.040277 1 main.go:301] handling current node
I1124 03:38:10.032819 1 main.go:297] Handling node with IPs: map[192.168.85.2:{}]
I1124 03:38:10.033055 1 main.go:301] handling current node
==> kube-apiserver [666ad3b5bbcc57cef3344095ab7c6a95424fcdae77e237214b172a62b87abb2e] <==
I1124 03:37:29.817316 1 apf_controller.go:380] Running API Priority and Fairness periodic rebalancing process
I1124 03:37:29.821018 1 shared_informer.go:318] Caches are synced for cluster_authentication_trust_controller
I1124 03:37:29.821061 1 aggregator.go:166] initial CRD sync complete...
I1124 03:37:29.821069 1 autoregister_controller.go:141] Starting autoregister controller
I1124 03:37:29.821233 1 cache.go:32] Waiting for caches to sync for autoregister controller
I1124 03:37:29.821307 1 cache.go:39] Caches are synced for autoregister controller
I1124 03:37:29.822681 1 cache.go:39] Caches are synced for APIServiceRegistrationController controller
I1124 03:37:29.854244 1 controller.go:624] quota admission added evaluator for: leases.coordination.k8s.io
I1124 03:37:29.879343 1 shared_informer.go:318] Caches are synced for node_authorizer
I1124 03:37:29.891373 1 cache.go:39] Caches are synced for AvailableConditionController controller
I1124 03:37:30.501665 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I1124 03:37:30.515842 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I1124 03:37:30.515870 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I1124 03:37:31.168083 1 controller.go:624] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1124 03:37:31.220692 1 controller.go:624] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1124 03:37:31.327576 1 alloc.go:330] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W1124 03:37:31.335539 1 lease.go:263] Resetting endpoints for master service "kubernetes" to [192.168.85.2]
I1124 03:37:31.336837 1 controller.go:624] quota admission added evaluator for: endpoints
I1124 03:37:31.342035 1 controller.go:624] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1124 03:37:31.795879 1 controller.go:624] quota admission added evaluator for: serviceaccounts
I1124 03:37:32.895003 1 controller.go:624] quota admission added evaluator for: deployments.apps
I1124 03:37:32.910864 1 alloc.go:330] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I1124 03:37:32.928122 1 controller.go:624] quota admission added evaluator for: daemonsets.apps
I1124 03:37:45.687691 1 controller.go:624] quota admission added evaluator for: replicasets.apps
I1124 03:37:46.683285 1 controller.go:624] quota admission added evaluator for: controllerrevisions.apps
==> kube-controller-manager [94d7bde87dab52f8ec3b1763043f2afa14f31bf91ba4ddd110aa3c091eb1f236] <==
I1124 03:37:45.830060 1 node_lifecycle_controller.go:1225] "Initializing eviction metric for zone" zone=""
I1124 03:37:45.830629 1 node_lifecycle_controller.go:877] "Missing timestamp for Node. Assuming now as a timestamp" node="old-k8s-version-098965"
I1124 03:37:45.832023 1 node_lifecycle_controller.go:1029] "Controller detected that all Nodes are not-Ready. Entering master disruption mode"
I1124 03:37:45.830221 1 event.go:307] "Event occurred" object="old-k8s-version-098965" fieldPath="" kind="Node" apiVersion="v1" type="Normal" reason="RegisteredNode" message="Node old-k8s-version-098965 event: Registered Node old-k8s-version-098965 in Controller"
I1124 03:37:46.234151 1 shared_informer.go:318] Caches are synced for garbage collector
I1124 03:37:46.277193 1 shared_informer.go:318] Caches are synced for garbage collector
I1124 03:37:46.277382 1 garbagecollector.go:166] "All resource monitors have synced. Proceeding to collect garbage"
I1124 03:37:46.504268 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-xqjm9"
I1124 03:37:46.531473 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-2kmf2"
I1124 03:37:46.546744 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="854.357749ms"
I1124 03:37:46.566310 1 event.go:307] "Event occurred" object="kube-dns" fieldPath="" kind="Endpoints" apiVersion="v1" type="Warning" reason="FailedToCreateEndpoint" message="Failed to create endpoint for service kube-system/kube-dns: endpoints \"kube-dns\" already exists"
I1124 03:37:46.584884 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="38.082715ms"
I1124 03:37:46.585113 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="149.391µs"
I1124 03:37:46.696833 1 event.go:307] "Event occurred" object="kube-system/kube-proxy" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-5t7nq"
I1124 03:37:46.703751 1 event.go:307] "Event occurred" object="kube-system/kindnet" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kindnet-mctv9"
I1124 03:37:48.352432 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set coredns-5dd5756b68 to 1 from 2"
I1124 03:37:48.387265 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: coredns-5dd5756b68-xqjm9"
I1124 03:37:48.403262 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="51.894657ms"
I1124 03:37:48.414134 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="10.824275ms"
I1124 03:37:48.414238 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="43.528µs"
I1124 03:38:00.391250 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="131.267µs"
I1124 03:38:00.449093 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="133.589µs"
I1124 03:38:00.836415 1 node_lifecycle_controller.go:1048] "Controller detected that some Nodes are Ready. Exiting master disruption mode"
I1124 03:38:01.292788 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="13.437073ms"
I1124 03:38:01.294027 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="1.155904ms"
==> kube-proxy [4baa8c107b38cc2761e31cd050e33ec89802d4aa44bd4f1d1d031950a9d835ec] <==
I1124 03:37:47.752353 1 server_others.go:69] "Using iptables proxy"
I1124 03:37:47.775066 1 node.go:141] Successfully retrieved node IP: 192.168.85.2
I1124 03:37:47.844709 1 server.go:632] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I1124 03:37:47.849188 1 server_others.go:152] "Using iptables Proxier"
I1124 03:37:47.849234 1 server_others.go:421] "Detect-local-mode set to ClusterCIDR, but no cluster CIDR for family" ipFamily="IPv6"
I1124 03:37:47.849286 1 server_others.go:438] "Defaulting to no-op detect-local"
I1124 03:37:47.849319 1 proxier.go:251] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
I1124 03:37:47.849526 1 server.go:846] "Version info" version="v1.28.0"
I1124 03:37:47.849543 1 server.go:848] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1124 03:37:47.851283 1 config.go:188] "Starting service config controller"
I1124 03:37:47.851308 1 shared_informer.go:311] Waiting for caches to sync for service config
I1124 03:37:47.851328 1 config.go:97] "Starting endpoint slice config controller"
I1124 03:37:47.851333 1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
I1124 03:37:47.851909 1 config.go:315] "Starting node config controller"
I1124 03:37:47.851919 1 shared_informer.go:311] Waiting for caches to sync for node config
I1124 03:37:47.952223 1 shared_informer.go:318] Caches are synced for node config
I1124 03:37:47.952255 1 shared_informer.go:318] Caches are synced for service config
I1124 03:37:47.952281 1 shared_informer.go:318] Caches are synced for endpoint slice config
==> kube-scheduler [95905c97af2e4e393feeaef2edf3e1c7c5fc6dcb11cccf3554a17255c56bd15d] <==
W1124 03:37:29.836095 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E1124 03:37:29.836123 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
W1124 03:37:30.637685 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E1124 03:37:30.637945 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
W1124 03:37:30.642715 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E1124 03:37:30.642753 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
W1124 03:37:30.708776 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E1124 03:37:30.708817 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
W1124 03:37:30.711532 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E1124 03:37:30.711569 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
W1124 03:37:30.717417 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E1124 03:37:30.717460 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
W1124 03:37:30.738383 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E1124 03:37:30.738423 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
W1124 03:37:30.770745 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E1124 03:37:30.770991 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
W1124 03:37:30.836552 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E1124 03:37:30.836594 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
W1124 03:37:30.842629 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E1124 03:37:30.842859 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
W1124 03:37:30.843777 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E1124 03:37:30.843981 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
W1124 03:37:30.921894 1 reflector.go:535] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E1124 03:37:30.922102 1 reflector.go:147] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
I1124 03:37:33.702680 1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Nov 24 03:37:45 old-k8s-version-098965 kubelet[1540]: I1124 03:37:45.681266 1540 kubelet_network.go:61] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24"
Nov 24 03:37:46 old-k8s-version-098965 kubelet[1540]: I1124 03:37:46.713561 1540 topology_manager.go:215] "Topology Admit Handler" podUID="6050bdb0-6390-48c7-863f-520ef6277ad8" podNamespace="kube-system" podName="kube-proxy-5t7nq"
Nov 24 03:37:46 old-k8s-version-098965 kubelet[1540]: I1124 03:37:46.716233 1540 topology_manager.go:215] "Topology Admit Handler" podUID="0f0d91cd-7d64-482e-b33c-383b20f5bd79" podNamespace="kube-system" podName="kindnet-mctv9"
Nov 24 03:37:46 old-k8s-version-098965 kubelet[1540]: I1124 03:37:46.767542 1540 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-cfg\" (UniqueName: \"kubernetes.io/host-path/0f0d91cd-7d64-482e-b33c-383b20f5bd79-cni-cfg\") pod \"kindnet-mctv9\" (UID: \"0f0d91cd-7d64-482e-b33c-383b20f5bd79\") " pod="kube-system/kindnet-mctv9"
Nov 24 03:37:46 old-k8s-version-098965 kubelet[1540]: I1124 03:37:46.767756 1540 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/0f0d91cd-7d64-482e-b33c-383b20f5bd79-xtables-lock\") pod \"kindnet-mctv9\" (UID: \"0f0d91cd-7d64-482e-b33c-383b20f5bd79\") " pod="kube-system/kindnet-mctv9"
Nov 24 03:37:46 old-k8s-version-098965 kubelet[1540]: I1124 03:37:46.767863 1540 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tgdbr\" (UniqueName: \"kubernetes.io/projected/0f0d91cd-7d64-482e-b33c-383b20f5bd79-kube-api-access-tgdbr\") pod \"kindnet-mctv9\" (UID: \"0f0d91cd-7d64-482e-b33c-383b20f5bd79\") " pod="kube-system/kindnet-mctv9"
Nov 24 03:37:46 old-k8s-version-098965 kubelet[1540]: I1124 03:37:46.767964 1540 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/6050bdb0-6390-48c7-863f-520ef6277ad8-xtables-lock\") pod \"kube-proxy-5t7nq\" (UID: \"6050bdb0-6390-48c7-863f-520ef6277ad8\") " pod="kube-system/kube-proxy-5t7nq"
Nov 24 03:37:46 old-k8s-version-098965 kubelet[1540]: I1124 03:37:46.768057 1540 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/6050bdb0-6390-48c7-863f-520ef6277ad8-lib-modules\") pod \"kube-proxy-5t7nq\" (UID: \"6050bdb0-6390-48c7-863f-520ef6277ad8\") " pod="kube-system/kube-proxy-5t7nq"
Nov 24 03:37:46 old-k8s-version-098965 kubelet[1540]: I1124 03:37:46.768153 1540 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dnmtw\" (UniqueName: \"kubernetes.io/projected/6050bdb0-6390-48c7-863f-520ef6277ad8-kube-api-access-dnmtw\") pod \"kube-proxy-5t7nq\" (UID: \"6050bdb0-6390-48c7-863f-520ef6277ad8\") " pod="kube-system/kube-proxy-5t7nq"
Nov 24 03:37:46 old-k8s-version-098965 kubelet[1540]: I1124 03:37:46.768259 1540 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/6050bdb0-6390-48c7-863f-520ef6277ad8-kube-proxy\") pod \"kube-proxy-5t7nq\" (UID: \"6050bdb0-6390-48c7-863f-520ef6277ad8\") " pod="kube-system/kube-proxy-5t7nq"
Nov 24 03:37:46 old-k8s-version-098965 kubelet[1540]: I1124 03:37:46.768359 1540 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/0f0d91cd-7d64-482e-b33c-383b20f5bd79-lib-modules\") pod \"kindnet-mctv9\" (UID: \"0f0d91cd-7d64-482e-b33c-383b20f5bd79\") " pod="kube-system/kindnet-mctv9"
Nov 24 03:37:50 old-k8s-version-098965 kubelet[1540]: I1124 03:37:50.218063 1540 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kube-proxy-5t7nq" podStartSLOduration=4.2180200469999996 podCreationTimestamp="2025-11-24 03:37:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 03:37:48.208401476 +0000 UTC m=+15.367432854" watchObservedRunningTime="2025-11-24 03:37:50.218020047 +0000 UTC m=+17.377051399"
Nov 24 03:38:00 old-k8s-version-098965 kubelet[1540]: I1124 03:38:00.190884 1540 kubelet_node_status.go:493] "Fast updating node status as it just became ready"
Nov 24 03:38:00 old-k8s-version-098965 kubelet[1540]: I1124 03:38:00.379008 1540 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kindnet-mctv9" podStartSLOduration=12.155727494 podCreationTimestamp="2025-11-24 03:37:46 +0000 UTC" firstStartedPulling="2025-11-24 03:37:47.330956178 +0000 UTC m=+14.489987539" lastFinishedPulling="2025-11-24 03:37:49.554182857 +0000 UTC m=+16.713214218" observedRunningTime="2025-11-24 03:37:50.219024146 +0000 UTC m=+17.378055507" watchObservedRunningTime="2025-11-24 03:38:00.378954173 +0000 UTC m=+27.537985543"
Nov 24 03:38:00 old-k8s-version-098965 kubelet[1540]: I1124 03:38:00.379210 1540 topology_manager.go:215] "Topology Admit Handler" podUID="9ede1da5-704c-4aab-93e0-77ce93158129" podNamespace="kube-system" podName="storage-provisioner"
Nov 24 03:38:00 old-k8s-version-098965 kubelet[1540]: I1124 03:38:00.394275 1540 topology_manager.go:215] "Topology Admit Handler" podUID="9c6642fb-17b7-4199-b927-eb63b9a58260" podNamespace="kube-system" podName="coredns-5dd5756b68-2kmf2"
Nov 24 03:38:00 old-k8s-version-098965 kubelet[1540]: I1124 03:38:00.504386 1540 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fgg48\" (UniqueName: \"kubernetes.io/projected/9c6642fb-17b7-4199-b927-eb63b9a58260-kube-api-access-fgg48\") pod \"coredns-5dd5756b68-2kmf2\" (UID: \"9c6642fb-17b7-4199-b927-eb63b9a58260\") " pod="kube-system/coredns-5dd5756b68-2kmf2"
Nov 24 03:38:00 old-k8s-version-098965 kubelet[1540]: I1124 03:38:00.504451 1540 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9c6642fb-17b7-4199-b927-eb63b9a58260-config-volume\") pod \"coredns-5dd5756b68-2kmf2\" (UID: \"9c6642fb-17b7-4199-b927-eb63b9a58260\") " pod="kube-system/coredns-5dd5756b68-2kmf2"
Nov 24 03:38:00 old-k8s-version-098965 kubelet[1540]: I1124 03:38:00.504532 1540 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8snrh\" (UniqueName: \"kubernetes.io/projected/9ede1da5-704c-4aab-93e0-77ce93158129-kube-api-access-8snrh\") pod \"storage-provisioner\" (UID: \"9ede1da5-704c-4aab-93e0-77ce93158129\") " pod="kube-system/storage-provisioner"
Nov 24 03:38:00 old-k8s-version-098965 kubelet[1540]: I1124 03:38:00.504567 1540 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/9ede1da5-704c-4aab-93e0-77ce93158129-tmp\") pod \"storage-provisioner\" (UID: \"9ede1da5-704c-4aab-93e0-77ce93158129\") " pod="kube-system/storage-provisioner"
Nov 24 03:38:01 old-k8s-version-098965 kubelet[1540]: I1124 03:38:01.277737 1540 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=13.277693839 podCreationTimestamp="2025-11-24 03:37:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 03:38:01.256250253 +0000 UTC m=+28.415281605" watchObservedRunningTime="2025-11-24 03:38:01.277693839 +0000 UTC m=+28.436725192"
Nov 24 03:38:03 old-k8s-version-098965 kubelet[1540]: I1124 03:38:03.329633 1540 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/coredns-5dd5756b68-2kmf2" podStartSLOduration=17.329588381 podCreationTimestamp="2025-11-24 03:37:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 03:38:01.279992944 +0000 UTC m=+28.439024297" watchObservedRunningTime="2025-11-24 03:38:03.329588381 +0000 UTC m=+30.488619734"
Nov 24 03:38:03 old-k8s-version-098965 kubelet[1540]: I1124 03:38:03.329845 1540 topology_manager.go:215] "Topology Admit Handler" podUID="b377806c-ae20-44d2-9d0f-07b097026328" podNamespace="default" podName="busybox"
Nov 24 03:38:03 old-k8s-version-098965 kubelet[1540]: I1124 03:38:03.426801 1540 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wn2qh\" (UniqueName: \"kubernetes.io/projected/b377806c-ae20-44d2-9d0f-07b097026328-kube-api-access-wn2qh\") pod \"busybox\" (UID: \"b377806c-ae20-44d2-9d0f-07b097026328\") " pod="default/busybox"
Nov 24 03:38:06 old-k8s-version-098965 kubelet[1540]: I1124 03:38:06.274643 1540 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="default/busybox" podStartSLOduration=1.222747006 podCreationTimestamp="2025-11-24 03:38:03 +0000 UTC" firstStartedPulling="2025-11-24 03:38:03.75886784 +0000 UTC m=+30.917899193" lastFinishedPulling="2025-11-24 03:38:05.810715943 +0000 UTC m=+32.969747296" observedRunningTime="2025-11-24 03:38:06.27449371 +0000 UTC m=+33.433525071" watchObservedRunningTime="2025-11-24 03:38:06.274595109 +0000 UTC m=+33.433626495"
==> storage-provisioner [32ab776c7affb85bd5965dee0104d1470d0553d2b7a80e479ea0fc030ea67388] <==
I1124 03:38:01.039603 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1124 03:38:01.054106 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1124 03:38:01.054328 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I1124 03:38:01.064918 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I1124 03:38:01.065095 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_old-k8s-version-098965_e3e8caf0-85bd-4d0b-af08-80a33b7d616e!
I1124 03:38:01.066102 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"c304fee8-eb73-4695-8997-27ec70001b31", APIVersion:"v1", ResourceVersion:"438", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' old-k8s-version-098965_e3e8caf0-85bd-4d0b-af08-80a33b7d616e became leader
I1124 03:38:01.165252 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_old-k8s-version-098965_e3e8caf0-85bd-4d0b-af08-80a33b7d616e!
-- /stdout --
helpers_test.go:262: (dbg) Run: out/minikube-linux-arm64 status --format={{.APIServer}} -p old-k8s-version-098965 -n old-k8s-version-098965
helpers_test.go:269: (dbg) Run: kubectl --context old-k8s-version-098965 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:293: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: end of post-mortem logs <<<
helpers_test.go:294: ---------------------/post-mortem---------------------------------
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: docker inspect <======
helpers_test.go:239: (dbg) Run: docker inspect old-k8s-version-098965
helpers_test.go:243: (dbg) docker inspect old-k8s-version-098965:
-- stdout --
[
{
"Id": "51b62bc50b581270fcb4bc2e1c574a9a6681d89c3887762aa06dd29ac0c65022",
"Created": "2025-11-24T03:37:06.167962609Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 457210,
"ExitCode": 0,
"Error": "",
"StartedAt": "2025-11-24T03:37:06.24041942Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:fbb44bc62521f331457dff002aaa5e1e27856f9e53853b3b3ee62969be454028",
"ResolvConfPath": "/var/lib/docker/containers/51b62bc50b581270fcb4bc2e1c574a9a6681d89c3887762aa06dd29ac0c65022/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/51b62bc50b581270fcb4bc2e1c574a9a6681d89c3887762aa06dd29ac0c65022/hostname",
"HostsPath": "/var/lib/docker/containers/51b62bc50b581270fcb4bc2e1c574a9a6681d89c3887762aa06dd29ac0c65022/hosts",
"LogPath": "/var/lib/docker/containers/51b62bc50b581270fcb4bc2e1c574a9a6681d89c3887762aa06dd29ac0c65022/51b62bc50b581270fcb4bc2e1c574a9a6681d89c3887762aa06dd29ac0c65022-json.log",
"Name": "/old-k8s-version-098965",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"old-k8s-version-098965:/var",
"/lib/modules:/lib/modules:ro"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {}
},
"NetworkMode": "old-k8s-version-098965",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 3221225472,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 6442450944,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"ID": "51b62bc50b581270fcb4bc2e1c574a9a6681d89c3887762aa06dd29ac0c65022",
"LowerDir": "/var/lib/docker/overlay2/8effb39b7e48dc2e06628c564f9eb8d7a6134b67b474f4243a9f92d81eed72e6-init/diff:/var/lib/docker/overlay2/11b197f530f0d571f61892814d8d4c774f7d3e5a97abdd8c5aa182cc99b2d856/diff",
"MergedDir": "/var/lib/docker/overlay2/8effb39b7e48dc2e06628c564f9eb8d7a6134b67b474f4243a9f92d81eed72e6/merged",
"UpperDir": "/var/lib/docker/overlay2/8effb39b7e48dc2e06628c564f9eb8d7a6134b67b474f4243a9f92d81eed72e6/diff",
"WorkDir": "/var/lib/docker/overlay2/8effb39b7e48dc2e06628c564f9eb8d7a6134b67b474f4243a9f92d81eed72e6/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "volume",
"Name": "old-k8s-version-098965",
"Source": "/var/lib/docker/volumes/old-k8s-version-098965/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
},
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
}
],
"Config": {
"Hostname": "old-k8s-version-098965",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763935653-21975@sha256:5273d148037cfb860f8152fbd08072e6c1f4b37ff9a51956a3c12965f5f2d787",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "old-k8s-version-098965",
"name.minikube.sigs.k8s.io": "old-k8s-version-098965",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "1fdc3bd4111da77a7219abec40237713d3aafb5294361ea9ac940f031b5e9874",
"SandboxKey": "/var/run/docker/netns/1fdc3bd4111d",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33418"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33419"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33422"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33420"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33421"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"old-k8s-version-098965": {
"IPAMConfig": {
"IPv4Address": "192.168.85.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "4e:8b:8f:f7:48:e2",
"DriverOpts": null,
"GwPriority": 0,
"NetworkID": "a787be3020cdc92e0572d92f4bf90ce3f3c7948fc2d2deef82cd4a5f099c319a",
"EndpointID": "0c39f7b7035a14f48a48394a60897ac0eb2db5edb711c6ca54097ce4804ab54d",
"Gateway": "192.168.85.1",
"IPAddress": "192.168.85.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"old-k8s-version-098965",
"51b62bc50b58"
]
}
}
}
}
]
-- /stdout --
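Note: the NetworkSettings.Ports map in the inspect output above is the source of the random host ports minikube dials later in these logs; the same Go-template lookup it uses for 22/tcp can be run by hand for any of them, e.g. for the API server port:
  docker container inspect -f '{{(index (index .NetworkSettings.Ports "8443/tcp") 0).HostPort}}' old-k8s-version-098965
With the container in the state shown above this prints 33421.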
helpers_test.go:247: (dbg) Run: out/minikube-linux-arm64 status --format={{.Host}} -p old-k8s-version-098965 -n old-k8s-version-098965
helpers_test.go:252: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-arm64 -p old-k8s-version-098965 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-arm64 -p old-k8s-version-098965 logs -n 25: (1.289253666s)
helpers_test.go:260: TestStartStop/group/old-k8s-version/serial/DeployApp logs:
-- stdout --
==> Audit <==
┌─────────┬─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬───────────────────────────┬─────────┬─────────┬─────────────────────┬───────────
──────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼───────────────────────────┼─────────┼─────────┼─────────────────────┼───────────
──────────┤
│ ssh │ -p cilium-842431 sudo cat /etc/systemd/system/cri-docker.service.d/10-cni.conf │ cilium-842431 │ jenkins │ v1.37.0 │ 24 Nov 25 03:35 UTC │ │
│ ssh │ -p cilium-842431 sudo cat /usr/lib/systemd/system/cri-docker.service │ cilium-842431 │ jenkins │ v1.37.0 │ 24 Nov 25 03:35 UTC │ │
│ ssh │ -p cilium-842431 sudo cri-dockerd --version │ cilium-842431 │ jenkins │ v1.37.0 │ 24 Nov 25 03:35 UTC │ │
│ ssh │ -p cilium-842431 sudo systemctl status containerd --all --full --no-pager │ cilium-842431 │ jenkins │ v1.37.0 │ 24 Nov 25 03:35 UTC │ │
│ ssh │ -p cilium-842431 sudo systemctl cat containerd --no-pager │ cilium-842431 │ jenkins │ v1.37.0 │ 24 Nov 25 03:35 UTC │ │
│ ssh │ -p cilium-842431 sudo cat /lib/systemd/system/containerd.service │ cilium-842431 │ jenkins │ v1.37.0 │ 24 Nov 25 03:35 UTC │ │
│ ssh │ -p cilium-842431 sudo cat /etc/containerd/config.toml │ cilium-842431 │ jenkins │ v1.37.0 │ 24 Nov 25 03:35 UTC │ │
│ ssh │ -p cilium-842431 sudo containerd config dump │ cilium-842431 │ jenkins │ v1.37.0 │ 24 Nov 25 03:35 UTC │ │
│ ssh │ -p cilium-842431 sudo systemctl status crio --all --full --no-pager │ cilium-842431 │ jenkins │ v1.37.0 │ 24 Nov 25 03:35 UTC │ │
│ ssh │ -p cilium-842431 sudo systemctl cat crio --no-pager │ cilium-842431 │ jenkins │ v1.37.0 │ 24 Nov 25 03:35 UTC │ │
│ ssh │ -p cilium-842431 sudo find /etc/crio -type f -exec sh -c 'echo {}; cat {}' \; │ cilium-842431 │ jenkins │ v1.37.0 │ 24 Nov 25 03:35 UTC │ │
│ ssh │ -p cilium-842431 sudo crio config │ cilium-842431 │ jenkins │ v1.37.0 │ 24 Nov 25 03:35 UTC │ │
│ delete │ -p cilium-842431 │ cilium-842431 │ jenkins │ v1.37.0 │ 24 Nov 25 03:35 UTC │ 24 Nov 25 03:35 UTC │
│ start │ -p force-systemd-env-574539 --memory=3072 --alsologtostderr -v=5 --driver=docker --container-runtime=containerd │ force-systemd-env-574539 │ jenkins │ v1.37.0 │ 24 Nov 25 03:35 UTC │ 24 Nov 25 03:36 UTC │
│ start │ -p kubernetes-upgrade-850960 --memory=3072 --kubernetes-version=v1.28.0 --driver=docker --container-runtime=containerd │ kubernetes-upgrade-850960 │ jenkins │ v1.37.0 │ 24 Nov 25 03:35 UTC │ │
│ start │ -p kubernetes-upgrade-850960 --memory=3072 --kubernetes-version=v1.34.1 --alsologtostderr -v=1 --driver=docker --container-runtime=containerd │ kubernetes-upgrade-850960 │ jenkins │ v1.37.0 │ 24 Nov 25 03:35 UTC │ 24 Nov 25 03:35 UTC │
│ delete │ -p kubernetes-upgrade-850960 │ kubernetes-upgrade-850960 │ jenkins │ v1.37.0 │ 24 Nov 25 03:35 UTC │ 24 Nov 25 03:36 UTC │
│ start │ -p cert-expiration-846384 --memory=3072 --cert-expiration=3m --driver=docker --container-runtime=containerd │ cert-expiration-846384 │ jenkins │ v1.37.0 │ 24 Nov 25 03:36 UTC │ 24 Nov 25 03:36 UTC │
│ ssh │ force-systemd-env-574539 ssh cat /etc/containerd/config.toml │ force-systemd-env-574539 │ jenkins │ v1.37.0 │ 24 Nov 25 03:36 UTC │ 24 Nov 25 03:36 UTC │
│ delete │ -p force-systemd-env-574539 │ force-systemd-env-574539 │ jenkins │ v1.37.0 │ 24 Nov 25 03:36 UTC │ 24 Nov 25 03:36 UTC │
│ start │ -p cert-options-216763 --memory=3072 --apiserver-ips=127.0.0.1 --apiserver-ips=192.168.15.15 --apiserver-names=localhost --apiserver-names=www.google.com --apiserver-port=8555 --driver=docker --container-runtime=containerd │ cert-options-216763 │ jenkins │ v1.37.0 │ 24 Nov 25 03:36 UTC │ 24 Nov 25 03:36 UTC │
│ ssh │ cert-options-216763 ssh openssl x509 -text -noout -in /var/lib/minikube/certs/apiserver.crt │ cert-options-216763 │ jenkins │ v1.37.0 │ 24 Nov 25 03:36 UTC │ 24 Nov 25 03:36 UTC │
│ ssh │ -p cert-options-216763 -- sudo cat /etc/kubernetes/admin.conf │ cert-options-216763 │ jenkins │ v1.37.0 │ 24 Nov 25 03:36 UTC │ 24 Nov 25 03:36 UTC │
│ delete │ -p cert-options-216763 │ cert-options-216763 │ jenkins │ v1.37.0 │ 24 Nov 25 03:36 UTC │ 24 Nov 25 03:36 UTC │
│ start │ -p old-k8s-version-098965 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.28.0 │ old-k8s-version-098965 │ jenkins │ v1.37.0 │ 24 Nov 25 03:36 UTC │ 24 Nov 25 03:38 UTC │
└─────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴───────────────────────────┴─────────┴─────────┴─────────────────────┴───────────
──────────┘
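Note: the final Audit row is the start invocation that produced the profile under test; re-running it against the same locally built binary should reproduce the failing cluster:
  out/minikube-linux-arm64 start -p old-k8s-version-098965 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.28.0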
==> Last Start <==
Log file created at: 2025/11/24 03:36:59
Running on machine: ip-172-31-24-2
Binary: Built with gc go1.25.3 for linux/arm64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1124 03:36:59.219087 456828 out.go:360] Setting OutFile to fd 1 ...
I1124 03:36:59.219245 456828 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1124 03:36:59.219258 456828 out.go:374] Setting ErrFile to fd 2...
I1124 03:36:59.219263 456828 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1124 03:36:59.219545 456828 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21975-255205/.minikube/bin
I1124 03:36:59.220001 456828 out.go:368] Setting JSON to false
I1124 03:36:59.221277 456828 start.go:133] hostinfo: {"hostname":"ip-172-31-24-2","uptime":8348,"bootTime":1763947072,"procs":184,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1084-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"6d436adf-771e-4269-b9a3-c25fd4fca4f5"}
I1124 03:36:59.221357 456828 start.go:143] virtualization:
I1124 03:36:59.227637 456828 out.go:179] * [old-k8s-version-098965] minikube v1.37.0 on Ubuntu 20.04 (arm64)
I1124 03:36:59.231151 456828 out.go:179] - MINIKUBE_LOCATION=21975
I1124 03:36:59.231327 456828 notify.go:221] Checking for updates...
I1124 03:36:59.238122 456828 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1124 03:36:59.241423 456828 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21975-255205/kubeconfig
I1124 03:36:59.244671 456828 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21975-255205/.minikube
I1124 03:36:59.247794 456828 out.go:179] - MINIKUBE_BIN=out/minikube-linux-arm64
I1124 03:36:59.250835 456828 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1124 03:36:59.254529 456828 config.go:182] Loaded profile config "cert-expiration-846384": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1124 03:36:59.254702 456828 driver.go:422] Setting default libvirt URI to qemu:///system
I1124 03:36:59.294116 456828 docker.go:124] docker version: linux-28.1.1:Docker Engine - Community
I1124 03:36:59.294240 456828 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1124 03:36:59.357118 456828 info.go:266] docker info: {ID:J4M5:W6MX:GOX4:4LAQ:VI7E:VJNF:J3OP:OPBH:GF7G:PPY4:WQWD:7N4L Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:36 OomKillDisable:true NGoroutines:52 SystemTime:2025-11-24 03:36:59.346945612 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:a
arch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214831104 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-24-2 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Pat
h:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I1124 03:36:59.357230 456828 docker.go:319] overlay module found
I1124 03:36:59.360704 456828 out.go:179] * Using the docker driver based on user configuration
I1124 03:36:59.363700 456828 start.go:309] selected driver: docker
I1124 03:36:59.363727 456828 start.go:927] validating driver "docker" against <nil>
I1124 03:36:59.363759 456828 start.go:938] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1124 03:36:59.364561 456828 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1124 03:36:59.415697 456828 info.go:266] docker info: {ID:J4M5:W6MX:GOX4:4LAQ:VI7E:VJNF:J3OP:OPBH:GF7G:PPY4:WQWD:7N4L Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:36 OomKillDisable:true NGoroutines:52 SystemTime:2025-11-24 03:36:59.406291614 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:a
arch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214831104 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-24-2 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Pat
h:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I1124 03:36:59.415854 456828 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I1124 03:36:59.416110 456828 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1124 03:36:59.419249 456828 out.go:179] * Using Docker driver with root privileges
I1124 03:36:59.422257 456828 cni.go:84] Creating CNI manager for ""
I1124 03:36:59.422344 456828 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1124 03:36:59.422359 456828 start_flags.go:336] Found "CNI" CNI - setting NetworkPlugin=cni
I1124 03:36:59.422448 456828 start.go:353] cluster config:
{Name:old-k8s-version-098965 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763935653-21975@sha256:5273d148037cfb860f8152fbd08072e6c1f4b37ff9a51956a3c12965f5f2d787 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-098965 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local
ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSH
AuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1124 03:36:59.425538 456828 out.go:179] * Starting "old-k8s-version-098965" primary control-plane node in "old-k8s-version-098965" cluster
I1124 03:36:59.428289 456828 cache.go:134] Beginning downloading kic base image for docker with containerd
I1124 03:36:59.431323 456828 out.go:179] * Pulling base image v0.0.48-1763935653-21975 ...
I1124 03:36:59.434113 456828 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1124 03:36:59.434165 456828 preload.go:203] Found local preload: /home/jenkins/minikube-integration/21975-255205/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-arm64.tar.lz4
I1124 03:36:59.434176 456828 cache.go:65] Caching tarball of preloaded images
I1124 03:36:59.434207 456828 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763935653-21975@sha256:5273d148037cfb860f8152fbd08072e6c1f4b37ff9a51956a3c12965f5f2d787 in local docker daemon
I1124 03:36:59.434261 456828 preload.go:238] Found /home/jenkins/minikube-integration/21975-255205/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-arm64.tar.lz4 in cache, skipping download
I1124 03:36:59.434272 456828 cache.go:68] Finished verifying existence of preloaded tar for v1.28.0 on containerd
I1124 03:36:59.434383 456828 profile.go:143] Saving config to /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/config.json ...
I1124 03:36:59.434401 456828 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/config.json: {Name:mk69515acb07727840b36c87604cba4bd531db8a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 03:36:59.454350 456828 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763935653-21975@sha256:5273d148037cfb860f8152fbd08072e6c1f4b37ff9a51956a3c12965f5f2d787 in local docker daemon, skipping pull
I1124 03:36:59.454376 456828 cache.go:158] gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763935653-21975@sha256:5273d148037cfb860f8152fbd08072e6c1f4b37ff9a51956a3c12965f5f2d787 exists in daemon, skipping load
I1124 03:36:59.454396 456828 cache.go:243] Successfully downloaded all kic artifacts
I1124 03:36:59.454427 456828 start.go:360] acquireMachinesLock for old-k8s-version-098965: {Name:mkfaf6c0e20ffd0f03bcaf5e2568b90f1af41e0c Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 03:36:59.454522 456828 start.go:364] duration metric: took 80.46µs to acquireMachinesLock for "old-k8s-version-098965"
I1124 03:36:59.454546 456828 start.go:93] Provisioning new machine with config: &{Name:old-k8s-version-098965 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763935653-21975@sha256:5273d148037cfb860f8152fbd08072e6c1f4b37ff9a51956a3c12965f5f2d787 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-098965 Namespace:default APIServerHAVIP:
APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:f
alse CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1124 03:36:59.454620 456828 start.go:125] createHost starting for "" (driver="docker")
I1124 03:36:59.460555 456828 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I1124 03:36:59.460925 456828 start.go:159] libmachine.API.Create for "old-k8s-version-098965" (driver="docker")
I1124 03:36:59.460965 456828 client.go:173] LocalClient.Create starting
I1124 03:36:59.461101 456828 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21975-255205/.minikube/certs/ca.pem
I1124 03:36:59.461190 456828 main.go:143] libmachine: Decoding PEM data...
I1124 03:36:59.461213 456828 main.go:143] libmachine: Parsing certificate...
I1124 03:36:59.461265 456828 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21975-255205/.minikube/certs/cert.pem
I1124 03:36:59.461289 456828 main.go:143] libmachine: Decoding PEM data...
I1124 03:36:59.461301 456828 main.go:143] libmachine: Parsing certificate...
I1124 03:36:59.461680 456828 cli_runner.go:164] Run: docker network inspect old-k8s-version-098965 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1124 03:36:59.477952 456828 cli_runner.go:211] docker network inspect old-k8s-version-098965 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1124 03:36:59.478060 456828 network_create.go:284] running [docker network inspect old-k8s-version-098965] to gather additional debugging logs...
I1124 03:36:59.478084 456828 cli_runner.go:164] Run: docker network inspect old-k8s-version-098965
W1124 03:36:59.501264 456828 cli_runner.go:211] docker network inspect old-k8s-version-098965 returned with exit code 1
I1124 03:36:59.501311 456828 network_create.go:287] error running [docker network inspect old-k8s-version-098965]: docker network inspect old-k8s-version-098965: exit status 1
stdout:
[]
stderr:
Error response from daemon: network old-k8s-version-098965 not found
I1124 03:36:59.501326 456828 network_create.go:289] output of [docker network inspect old-k8s-version-098965]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network old-k8s-version-098965 not found
** /stderr **
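Note: the exit status 1 and "network ... not found" error here are the expected negative result of the pre-create probe, not a failure. A lighter way to ask the same question is:
  docker network ls --filter name=old-k8s-version-098965 --format '{{.Name}}'
which prints nothing while the network is absent.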
I1124 03:36:59.501444 456828 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1124 03:36:59.520261 456828 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-752aaa40bb3d IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:3a:00:20:e4:71:15} reservation:<nil>}
I1124 03:36:59.520804 456828 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-cbb0dee281db IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:76:ff:07:3e:91:0f} reservation:<nil>}
I1124 03:36:59.521086 456828 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-d95ffec60547 IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:fe:b5:f2:ed:07:1e} reservation:<nil>}
I1124 03:36:59.521451 456828 network.go:211] skipping subnet 192.168.76.0/24 that is taken: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName:br-1b3e5c8c3c27 IfaceIPv4:192.168.76.1 IfaceMTU:1500 IfaceMAC:7e:8b:7b:bd:23:4e} reservation:<nil>}
I1124 03:36:59.521977 456828 network.go:206] using free private subnet 192.168.85.0/24: &{IP:192.168.85.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.85.0/24 Gateway:192.168.85.1 ClientMin:192.168.85.2 ClientMax:192.168.85.254 Broadcast:192.168.85.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0x4001a3e7d0}
I1124 03:36:59.522000 456828 network_create.go:124] attempt to create docker network old-k8s-version-098965 192.168.85.0/24 with gateway 192.168.85.1 and MTU of 1500 ...
I1124 03:36:59.522073 456828 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.85.0/24 --gateway=192.168.85.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=old-k8s-version-098965 old-k8s-version-098965
I1124 03:36:59.585194 456828 network_create.go:108] docker network old-k8s-version-098965 192.168.85.0/24 created
I1124 03:36:59.585224 456828 kic.go:121] calculated static IP "192.168.85.2" for the "old-k8s-version-098965" container
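Note: both the chosen subnet and the static IP calculated here can be checked against what Docker actually allocated. A sketch, assuming the profile network and container still exist:
  docker network inspect old-k8s-version-098965 --format '{{range .IPAM.Config}}{{.Subnet}} {{.Gateway}}{{end}}'
  docker container inspect old-k8s-version-098965 --format '{{(index .NetworkSettings.Networks "old-k8s-version-098965").IPAddress}}'
Per the inspect output earlier in this post-mortem these return 192.168.85.0/24 192.168.85.1 and 192.168.85.2.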
I1124 03:36:59.585319 456828 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1124 03:36:59.601540 456828 cli_runner.go:164] Run: docker volume create old-k8s-version-098965 --label name.minikube.sigs.k8s.io=old-k8s-version-098965 --label created_by.minikube.sigs.k8s.io=true
I1124 03:36:59.619479 456828 oci.go:103] Successfully created a docker volume old-k8s-version-098965
I1124 03:36:59.619575 456828 cli_runner.go:164] Run: docker run --rm --name old-k8s-version-098965-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=old-k8s-version-098965 --entrypoint /usr/bin/test -v old-k8s-version-098965:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763935653-21975@sha256:5273d148037cfb860f8152fbd08072e6c1f4b37ff9a51956a3c12965f5f2d787 -d /var/lib
I1124 03:37:00.531368 456828 oci.go:107] Successfully prepared a docker volume old-k8s-version-098965
I1124 03:37:00.531432 456828 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1124 03:37:00.531457 456828 kic.go:194] Starting extracting preloaded images to volume ...
I1124 03:37:00.531528 456828 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21975-255205/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v old-k8s-version-098965:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763935653-21975@sha256:5273d148037cfb860f8152fbd08072e6c1f4b37ff9a51956a3c12965f5f2d787 -I lz4 -xf /preloaded.tar -C /extractDir
I1124 03:37:06.094759 456828 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21975-255205/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v old-k8s-version-098965:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763935653-21975@sha256:5273d148037cfb860f8152fbd08072e6c1f4b37ff9a51956a3c12965f5f2d787 -I lz4 -xf /preloaded.tar -C /extractDir: (5.56319468s)
I1124 03:37:06.094794 456828 kic.go:203] duration metric: took 5.563348412s to extract preloaded images to volume ...
W1124 03:37:06.094942 456828 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I1124 03:37:06.095054 456828 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1124 03:37:06.151713 456828 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname old-k8s-version-098965 --name old-k8s-version-098965 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=old-k8s-version-098965 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=old-k8s-version-098965 --network old-k8s-version-098965 --ip 192.168.85.2 --volume old-k8s-version-098965:/var --security-opt apparmor=unconfined --memory=3072mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763935653-21975@sha256:5273d148037cfb860f8152fbd08072e6c1f4b37ff9a51956a3c12965f5f2d787
I1124 03:37:06.468910 456828 cli_runner.go:164] Run: docker container inspect old-k8s-version-098965 --format={{.State.Running}}
I1124 03:37:06.491115 456828 cli_runner.go:164] Run: docker container inspect old-k8s-version-098965 --format={{.State.Status}}
I1124 03:37:06.525628 456828 cli_runner.go:164] Run: docker exec old-k8s-version-098965 stat /var/lib/dpkg/alternatives/iptables
I1124 03:37:06.579576 456828 oci.go:144] the created container "old-k8s-version-098965" has a running status.
I1124 03:37:06.579609 456828 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21975-255205/.minikube/machines/old-k8s-version-098965/id_rsa...
I1124 03:37:06.729919 456828 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21975-255205/.minikube/machines/old-k8s-version-098965/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1124 03:37:06.749775 456828 cli_runner.go:164] Run: docker container inspect old-k8s-version-098965 --format={{.State.Status}}
I1124 03:37:06.775133 456828 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1124 03:37:06.775157 456828 kic_runner.go:114] Args: [docker exec --privileged old-k8s-version-098965 chown docker:docker /home/docker/.ssh/authorized_keys]
I1124 03:37:06.826229 456828 cli_runner.go:164] Run: docker container inspect old-k8s-version-098965 --format={{.State.Status}}
I1124 03:37:06.849606 456828 machine.go:94] provisionDockerMachine start ...
I1124 03:37:06.849724 456828 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-098965
I1124 03:37:06.876665 456828 main.go:143] libmachine: Using SSH client type: native
I1124 03:37:06.877012 456828 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3dad70] 0x3dd270 <nil> [] 0s} 127.0.0.1 33418 <nil> <nil>}
I1124 03:37:06.877028 456828 main.go:143] libmachine: About to run SSH command:
hostname
I1124 03:37:06.877699 456828 main.go:143] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:43434->127.0.0.1:33418: read: connection reset by peer
I1124 03:37:10.029639 456828 main.go:143] libmachine: SSH cmd err, output: <nil>: old-k8s-version-098965
I1124 03:37:10.029683 456828 ubuntu.go:182] provisioning hostname "old-k8s-version-098965"
I1124 03:37:10.029771 456828 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-098965
I1124 03:37:10.053348 456828 main.go:143] libmachine: Using SSH client type: native
I1124 03:37:10.053702 456828 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3dad70] 0x3dd270 <nil> [] 0s} 127.0.0.1 33418 <nil> <nil>}
I1124 03:37:10.053721 456828 main.go:143] libmachine: About to run SSH command:
sudo hostname old-k8s-version-098965 && echo "old-k8s-version-098965" | sudo tee /etc/hostname
I1124 03:37:10.214587 456828 main.go:143] libmachine: SSH cmd err, output: <nil>: old-k8s-version-098965
I1124 03:37:10.214746 456828 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-098965
I1124 03:37:10.234062 456828 main.go:143] libmachine: Using SSH client type: native
I1124 03:37:10.234384 456828 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3dad70] 0x3dd270 <nil> [] 0s} 127.0.0.1 33418 <nil> <nil>}
I1124 03:37:10.234408 456828 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sold-k8s-version-098965' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 old-k8s-version-098965/g' /etc/hosts;
else
echo '127.0.1.1 old-k8s-version-098965' | sudo tee -a /etc/hosts;
fi
fi
I1124 03:37:10.384740 456828 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1124 03:37:10.384768 456828 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21975-255205/.minikube CaCertPath:/home/jenkins/minikube-integration/21975-255205/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21975-255205/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21975-255205/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21975-255205/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21975-255205/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21975-255205/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21975-255205/.minikube}
I1124 03:37:10.384789 456828 ubuntu.go:190] setting up certificates
I1124 03:37:10.384814 456828 provision.go:84] configureAuth start
I1124 03:37:10.384887 456828 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-098965
I1124 03:37:10.402644 456828 provision.go:143] copyHostCerts
I1124 03:37:10.402723 456828 exec_runner.go:144] found /home/jenkins/minikube-integration/21975-255205/.minikube/ca.pem, removing ...
I1124 03:37:10.402738 456828 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21975-255205/.minikube/ca.pem
I1124 03:37:10.402815 456828 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21975-255205/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21975-255205/.minikube/ca.pem (1078 bytes)
I1124 03:37:10.402908 456828 exec_runner.go:144] found /home/jenkins/minikube-integration/21975-255205/.minikube/cert.pem, removing ...
I1124 03:37:10.402916 456828 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21975-255205/.minikube/cert.pem
I1124 03:37:10.402943 456828 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21975-255205/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21975-255205/.minikube/cert.pem (1123 bytes)
I1124 03:37:10.403038 456828 exec_runner.go:144] found /home/jenkins/minikube-integration/21975-255205/.minikube/key.pem, removing ...
I1124 03:37:10.403049 456828 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21975-255205/.minikube/key.pem
I1124 03:37:10.403076 456828 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21975-255205/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21975-255205/.minikube/key.pem (1675 bytes)
I1124 03:37:10.403135 456828 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21975-255205/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21975-255205/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21975-255205/.minikube/certs/ca-key.pem org=jenkins.old-k8s-version-098965 san=[127.0.0.1 192.168.85.2 localhost minikube old-k8s-version-098965]
I1124 03:37:10.629214 456828 provision.go:177] copyRemoteCerts
I1124 03:37:10.629287 456828 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1124 03:37:10.629356 456828 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-098965
I1124 03:37:10.650240 456828 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33418 SSHKeyPath:/home/jenkins/minikube-integration/21975-255205/.minikube/machines/old-k8s-version-098965/id_rsa Username:docker}
I1124 03:37:10.757128 456828 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-255205/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I1124 03:37:10.776341 456828 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-255205/.minikube/machines/server.pem --> /etc/docker/server.pem (1233 bytes)
I1124 03:37:10.795643 456828 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-255205/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1124 03:37:10.814914 456828 provision.go:87] duration metric: took 430.069497ms to configureAuth
I1124 03:37:10.814945 456828 ubuntu.go:206] setting minikube options for container-runtime
I1124 03:37:10.815151 456828 config.go:182] Loaded profile config "old-k8s-version-098965": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1124 03:37:10.815164 456828 machine.go:97] duration metric: took 3.965526971s to provisionDockerMachine
I1124 03:37:10.815172 456828 client.go:176] duration metric: took 11.35420095s to LocalClient.Create
I1124 03:37:10.815193 456828 start.go:167] duration metric: took 11.354269562s to libmachine.API.Create "old-k8s-version-098965"
I1124 03:37:10.815206 456828 start.go:293] postStartSetup for "old-k8s-version-098965" (driver="docker")
I1124 03:37:10.815216 456828 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1124 03:37:10.815268 456828 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1124 03:37:10.815313 456828 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-098965
I1124 03:37:10.835952 456828 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33418 SSHKeyPath:/home/jenkins/minikube-integration/21975-255205/.minikube/machines/old-k8s-version-098965/id_rsa Username:docker}
I1124 03:37:10.940940 456828 ssh_runner.go:195] Run: cat /etc/os-release
I1124 03:37:10.944392 456828 main.go:143] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1124 03:37:10.944420 456828 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1124 03:37:10.944432 456828 filesync.go:126] Scanning /home/jenkins/minikube-integration/21975-255205/.minikube/addons for local assets ...
I1124 03:37:10.944531 456828 filesync.go:126] Scanning /home/jenkins/minikube-integration/21975-255205/.minikube/files for local assets ...
I1124 03:37:10.944626 456828 filesync.go:149] local asset: /home/jenkins/minikube-integration/21975-255205/.minikube/files/etc/ssl/certs/2570692.pem -> 2570692.pem in /etc/ssl/certs
I1124 03:37:10.944739 456828 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1124 03:37:10.953432 456828 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-255205/.minikube/files/etc/ssl/certs/2570692.pem --> /etc/ssl/certs/2570692.pem (1708 bytes)
I1124 03:37:10.971988 456828 start.go:296] duration metric: took 156.749021ms for postStartSetup
I1124 03:37:10.972390 456828 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-098965
I1124 03:37:10.990273 456828 profile.go:143] Saving config to /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/config.json ...
I1124 03:37:10.990577 456828 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1124 03:37:10.990655 456828 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-098965
I1124 03:37:11.011483 456828 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33418 SSHKeyPath:/home/jenkins/minikube-integration/21975-255205/.minikube/machines/old-k8s-version-098965/id_rsa Username:docker}
I1124 03:37:11.114126 456828 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1124 03:37:11.120113 456828 start.go:128] duration metric: took 11.665472167s to createHost
I1124 03:37:11.120148 456828 start.go:83] releasing machines lock for "old-k8s-version-098965", held for 11.665617423s
I1124 03:37:11.120263 456828 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-098965
I1124 03:37:11.139454 456828 ssh_runner.go:195] Run: cat /version.json
I1124 03:37:11.139475 456828 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1124 03:37:11.139509 456828 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-098965
I1124 03:37:11.139546 456828 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-098965
I1124 03:37:11.159657 456828 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33418 SSHKeyPath:/home/jenkins/minikube-integration/21975-255205/.minikube/machines/old-k8s-version-098965/id_rsa Username:docker}
I1124 03:37:11.178004 456828 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33418 SSHKeyPath:/home/jenkins/minikube-integration/21975-255205/.minikube/machines/old-k8s-version-098965/id_rsa Username:docker}
I1124 03:37:11.355366 456828 ssh_runner.go:195] Run: systemctl --version
I1124 03:37:11.362316 456828 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1124 03:37:11.366848 456828 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1124 03:37:11.366938 456828 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1124 03:37:11.395828 456828 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/10-crio-bridge.conflist.disabled] bridge cni config(s)
I1124 03:37:11.395905 456828 start.go:496] detecting cgroup driver to use...
I1124 03:37:11.395958 456828 detect.go:187] detected "cgroupfs" cgroup driver on host os
I1124 03:37:11.396051 456828 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1124 03:37:11.412427 456828 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1124 03:37:11.425664 456828 docker.go:218] disabling cri-docker service (if available) ...
I1124 03:37:11.425739 456828 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1124 03:37:11.443137 456828 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1124 03:37:11.466719 456828 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1124 03:37:11.592922 456828 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1124 03:37:11.733496 456828 docker.go:234] disabling docker service ...
I1124 03:37:11.733625 456828 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1124 03:37:11.756653 456828 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1124 03:37:11.773475 456828 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1124 03:37:11.921229 456828 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1124 03:37:12.062701 456828 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1124 03:37:12.076946 456828 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1124 03:37:12.092118 456828 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.9"|' /etc/containerd/config.toml"
I1124 03:37:12.101736 456828 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1124 03:37:12.111290 456828 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I1124 03:37:12.111365 456828 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I1124 03:37:12.120980 456828 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1124 03:37:12.130335 456828 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1124 03:37:12.139831 456828 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1124 03:37:12.149028 456828 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1124 03:37:12.158976 456828 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1124 03:37:12.168289 456828 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1124 03:37:12.179157 456828 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1124 03:37:12.189127 456828 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1124 03:37:12.196909 456828 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1124 03:37:12.204634 456828 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1124 03:37:12.341578 456828 ssh_runner.go:195] Run: sudo systemctl restart containerd
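Note: the sed calls above patch /etc/containerd/config.toml in place (SystemdCgroup = false for the cgroupfs driver, the pause sandbox_image, and the CNI conf_dir) before this restart; the effective values can be confirmed from the node afterwards. A sketch, assuming the profile is still running:
  out/minikube-linux-arm64 -p old-k8s-version-098965 ssh -- sudo grep -nE 'SystemdCgroup|sandbox_image|conf_dir' /etc/containerd/config.toml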
I1124 03:37:12.476923 456828 start.go:543] Will wait 60s for socket path /run/containerd/containerd.sock
I1124 03:37:12.477008 456828 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I1124 03:37:12.481370 456828 start.go:564] Will wait 60s for crictl version
I1124 03:37:12.481445 456828 ssh_runner.go:195] Run: which crictl
I1124 03:37:12.485391 456828 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1124 03:37:12.516194 456828 start.go:580] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: v2.1.5
RuntimeApiVersion: v1
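Note: the version probe above goes through the endpoint written to /etc/crictl.yaml a few lines earlier; the same check can be made explicit about which socket it hits, e.g. from a shell on the node:
  sudo crictl --runtime-endpoint unix:///run/containerd/containerd.sock version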
I1124 03:37:12.516279 456828 ssh_runner.go:195] Run: containerd --version
I1124 03:37:12.539070 456828 ssh_runner.go:195] Run: containerd --version
I1124 03:37:12.568276 456828 out.go:179] * Preparing Kubernetes v1.28.0 on containerd 2.1.5 ...
I1124 03:37:12.571173 456828 cli_runner.go:164] Run: docker network inspect old-k8s-version-098965 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1124 03:37:12.589022 456828 ssh_runner.go:195] Run: grep 192.168.85.1 host.minikube.internal$ /etc/hosts
I1124 03:37:12.593373 456828 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.85.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1124 03:37:12.603275 456828 kubeadm.go:884] updating cluster {Name:old-k8s-version-098965 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763935653-21975@sha256:5273d148037cfb860f8152fbd08072e6c1f4b37ff9a51956a3c12965f5f2d787 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-098965 Namespace:default APIServerHAVIP: APIServerName:minik
ubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false Cu
stomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1124 03:37:12.603400 456828 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1124 03:37:12.603468 456828 ssh_runner.go:195] Run: sudo crictl images --output json
I1124 03:37:12.632602 456828 containerd.go:627] all images are preloaded for containerd runtime.
I1124 03:37:12.632625 456828 containerd.go:534] Images already preloaded, skipping extraction
I1124 03:37:12.632687 456828 ssh_runner.go:195] Run: sudo crictl images --output json
I1124 03:37:12.658408 456828 containerd.go:627] all images are preloaded for containerd runtime.
I1124 03:37:12.658429 456828 cache_images.go:86] Images are preloaded, skipping loading
I1124 03:37:12.658437 456828 kubeadm.go:935] updating node { 192.168.85.2 8443 v1.28.0 containerd true true} ...
I1124 03:37:12.658540 456828 kubeadm.go:947] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.28.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=old-k8s-version-098965 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.85.2
[Install]
config:
{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-098965 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
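The kubelet ExecStart and the KubernetesConfig block above are what minikube renders into the systemd drop-in it copies to /etc/systemd/system/kubelet.service.d/10-kubeadm.conf a few lines further down. If a start ever needs debugging, the effective unit can be inspected on the node itself (for instance after minikube ssh -p old-k8s-version-098965); a small sketch:

  # print kubelet.service together with every drop-in, then the resolved command line
  systemctl cat kubelet
  systemctl show kubelet -p ExecStart --no-pager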
I1124 03:37:12.658617 456828 ssh_runner.go:195] Run: sudo crictl info
I1124 03:37:12.684758 456828 cni.go:84] Creating CNI manager for ""
I1124 03:37:12.684785 456828 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1124 03:37:12.684799 456828 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1124 03:37:12.684823 456828 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.85.2 APIServerPort:8443 KubernetesVersion:v1.28.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:old-k8s-version-098965 NodeName:old-k8s-version-098965 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.85.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.85.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt
StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1124 03:37:12.684947 456828 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.85.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///run/containerd/containerd.sock
name: "old-k8s-version-098965"
kubeletExtraArgs:
node-ip: 192.168.85.2
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.85.2"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.28.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
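The rendered kubeadm config above (InitConfiguration, ClusterConfiguration, KubeletConfiguration and KubeProxyConfiguration in one multi-document file) is copied to /var/tmp/minikube/kubeadm.yaml.new below and later handed to kubeadm init. When a bootstrap fails it can help to exercise the same file by hand on the node; a sketch using the paths from this run (the validate subcommand only exists in reasonably recent kubeadm releases, so treat that line as optional):

  # resolve the control-plane images this exact config would pull
  sudo /var/lib/minikube/binaries/v1.28.0/kubeadm config images list --config /var/tmp/minikube/kubeadm.yaml
  # newer kubeadm releases can also lint the file directly
  sudo /var/lib/minikube/binaries/v1.28.0/kubeadm config validate --config /var/tmp/minikube/kubeadm.yaml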
I1124 03:37:12.685018 456828 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.28.0
I1124 03:37:12.693523 456828 binaries.go:51] Found k8s binaries, skipping transfer
I1124 03:37:12.693645 456828 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1124 03:37:12.702042 456828 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (326 bytes)
I1124 03:37:12.715991 456828 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1124 03:37:12.729770 456828 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2176 bytes)
I1124 03:37:12.743578 456828 ssh_runner.go:195] Run: grep 192.168.85.2 control-plane.minikube.internal$ /etc/hosts
I1124 03:37:12.747534 456828 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.85.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1124 03:37:12.757715 456828 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1124 03:37:12.884405 456828 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1124 03:37:12.905132 456828 certs.go:69] Setting up /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965 for IP: 192.168.85.2
I1124 03:37:12.905158 456828 certs.go:195] generating shared ca certs ...
I1124 03:37:12.905175 456828 certs.go:227] acquiring lock for ca certs: {Name:mk7774f5066ddc2da4b4108ade01c52c4ed6acef Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 03:37:12.905388 456828 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21975-255205/.minikube/ca.key
I1124 03:37:12.905463 456828 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21975-255205/.minikube/proxy-client-ca.key
I1124 03:37:12.905478 456828 certs.go:257] generating profile certs ...
I1124 03:37:12.905558 456828 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/client.key
I1124 03:37:12.905577 456828 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/client.crt with IP's: []
I1124 03:37:13.092952 456828 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/client.crt ...
I1124 03:37:13.092989 456828 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/client.crt: {Name:mkdd0fa6209ccf6aa2aa41557354bcbc75868f78 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 03:37:13.093227 456828 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/client.key ...
I1124 03:37:13.093245 456828 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/client.key: {Name:mk9ee935a6f1a8dd6673b97d66ec46cca5ad1664 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 03:37:13.093351 456828 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/apiserver.key.56338243
I1124 03:37:13.093373 456828 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/apiserver.crt.56338243 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.85.2]
I1124 03:37:13.449033 456828 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/apiserver.crt.56338243 ...
I1124 03:37:13.449067 456828 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/apiserver.crt.56338243: {Name:mk65bad85814a0a12971d39286d0e5c451efbbb6 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 03:37:13.449251 456828 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/apiserver.key.56338243 ...
I1124 03:37:13.449269 456828 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/apiserver.key.56338243: {Name:mk64b57d615101ac92823627ae52dbd8c44bfea8 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 03:37:13.449353 456828 certs.go:382] copying /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/apiserver.crt.56338243 -> /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/apiserver.crt
I1124 03:37:13.449436 456828 certs.go:386] copying /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/apiserver.key.56338243 -> /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/apiserver.key
I1124 03:37:13.449500 456828 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/proxy-client.key
I1124 03:37:13.449520 456828 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/proxy-client.crt with IP's: []
I1124 03:37:13.614481 456828 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/proxy-client.crt ...
I1124 03:37:13.614513 456828 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/proxy-client.crt: {Name:mk7045112c74be0d05a12bbf47e455d86596546e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 03:37:13.614698 456828 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/proxy-client.key ...
I1124 03:37:13.614719 456828 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/proxy-client.key: {Name:mkbadb2ce2b4c7ecb9f7755942cb7ff8139714e8 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 03:37:13.614923 456828 certs.go:484] found cert: /home/jenkins/minikube-integration/21975-255205/.minikube/certs/257069.pem (1338 bytes)
W1124 03:37:13.614972 456828 certs.go:480] ignoring /home/jenkins/minikube-integration/21975-255205/.minikube/certs/257069_empty.pem, impossibly tiny 0 bytes
I1124 03:37:13.614987 456828 certs.go:484] found cert: /home/jenkins/minikube-integration/21975-255205/.minikube/certs/ca-key.pem (1675 bytes)
I1124 03:37:13.615022 456828 certs.go:484] found cert: /home/jenkins/minikube-integration/21975-255205/.minikube/certs/ca.pem (1078 bytes)
I1124 03:37:13.615052 456828 certs.go:484] found cert: /home/jenkins/minikube-integration/21975-255205/.minikube/certs/cert.pem (1123 bytes)
I1124 03:37:13.615080 456828 certs.go:484] found cert: /home/jenkins/minikube-integration/21975-255205/.minikube/certs/key.pem (1675 bytes)
I1124 03:37:13.615129 456828 certs.go:484] found cert: /home/jenkins/minikube-integration/21975-255205/.minikube/files/etc/ssl/certs/2570692.pem (1708 bytes)
I1124 03:37:13.615732 456828 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-255205/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1124 03:37:13.637058 456828 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-255205/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1124 03:37:13.656196 456828 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-255205/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1124 03:37:13.675513 456828 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-255205/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I1124 03:37:13.694987 456828 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1432 bytes)
I1124 03:37:13.714921 456828 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I1124 03:37:13.733764 456828 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1124 03:37:13.753359 456828 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-255205/.minikube/profiles/old-k8s-version-098965/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I1124 03:37:13.772856 456828 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-255205/.minikube/certs/257069.pem --> /usr/share/ca-certificates/257069.pem (1338 bytes)
I1124 03:37:13.791271 456828 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-255205/.minikube/files/etc/ssl/certs/2570692.pem --> /usr/share/ca-certificates/2570692.pem (1708 bytes)
I1124 03:37:13.810674 456828 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21975-255205/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1124 03:37:13.830260 456828 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1124 03:37:13.845031 456828 ssh_runner.go:195] Run: openssl version
I1124 03:37:13.854200 456828 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/257069.pem && ln -fs /usr/share/ca-certificates/257069.pem /etc/ssl/certs/257069.pem"
I1124 03:37:13.864090 456828 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/257069.pem
I1124 03:37:13.868253 456828 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 24 02:58 /usr/share/ca-certificates/257069.pem
I1124 03:37:13.868333 456828 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/257069.pem
I1124 03:37:13.910904 456828 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/257069.pem /etc/ssl/certs/51391683.0"
I1124 03:37:13.920025 456828 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/2570692.pem && ln -fs /usr/share/ca-certificates/2570692.pem /etc/ssl/certs/2570692.pem"
I1124 03:37:13.928734 456828 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/2570692.pem
I1124 03:37:13.932666 456828 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 24 02:58 /usr/share/ca-certificates/2570692.pem
I1124 03:37:13.932765 456828 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/2570692.pem
I1124 03:37:13.979663 456828 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/2570692.pem /etc/ssl/certs/3ec20f2e.0"
I1124 03:37:13.988918 456828 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1124 03:37:13.998028 456828 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1124 03:37:14.003766 456828 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 24 02:51 /usr/share/ca-certificates/minikubeCA.pem
I1124 03:37:14.003942 456828 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1124 03:37:14.050814 456828 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
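The openssl x509 -hash calls in this block compute the subject hash that OpenSSL's CA lookup uses as the symlink name (<hash>.0) under /etc/ssl/certs, which is why each hash is immediately followed by a ln -fs of exactly that name. The same idea for a single certificate, with CERT as an illustrative path:

  # link a CA cert under the subject-hash name OpenSSL expects
  CERT=/usr/share/ca-certificates/minikubeCA.pem
  HASH=$(openssl x509 -hash -noout -in "$CERT")
  sudo ln -fs "$CERT" "/etc/ssl/certs/${HASH}.0"

openssl rehash /etc/ssl/certs (or a distro's update-ca-certificates) does the same thing in bulk.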
I1124 03:37:14.059590 456828 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1124 03:37:14.063378 456828 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1124 03:37:14.063446 456828 kubeadm.go:401] StartCluster: {Name:old-k8s-version-098965 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763935653-21975@sha256:5273d148037cfb860f8152fbd08072e6c1f4b37ff9a51956a3c12965f5f2d787 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-098965 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1124 03:37:14.063519 456828 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I1124 03:37:14.063584 456828 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I1124 03:37:14.095036 456828 cri.go:89] found id: ""
I1124 03:37:14.095112 456828 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1124 03:37:14.103077 456828 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1124 03:37:14.111415 456828 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I1124 03:37:14.111511 456828 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1124 03:37:14.120533 456828 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1124 03:37:14.120557 456828 kubeadm.go:158] found existing configuration files:
I1124 03:37:14.120636 456828 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1124 03:37:14.129223 456828 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1124 03:37:14.129299 456828 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1124 03:37:14.137169 456828 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1124 03:37:14.145264 456828 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1124 03:37:14.145326 456828 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1124 03:37:14.153440 456828 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1124 03:37:14.161802 456828 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1124 03:37:14.161868 456828 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1124 03:37:14.169170 456828 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1124 03:37:14.176729 456828 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1124 03:37:14.176794 456828 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1124 03:37:14.184164 456828 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.28.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1124 03:37:14.235215 456828 kubeadm.go:319] [init] Using Kubernetes version: v1.28.0
I1124 03:37:14.235279 456828 kubeadm.go:319] [preflight] Running pre-flight checks
I1124 03:37:14.275786 456828 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I1124 03:37:14.275863 456828 kubeadm.go:319] KERNEL_VERSION: 5.15.0-1084-aws
I1124 03:37:14.275904 456828 kubeadm.go:319] OS: Linux
I1124 03:37:14.275954 456828 kubeadm.go:319] CGROUPS_CPU: enabled
I1124 03:37:14.276007 456828 kubeadm.go:319] CGROUPS_CPUACCT: enabled
I1124 03:37:14.276058 456828 kubeadm.go:319] CGROUPS_CPUSET: enabled
I1124 03:37:14.276111 456828 kubeadm.go:319] CGROUPS_DEVICES: enabled
I1124 03:37:14.276161 456828 kubeadm.go:319] CGROUPS_FREEZER: enabled
I1124 03:37:14.276213 456828 kubeadm.go:319] CGROUPS_MEMORY: enabled
I1124 03:37:14.276262 456828 kubeadm.go:319] CGROUPS_PIDS: enabled
I1124 03:37:14.276314 456828 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I1124 03:37:14.276364 456828 kubeadm.go:319] CGROUPS_BLKIO: enabled
I1124 03:37:14.361031 456828 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1124 03:37:14.361200 456828 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1124 03:37:14.361333 456828 kubeadm.go:319] [preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
I1124 03:37:14.534299 456828 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1124 03:37:14.537587 456828 out.go:252] - Generating certificates and keys ...
I1124 03:37:14.537751 456828 kubeadm.go:319] [certs] Using existing ca certificate authority
I1124 03:37:14.537876 456828 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1124 03:37:15.136064 456828 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I1124 03:37:15.790461 456828 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I1124 03:37:16.745198 456828 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1124 03:37:17.101081 456828 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1124 03:37:17.816844 456828 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1124 03:37:17.817225 456828 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [localhost old-k8s-version-098965] and IPs [192.168.85.2 127.0.0.1 ::1]
I1124 03:37:18.708622 456828 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1124 03:37:18.708941 456828 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [localhost old-k8s-version-098965] and IPs [192.168.85.2 127.0.0.1 ::1]
I1124 03:37:19.626997 456828 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1124 03:37:20.013744 456828 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1124 03:37:21.332223 456828 kubeadm.go:319] [certs] Generating "sa" key and public key
I1124 03:37:21.333010 456828 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1124 03:37:21.538924 456828 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1124 03:37:21.950934 456828 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1124 03:37:23.178695 456828 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1124 03:37:23.307692 456828 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1124 03:37:23.308662 456828 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1124 03:37:23.312055 456828 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1124 03:37:23.317674 456828 out.go:252] - Booting up control plane ...
I1124 03:37:23.317788 456828 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1124 03:37:23.317867 456828 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1124 03:37:23.317934 456828 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1124 03:37:23.338190 456828 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1124 03:37:23.339603 456828 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1124 03:37:23.339662 456828 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1124 03:37:23.480314 456828 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
I1124 03:37:31.483609 456828 kubeadm.go:319] [apiclient] All control plane components are healthy after 8.003694 seconds
I1124 03:37:31.483744 456828 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1124 03:37:31.502417 456828 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1124 03:37:32.033208 456828 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1124 03:37:32.033430 456828 kubeadm.go:319] [mark-control-plane] Marking the node old-k8s-version-098965 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1124 03:37:32.546541 456828 kubeadm.go:319] [bootstrap-token] Using token: ycw9qc.7i65x4n1zr1z1k2d
I1124 03:37:32.549515 456828 out.go:252] - Configuring RBAC rules ...
I1124 03:37:32.549646 456828 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1124 03:37:32.555568 456828 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1124 03:37:32.568444 456828 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1124 03:37:32.576351 456828 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1124 03:37:32.581974 456828 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1124 03:37:32.586465 456828 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1124 03:37:32.604043 456828 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1124 03:37:32.913255 456828 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1124 03:37:32.963682 456828 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1124 03:37:32.981293 456828 kubeadm.go:319]
I1124 03:37:32.981375 456828 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1124 03:37:32.981391 456828 kubeadm.go:319]
I1124 03:37:32.981470 456828 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1124 03:37:32.981490 456828 kubeadm.go:319]
I1124 03:37:32.981515 456828 kubeadm.go:319] mkdir -p $HOME/.kube
I1124 03:37:32.983497 456828 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1124 03:37:32.983565 456828 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1124 03:37:32.983572 456828 kubeadm.go:319]
I1124 03:37:32.983653 456828 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1124 03:37:32.983663 456828 kubeadm.go:319]
I1124 03:37:32.983715 456828 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1124 03:37:32.983723 456828 kubeadm.go:319]
I1124 03:37:32.983775 456828 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1124 03:37:32.983853 456828 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1124 03:37:32.983929 456828 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1124 03:37:32.983938 456828 kubeadm.go:319]
I1124 03:37:32.984037 456828 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1124 03:37:32.984117 456828 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1124 03:37:32.984123 456828 kubeadm.go:319]
I1124 03:37:32.984216 456828 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token ycw9qc.7i65x4n1zr1z1k2d \
I1124 03:37:32.984336 456828 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:7c8724c9df7bddf0d2f355149f7d996f734006ccfb255d81436a9364083c5f40 \
I1124 03:37:32.984390 456828 kubeadm.go:319] --control-plane
I1124 03:37:32.984397 456828 kubeadm.go:319]
I1124 03:37:32.984704 456828 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1124 03:37:32.984716 456828 kubeadm.go:319]
I1124 03:37:32.984916 456828 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token ycw9qc.7i65x4n1zr1z1k2d \
I1124 03:37:32.985043 456828 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:7c8724c9df7bddf0d2f355149f7d996f734006ccfb255d81436a9364083c5f40
I1124 03:37:32.991944 456828 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1084-aws\n", err: exit status 1
I1124 03:37:32.992068 456828 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
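Both warnings are expected in this environment: the node reuses the host's 5.15.0-1084-aws kernel, whose configs module is not available inside the container, and minikube starts the kubelet itself instead of enabling the systemd unit. On a hand-managed host the second warning would be resolved exactly as kubeadm suggests:

  # persist the kubelet across reboots (not needed for minikube's docker driver)
  sudo systemctl enable --now kubelet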
I1124 03:37:32.992094 456828 cni.go:84] Creating CNI manager for ""
I1124 03:37:32.992102 456828 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1124 03:37:32.995225 456828 out.go:179] * Configuring CNI (Container Networking Interface) ...
I1124 03:37:32.998096 456828 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I1124 03:37:33.004093 456828 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.28.0/kubectl ...
I1124 03:37:33.004119 456828 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I1124 03:37:33.036441 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I1124 03:37:34.155879 456828 ssh_runner.go:235] Completed: sudo /var/lib/minikube/binaries/v1.28.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml: (1.119390679s)
I1124 03:37:34.155921 456828 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1124 03:37:34.156043 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes old-k8s-version-098965 minikube.k8s.io/updated_at=2025_11_24T03_37_34_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=525fef2394fe4854b27b3c3385e33403fd802864 minikube.k8s.io/name=old-k8s-version-098965 minikube.k8s.io/primary=true
I1124 03:37:34.156059 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:34.370014 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:34.370081 456828 ops.go:34] apiserver oom_adj: -16
I1124 03:37:34.870621 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:35.370425 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:35.870744 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:36.370591 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:36.870102 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:37.370755 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:37.870481 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:38.370729 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:38.870716 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:39.370861 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:39.870112 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:40.370985 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:40.870131 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:41.370224 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:41.870910 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:42.370129 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:42.870708 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:43.370299 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:43.870132 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:44.370373 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:44.870148 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:45.370873 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:45.870208 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:46.370930 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:46.870103 456828 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 03:37:46.965988 456828 kubeadm.go:1114] duration metric: took 12.810009639s to wait for elevateKubeSystemPrivileges
I1124 03:37:46.966014 456828 kubeadm.go:403] duration metric: took 32.902577839s to StartCluster
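The burst of identical kubectl get sa default calls above is a simple poll: the default ServiceAccount is created asynchronously by the controller-manager after kubeadm init, and minikube retries roughly every 500ms until it exists, which is what the 12.8s elevateKubeSystemPrivileges duration metric measures. The same wait, written as a plain loop with the paths from this run:

  # block until the default ServiceAccount shows up in the default namespace
  until sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default \
      --kubeconfig=/var/lib/minikube/kubeconfig >/dev/null 2>&1; do
    sleep 0.5
  done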
I1124 03:37:46.966033 456828 settings.go:142] acquiring lock: {Name:mk06b563e5bc383cd64ed92ea3d8ac6aac195923 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 03:37:46.966096 456828 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21975-255205/kubeconfig
I1124 03:37:46.967091 456828 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21975-255205/kubeconfig: {Name:mk59b88a9b5c6c93f7412b3f64976d4efe64bdb2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 03:37:46.967316 456828 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1124 03:37:46.967431 456828 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1124 03:37:46.967681 456828 config.go:182] Loaded profile config "old-k8s-version-098965": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1124 03:37:46.967714 456828 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1124 03:37:46.967774 456828 addons.go:70] Setting storage-provisioner=true in profile "old-k8s-version-098965"
I1124 03:37:46.967788 456828 addons.go:239] Setting addon storage-provisioner=true in "old-k8s-version-098965"
I1124 03:37:46.967809 456828 host.go:66] Checking if "old-k8s-version-098965" exists ...
I1124 03:37:46.968568 456828 cli_runner.go:164] Run: docker container inspect old-k8s-version-098965 --format={{.State.Status}}
I1124 03:37:46.969265 456828 addons.go:70] Setting default-storageclass=true in profile "old-k8s-version-098965"
I1124 03:37:46.969293 456828 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "old-k8s-version-098965"
I1124 03:37:46.969620 456828 cli_runner.go:164] Run: docker container inspect old-k8s-version-098965 --format={{.State.Status}}
I1124 03:37:46.976026 456828 out.go:179] * Verifying Kubernetes components...
I1124 03:37:46.980332 456828 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1124 03:37:47.005475 456828 addons.go:239] Setting addon default-storageclass=true in "old-k8s-version-098965"
I1124 03:37:47.005521 456828 host.go:66] Checking if "old-k8s-version-098965" exists ...
I1124 03:37:47.006016 456828 cli_runner.go:164] Run: docker container inspect old-k8s-version-098965 --format={{.State.Status}}
I1124 03:37:47.021223 456828 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1124 03:37:47.025797 456828 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1124 03:37:47.025822 456828 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1124 03:37:47.025899 456828 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-098965
I1124 03:37:47.043575 456828 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1124 03:37:47.043596 456828 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1124 03:37:47.043662 456828 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-098965
I1124 03:37:47.067444 456828 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33418 SSHKeyPath:/home/jenkins/minikube-integration/21975-255205/.minikube/machines/old-k8s-version-098965/id_rsa Username:docker}
I1124 03:37:47.085937 456828 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33418 SSHKeyPath:/home/jenkins/minikube-integration/21975-255205/.minikube/machines/old-k8s-version-098965/id_rsa Username:docker}
I1124 03:37:47.279804 456828 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.85.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I1124 03:37:47.286358 456828 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1124 03:37:47.448103 456828 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1124 03:37:47.467412 456828 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1124 03:37:48.289350 456828 ssh_runner.go:235] Completed: sudo systemctl start kubelet: (1.002895345s)
I1124 03:37:48.290385 456828 node_ready.go:35] waiting up to 6m0s for node "old-k8s-version-098965" to be "Ready" ...
I1124 03:37:48.311715 456828 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.85.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -": (1.031826007s)
I1124 03:37:48.311750 456828 start.go:977] {"host.minikube.internal": 192.168.85.1} host record injected into CoreDNS's ConfigMap
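The long sed pipeline that just completed edits the CoreDNS Corefile in place, inserting a hosts{} stanza that answers host.minikube.internal with the gateway address before queries fall through to the usual /etc/resolv.conf forwarding. To confirm the record actually landed, the live Corefile can be dumped with the same kubectl binary and kubeconfig used in this run:

  # print the patched Corefile; the injected hosts block should list 192.168.85.1
  sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig \
      -n kube-system get configmap coredns -o jsonpath='{.data.Corefile}'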
I1124 03:37:48.801783 456828 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (1.334276635s)
I1124 03:37:48.805329 456828 out.go:179] * Enabled addons: default-storageclass, storage-provisioner
I1124 03:37:48.808579 456828 addons.go:530] duration metric: took 1.840852722s for enable addons: enabled=[default-storageclass storage-provisioner]
I1124 03:37:48.816214 456828 kapi.go:214] "coredns" deployment in "kube-system" namespace and "old-k8s-version-098965" context rescaled to 1 replicas
W1124 03:37:50.294251 456828 node_ready.go:57] node "old-k8s-version-098965" has "Ready":"False" status (will retry)
W1124 03:37:52.793560 456828 node_ready.go:57] node "old-k8s-version-098965" has "Ready":"False" status (will retry)
W1124 03:37:54.793903 456828 node_ready.go:57] node "old-k8s-version-098965" has "Ready":"False" status (will retry)
W1124 03:37:56.794298 456828 node_ready.go:57] node "old-k8s-version-098965" has "Ready":"False" status (will retry)
W1124 03:37:59.293536 456828 node_ready.go:57] node "old-k8s-version-098965" has "Ready":"False" status (will retry)
I1124 03:38:00.344278 456828 node_ready.go:49] node "old-k8s-version-098965" is "Ready"
I1124 03:38:00.344383 456828 node_ready.go:38] duration metric: took 12.053923317s for node "old-k8s-version-098965" to be "Ready" ...
I1124 03:38:00.344417 456828 api_server.go:52] waiting for apiserver process to appear ...
I1124 03:38:00.344536 456828 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1124 03:38:00.402952 456828 api_server.go:72] duration metric: took 13.435606359s to wait for apiserver process to appear ...
I1124 03:38:00.402979 456828 api_server.go:88] waiting for apiserver healthz status ...
I1124 03:38:00.403000 456828 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
I1124 03:38:00.414315 456828 api_server.go:279] https://192.168.85.2:8443/healthz returned 200:
ok
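The healthz probe above hits https://192.168.85.2:8443/healthz directly with the cluster's TLS material. An equivalent check that goes through admin credentials instead, using the kubeconfig this run writes to /home/jenkins/minikube-integration/21975-255205/kubeconfig, is:

  # 'ok' means the aggregated health check passed; ?verbose lists each sub-check
  kubectl --kubeconfig=/home/jenkins/minikube-integration/21975-255205/kubeconfig get --raw='/healthz?verbose'

On current API servers /readyz and /livez expose the same information split into readiness and liveness groups.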
I1124 03:38:00.416457 456828 api_server.go:141] control plane version: v1.28.0
I1124 03:38:00.416601 456828 api_server.go:131] duration metric: took 13.613451ms to wait for apiserver health ...
I1124 03:38:00.416631 456828 system_pods.go:43] waiting for kube-system pods to appear ...
I1124 03:38:00.425631 456828 system_pods.go:59] 8 kube-system pods found
I1124 03:38:00.425724 456828 system_pods.go:61] "coredns-5dd5756b68-2kmf2" [9c6642fb-17b7-4199-b927-eb63b9a58260] Pending
I1124 03:38:00.425749 456828 system_pods.go:61] "etcd-old-k8s-version-098965" [994c486f-9839-4407-bc6d-d7c52c9dcfe7] Running
I1124 03:38:00.425783 456828 system_pods.go:61] "kindnet-mctv9" [0f0d91cd-7d64-482e-b33c-383b20f5bd79] Running
I1124 03:38:00.425810 456828 system_pods.go:61] "kube-apiserver-old-k8s-version-098965" [777b36fe-0c46-4427-90b9-ef48ae1cc287] Running
I1124 03:38:00.425830 456828 system_pods.go:61] "kube-controller-manager-old-k8s-version-098965" [3be22a1a-db9f-446f-9b0a-e61ce5482e12] Running
I1124 03:38:00.425851 456828 system_pods.go:61] "kube-proxy-5t7nq" [6050bdb0-6390-48c7-863f-520ef6277ad8] Running
I1124 03:38:00.425879 456828 system_pods.go:61] "kube-scheduler-old-k8s-version-098965" [ff509e4b-4fde-4ea0-8261-5f4463c5be01] Running
I1124 03:38:00.425909 456828 system_pods.go:61] "storage-provisioner" [9ede1da5-704c-4aab-93e0-77ce93158129] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 03:38:00.425943 456828 system_pods.go:74] duration metric: took 9.290401ms to wait for pod list to return data ...
I1124 03:38:00.425969 456828 default_sa.go:34] waiting for default service account to be created ...
I1124 03:38:00.429247 456828 default_sa.go:45] found service account: "default"
I1124 03:38:00.429360 456828 default_sa.go:55] duration metric: took 3.356866ms for default service account to be created ...
I1124 03:38:00.429393 456828 system_pods.go:116] waiting for k8s-apps to be running ...
I1124 03:38:00.435199 456828 system_pods.go:86] 8 kube-system pods found
I1124 03:38:00.435313 456828 system_pods.go:89] "coredns-5dd5756b68-2kmf2" [9c6642fb-17b7-4199-b927-eb63b9a58260] Pending
I1124 03:38:00.435337 456828 system_pods.go:89] "etcd-old-k8s-version-098965" [994c486f-9839-4407-bc6d-d7c52c9dcfe7] Running
I1124 03:38:00.435376 456828 system_pods.go:89] "kindnet-mctv9" [0f0d91cd-7d64-482e-b33c-383b20f5bd79] Running
I1124 03:38:00.435403 456828 system_pods.go:89] "kube-apiserver-old-k8s-version-098965" [777b36fe-0c46-4427-90b9-ef48ae1cc287] Running
I1124 03:38:00.435426 456828 system_pods.go:89] "kube-controller-manager-old-k8s-version-098965" [3be22a1a-db9f-446f-9b0a-e61ce5482e12] Running
I1124 03:38:00.435460 456828 system_pods.go:89] "kube-proxy-5t7nq" [6050bdb0-6390-48c7-863f-520ef6277ad8] Running
I1124 03:38:00.435482 456828 system_pods.go:89] "kube-scheduler-old-k8s-version-098965" [ff509e4b-4fde-4ea0-8261-5f4463c5be01] Running
I1124 03:38:00.435514 456828 system_pods.go:89] "storage-provisioner" [9ede1da5-704c-4aab-93e0-77ce93158129] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 03:38:00.435576 456828 retry.go:31] will retry after 251.537949ms: missing components: kube-dns
I1124 03:38:00.691897 456828 system_pods.go:86] 8 kube-system pods found
I1124 03:38:00.691936 456828 system_pods.go:89] "coredns-5dd5756b68-2kmf2" [9c6642fb-17b7-4199-b927-eb63b9a58260] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 03:38:00.691943 456828 system_pods.go:89] "etcd-old-k8s-version-098965" [994c486f-9839-4407-bc6d-d7c52c9dcfe7] Running
I1124 03:38:00.691949 456828 system_pods.go:89] "kindnet-mctv9" [0f0d91cd-7d64-482e-b33c-383b20f5bd79] Running
I1124 03:38:00.691954 456828 system_pods.go:89] "kube-apiserver-old-k8s-version-098965" [777b36fe-0c46-4427-90b9-ef48ae1cc287] Running
I1124 03:38:00.691959 456828 system_pods.go:89] "kube-controller-manager-old-k8s-version-098965" [3be22a1a-db9f-446f-9b0a-e61ce5482e12] Running
I1124 03:38:00.691968 456828 system_pods.go:89] "kube-proxy-5t7nq" [6050bdb0-6390-48c7-863f-520ef6277ad8] Running
I1124 03:38:00.691976 456828 system_pods.go:89] "kube-scheduler-old-k8s-version-098965" [ff509e4b-4fde-4ea0-8261-5f4463c5be01] Running
I1124 03:38:00.691981 456828 system_pods.go:89] "storage-provisioner" [9ede1da5-704c-4aab-93e0-77ce93158129] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 03:38:00.691999 456828 retry.go:31] will retry after 269.359214ms: missing components: kube-dns
I1124 03:38:00.970909 456828 system_pods.go:86] 8 kube-system pods found
I1124 03:38:00.970944 456828 system_pods.go:89] "coredns-5dd5756b68-2kmf2" [9c6642fb-17b7-4199-b927-eb63b9a58260] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 03:38:00.970951 456828 system_pods.go:89] "etcd-old-k8s-version-098965" [994c486f-9839-4407-bc6d-d7c52c9dcfe7] Running
I1124 03:38:00.970957 456828 system_pods.go:89] "kindnet-mctv9" [0f0d91cd-7d64-482e-b33c-383b20f5bd79] Running
I1124 03:38:00.970961 456828 system_pods.go:89] "kube-apiserver-old-k8s-version-098965" [777b36fe-0c46-4427-90b9-ef48ae1cc287] Running
I1124 03:38:00.970966 456828 system_pods.go:89] "kube-controller-manager-old-k8s-version-098965" [3be22a1a-db9f-446f-9b0a-e61ce5482e12] Running
I1124 03:38:00.970969 456828 system_pods.go:89] "kube-proxy-5t7nq" [6050bdb0-6390-48c7-863f-520ef6277ad8] Running
I1124 03:38:00.970973 456828 system_pods.go:89] "kube-scheduler-old-k8s-version-098965" [ff509e4b-4fde-4ea0-8261-5f4463c5be01] Running
I1124 03:38:00.970978 456828 system_pods.go:89] "storage-provisioner" [9ede1da5-704c-4aab-93e0-77ce93158129] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 03:38:00.970996 456828 retry.go:31] will retry after 426.462867ms: missing components: kube-dns
I1124 03:38:01.403286 456828 system_pods.go:86] 8 kube-system pods found
I1124 03:38:01.403315 456828 system_pods.go:89] "coredns-5dd5756b68-2kmf2" [9c6642fb-17b7-4199-b927-eb63b9a58260] Running
I1124 03:38:01.403322 456828 system_pods.go:89] "etcd-old-k8s-version-098965" [994c486f-9839-4407-bc6d-d7c52c9dcfe7] Running
I1124 03:38:01.403330 456828 system_pods.go:89] "kindnet-mctv9" [0f0d91cd-7d64-482e-b33c-383b20f5bd79] Running
I1124 03:38:01.403335 456828 system_pods.go:89] "kube-apiserver-old-k8s-version-098965" [777b36fe-0c46-4427-90b9-ef48ae1cc287] Running
I1124 03:38:01.403341 456828 system_pods.go:89] "kube-controller-manager-old-k8s-version-098965" [3be22a1a-db9f-446f-9b0a-e61ce5482e12] Running
I1124 03:38:01.403345 456828 system_pods.go:89] "kube-proxy-5t7nq" [6050bdb0-6390-48c7-863f-520ef6277ad8] Running
I1124 03:38:01.403349 456828 system_pods.go:89] "kube-scheduler-old-k8s-version-098965" [ff509e4b-4fde-4ea0-8261-5f4463c5be01] Running
I1124 03:38:01.403353 456828 system_pods.go:89] "storage-provisioner" [9ede1da5-704c-4aab-93e0-77ce93158129] Running
I1124 03:38:01.403362 456828 system_pods.go:126] duration metric: took 973.897592ms to wait for k8s-apps to be running ...
I1124 03:38:01.403373 456828 system_svc.go:44] waiting for kubelet service to be running ....
I1124 03:38:01.403427 456828 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1124 03:38:01.421738 456828 system_svc.go:56] duration metric: took 18.35448ms WaitForService to wait for kubelet
I1124 03:38:01.421765 456828 kubeadm.go:587] duration metric: took 14.454425317s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1124 03:38:01.421786 456828 node_conditions.go:102] verifying NodePressure condition ...
I1124 03:38:01.425010 456828 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
I1124 03:38:01.425044 456828 node_conditions.go:123] node cpu capacity is 2
I1124 03:38:01.425059 456828 node_conditions.go:105] duration metric: took 3.267233ms to run NodePressure ...
I1124 03:38:01.425099 456828 start.go:242] waiting for startup goroutines ...
I1124 03:38:01.425108 456828 start.go:247] waiting for cluster config update ...
I1124 03:38:01.425124 456828 start.go:256] writing updated cluster config ...
I1124 03:38:01.425448 456828 ssh_runner.go:195] Run: rm -f paused
I1124 03:38:01.429212 456828 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1124 03:38:01.435249 456828 pod_ready.go:83] waiting for pod "coredns-5dd5756b68-2kmf2" in "kube-system" namespace to be "Ready" or be gone ...
I1124 03:38:01.447218 456828 pod_ready.go:94] pod "coredns-5dd5756b68-2kmf2" is "Ready"
I1124 03:38:01.447254 456828 pod_ready.go:86] duration metric: took 11.97007ms for pod "coredns-5dd5756b68-2kmf2" in "kube-system" namespace to be "Ready" or be gone ...
I1124 03:38:01.452465 456828 pod_ready.go:83] waiting for pod "etcd-old-k8s-version-098965" in "kube-system" namespace to be "Ready" or be gone ...
I1124 03:38:01.459099 456828 pod_ready.go:94] pod "etcd-old-k8s-version-098965" is "Ready"
I1124 03:38:01.459128 456828 pod_ready.go:86] duration metric: took 6.576599ms for pod "etcd-old-k8s-version-098965" in "kube-system" namespace to be "Ready" or be gone ...
I1124 03:38:01.471032 456828 pod_ready.go:83] waiting for pod "kube-apiserver-old-k8s-version-098965" in "kube-system" namespace to be "Ready" or be gone ...
I1124 03:38:01.476662 456828 pod_ready.go:94] pod "kube-apiserver-old-k8s-version-098965" is "Ready"
I1124 03:38:01.476688 456828 pod_ready.go:86] duration metric: took 5.56861ms for pod "kube-apiserver-old-k8s-version-098965" in "kube-system" namespace to be "Ready" or be gone ...
I1124 03:38:01.480096 456828 pod_ready.go:83] waiting for pod "kube-controller-manager-old-k8s-version-098965" in "kube-system" namespace to be "Ready" or be gone ...
I1124 03:38:01.833649 456828 pod_ready.go:94] pod "kube-controller-manager-old-k8s-version-098965" is "Ready"
I1124 03:38:01.833715 456828 pod_ready.go:86] duration metric: took 353.588012ms for pod "kube-controller-manager-old-k8s-version-098965" in "kube-system" namespace to be "Ready" or be gone ...
I1124 03:38:02.035335 456828 pod_ready.go:83] waiting for pod "kube-proxy-5t7nq" in "kube-system" namespace to be "Ready" or be gone ...
I1124 03:38:02.433941 456828 pod_ready.go:94] pod "kube-proxy-5t7nq" is "Ready"
I1124 03:38:02.433973 456828 pod_ready.go:86] duration metric: took 398.560828ms for pod "kube-proxy-5t7nq" in "kube-system" namespace to be "Ready" or be gone ...
I1124 03:38:02.633735 456828 pod_ready.go:83] waiting for pod "kube-scheduler-old-k8s-version-098965" in "kube-system" namespace to be "Ready" or be gone ...
I1124 03:38:03.033530 456828 pod_ready.go:94] pod "kube-scheduler-old-k8s-version-098965" is "Ready"
I1124 03:38:03.033561 456828 pod_ready.go:86] duration metric: took 399.801466ms for pod "kube-scheduler-old-k8s-version-098965" in "kube-system" namespace to be "Ready" or be gone ...
I1124 03:38:03.033575 456828 pod_ready.go:40] duration metric: took 1.604321281s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1124 03:38:03.103182 456828 start.go:625] kubectl: 1.33.2, cluster: 1.28.0 (minor skew: 5)
I1124 03:38:03.106579 456828 out.go:203]
W1124 03:38:03.109581 456828 out.go:285] ! /usr/local/bin/kubectl is version 1.33.2, which may have incompatibilities with Kubernetes 1.28.0.
I1124 03:38:03.112629 456828 out.go:179] - Want kubectl v1.28.0? Try 'minikube kubectl -- get pods -A'
I1124 03:38:03.116685 456828 out.go:179] * Done! kubectl is now configured to use "old-k8s-version-098965" cluster and "default" namespace by default
==> container status <==
CONTAINER           IMAGE               CREATED             STATE     NAME                      ATTEMPT   POD ID              POD                                              NAMESPACE
2473c87591ead       1611cd07b61d5       8 seconds ago       Running   busybox                   0         0780548608168       busybox                                          default
32ab776c7affb       ba04bb24b9575       13 seconds ago      Running   storage-provisioner       0         ddcd48a171630       storage-provisioner                              kube-system
28a52e8d1e9e4       97e04611ad434       13 seconds ago      Running   coredns                   0         aa01d3a3f7cba       coredns-5dd5756b68-2kmf2                         kube-system
37f20e76ffbc2       b1a8c6f707935       25 seconds ago      Running   kindnet-cni               0         2a6bd814ac01e       kindnet-mctv9                                    kube-system
4baa8c107b38c       940f54a5bcae9       27 seconds ago      Running   kube-proxy                0         b85e6b6d514cc       kube-proxy-5t7nq                                 kube-system
8fb25b361e023       9cdd6470f48c8       49 seconds ago      Running   etcd                      0         b669262c23763       etcd-old-k8s-version-098965                      kube-system
666ad3b5bbcc5       00543d2fe5d71       49 seconds ago      Running   kube-apiserver            0         9edcf3c3e4d9e       kube-apiserver-old-k8s-version-098965            kube-system
95905c97af2e4       762dce4090c5f       49 seconds ago      Running   kube-scheduler            0         d6f0d280dee01       kube-scheduler-old-k8s-version-098965            kube-system
94d7bde87dab5       46cc66ccc7c19       49 seconds ago      Running   kube-controller-manager   0         8eb2c9f965876       kube-controller-manager-old-k8s-version-098965   kube-system
==> containerd <==
Nov 24 03:38:00 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:00.893826021Z" level=info msg="Container 32ab776c7affb85bd5965dee0104d1470d0553d2b7a80e479ea0fc030ea67388: CDI devices from CRI Config.CDIDevices: []"
Nov 24 03:38:00 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:00.901529017Z" level=info msg="StartContainer for \"28a52e8d1e9e4c99322bf7f4a542d09e22eed502ede9105bfd3867fff8b743ae\""
Nov 24 03:38:00 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:00.902763625Z" level=info msg="connecting to shim 28a52e8d1e9e4c99322bf7f4a542d09e22eed502ede9105bfd3867fff8b743ae" address="unix:///run/containerd/s/70d70892534976c42f017b6a57c07c5f882e60cfc509cf351b04e5c63883f9c6" protocol=ttrpc version=3
Nov 24 03:38:00 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:00.923930845Z" level=info msg="CreateContainer within sandbox \"ddcd48a171630d558701e23e8b84d43ca3b433b204586da5fd73071e2c73cf02\" for &ContainerMetadata{Name:storage-provisioner,Attempt:0,} returns container id \"32ab776c7affb85bd5965dee0104d1470d0553d2b7a80e479ea0fc030ea67388\""
Nov 24 03:38:00 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:00.927472048Z" level=info msg="StartContainer for \"32ab776c7affb85bd5965dee0104d1470d0553d2b7a80e479ea0fc030ea67388\""
Nov 24 03:38:00 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:00.929854567Z" level=info msg="connecting to shim 32ab776c7affb85bd5965dee0104d1470d0553d2b7a80e479ea0fc030ea67388" address="unix:///run/containerd/s/5ddd01c5f051ac256aede9694ac052a9c600e13f3e3f44d833556ac361f844c9" protocol=ttrpc version=3
Nov 24 03:38:00 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:00.986416771Z" level=info msg="StartContainer for \"28a52e8d1e9e4c99322bf7f4a542d09e22eed502ede9105bfd3867fff8b743ae\" returns successfully"
Nov 24 03:38:01 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:01.032852607Z" level=info msg="StartContainer for \"32ab776c7affb85bd5965dee0104d1470d0553d2b7a80e479ea0fc030ea67388\" returns successfully"
Nov 24 03:38:03 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:03.633430300Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:b377806c-ae20-44d2-9d0f-07b097026328,Namespace:default,Attempt:0,}"
Nov 24 03:38:03 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:03.690035484Z" level=info msg="connecting to shim 07805486081686e75b51f404a8d192120c8e44f1df35435e82a18cd840b250a6" address="unix:///run/containerd/s/62c9570c9e36a3dfb4b0454e8ff44f8873d73aec0247dc7c06a4c63bdd606e84" namespace=k8s.io protocol=ttrpc version=3
Nov 24 03:38:03 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:03.757467931Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:b377806c-ae20-44d2-9d0f-07b097026328,Namespace:default,Attempt:0,} returns sandbox id \"07805486081686e75b51f404a8d192120c8e44f1df35435e82a18cd840b250a6\""
Nov 24 03:38:03 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:03.759222049Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\""
Nov 24 03:38:05 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:05.797829117Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 24 03:38:05 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:05.799579082Z" level=info msg="stop pulling image gcr.io/k8s-minikube/busybox:1.28.4-glibc: active requests=0, bytes read=1937183"
Nov 24 03:38:05 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:05.802353497Z" level=info msg="ImageCreate event name:\"sha256:1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 24 03:38:05 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:05.809134257Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 24 03:38:05 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:05.810394309Z" level=info msg="Pulled image \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" with image id \"sha256:1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c\", repo tag \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\", repo digest \"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\", size \"1935750\" in 2.051134434s"
Nov 24 03:38:05 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:05.810432586Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" returns image reference \"sha256:1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c\""
Nov 24 03:38:05 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:05.813949961Z" level=info msg="CreateContainer within sandbox \"07805486081686e75b51f404a8d192120c8e44f1df35435e82a18cd840b250a6\" for container &ContainerMetadata{Name:busybox,Attempt:0,}"
Nov 24 03:38:05 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:05.827500676Z" level=info msg="Container 2473c87591ead98e23e27a6582c8fc6bfb2afc235a7786ab166b053a67742968: CDI devices from CRI Config.CDIDevices: []"
Nov 24 03:38:05 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:05.839960013Z" level=info msg="CreateContainer within sandbox \"07805486081686e75b51f404a8d192120c8e44f1df35435e82a18cd840b250a6\" for &ContainerMetadata{Name:busybox,Attempt:0,} returns container id \"2473c87591ead98e23e27a6582c8fc6bfb2afc235a7786ab166b053a67742968\""
Nov 24 03:38:05 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:05.841216955Z" level=info msg="StartContainer for \"2473c87591ead98e23e27a6582c8fc6bfb2afc235a7786ab166b053a67742968\""
Nov 24 03:38:05 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:05.842449240Z" level=info msg="connecting to shim 2473c87591ead98e23e27a6582c8fc6bfb2afc235a7786ab166b053a67742968" address="unix:///run/containerd/s/62c9570c9e36a3dfb4b0454e8ff44f8873d73aec0247dc7c06a4c63bdd606e84" protocol=ttrpc version=3
Nov 24 03:38:05 old-k8s-version-098965 containerd[757]: time="2025-11-24T03:38:05.911309396Z" level=info msg="StartContainer for \"2473c87591ead98e23e27a6582c8fc6bfb2afc235a7786ab166b053a67742968\" returns successfully"
Nov 24 03:38:11 old-k8s-version-098965 containerd[757]: E1124 03:38:11.465754 757 websocket.go:100] "Unhandled Error" err="unable to upgrade websocket connection: websocket server finished before becoming ready" logger="UnhandledError"
==> coredns [28a52e8d1e9e4c99322bf7f4a542d09e22eed502ede9105bfd3867fff8b743ae] <==
.:53
[INFO] plugin/reload: Running configuration SHA512 = 8aa94104b4dae56b00431f7362ac05b997af2246775de35dc2eb361b0707b2fa7199f9ddfdba27fdef1331b76d09c41700f6cb5d00836dabab7c0df8e651283f
CoreDNS-1.10.1
linux/arm64, go1.20, 055b2c3
[INFO] 127.0.0.1:36019 - 46965 "HINFO IN 101273306430571101.3418018538030985896. udp 56 false 512" NXDOMAIN qr,rd,ra 56 0.022963225s
==> describe nodes <==
Name: old-k8s-version-098965
Roles: control-plane
Labels: beta.kubernetes.io/arch=arm64
beta.kubernetes.io/os=linux
kubernetes.io/arch=arm64
kubernetes.io/hostname=old-k8s-version-098965
kubernetes.io/os=linux
minikube.k8s.io/commit=525fef2394fe4854b27b3c3385e33403fd802864
minikube.k8s.io/name=old-k8s-version-098965
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_11_24T03_37_34_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Mon, 24 Nov 2025 03:37:29 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: old-k8s-version-098965
AcquireTime: <unset>
RenewTime: Mon, 24 Nov 2025 03:38:13 +0000
Conditions:
Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
----             ------  -----------------                 ------------------                ------                       -------
MemoryPressure   False   Mon, 24 Nov 2025 03:38:03 +0000   Mon, 24 Nov 2025 03:37:25 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
DiskPressure     False   Mon, 24 Nov 2025 03:38:03 +0000   Mon, 24 Nov 2025 03:37:25 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
PIDPressure      False   Mon, 24 Nov 2025 03:38:03 +0000   Mon, 24 Nov 2025 03:37:25 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
Ready            True    Mon, 24 Nov 2025 03:38:03 +0000   Mon, 24 Nov 2025 03:38:00 +0000   KubeletReady                 kubelet is posting ready status
Addresses:
InternalIP: 192.168.85.2
Hostname: old-k8s-version-098965
Capacity:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022296Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022296Ki
pods: 110
System Info:
Machine ID: 304a86241bf1bbb85bd31db5692386d7
System UUID: 016e6bb7-0740-4efc-ad46-1814703763df
Boot ID: 63a8a852-1462-44b1-9d6f-f77d26e8568f
Kernel Version: 5.15.0-1084-aws
OS Image: Debian GNU/Linux 12 (bookworm)
Operating System: linux
Architecture: arm64
Container Runtime Version: containerd://2.1.5
Kubelet Version: v1.28.0
Kube-Proxy Version: v1.28.0
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (9 in total)
Namespace    Name                                             CPU Requests  CPU Limits  Memory Requests  Memory Limits  Age
---------    ----                                             ------------  ----------  ---------------  -------------  ---
default      busybox                                          0 (0%)        0 (0%)      0 (0%)           0 (0%)         11s
kube-system  coredns-5dd5756b68-2kmf2                         100m (5%)     0 (0%)      70Mi (0%)        170Mi (2%)     28s
kube-system  etcd-old-k8s-version-098965                      100m (5%)     0 (0%)      100Mi (1%)       0 (0%)         41s
kube-system  kindnet-mctv9                                    100m (5%)     100m (5%)   50Mi (0%)        50Mi (0%)      28s
kube-system  kube-apiserver-old-k8s-version-098965            250m (12%)    0 (0%)      0 (0%)           0 (0%)         41s
kube-system  kube-controller-manager-old-k8s-version-098965   200m (10%)    0 (0%)      0 (0%)           0 (0%)         44s
kube-system  kube-proxy-5t7nq                                 0 (0%)        0 (0%)      0 (0%)           0 (0%)         28s
kube-system  kube-scheduler-old-k8s-version-098965            100m (5%)     0 (0%)      0 (0%)           0 (0%)         41s
kube-system  storage-provisioner                              0 (0%)        0 (0%)      0 (0%)           0 (0%)         26s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource           Requests     Limits
--------           --------     ------
cpu                850m (42%)   100m (5%)
memory             220Mi (2%)   220Mi (2%)
ephemeral-storage  0 (0%)       0 (0%)
hugepages-1Gi      0 (0%)       0 (0%)
hugepages-2Mi      0 (0%)       0 (0%)
hugepages-32Mi     0 (0%)       0 (0%)
hugepages-64Ki     0 (0%)       0 (0%)
Events:
Type    Reason                   Age                From             Message
----    ------                   ----               ----             -------
Normal  Starting                 27s                kube-proxy
Normal  NodeHasSufficientMemory  50s (x8 over 50s)  kubelet          Node old-k8s-version-098965 status is now: NodeHasSufficientMemory
Normal  NodeHasNoDiskPressure    50s (x8 over 50s)  kubelet          Node old-k8s-version-098965 status is now: NodeHasNoDiskPressure
Normal  NodeHasSufficientPID     50s (x7 over 50s)  kubelet          Node old-k8s-version-098965 status is now: NodeHasSufficientPID
Normal  NodeAllocatableEnforced  50s                kubelet          Updated Node Allocatable limit across pods
Normal  Starting                 42s                kubelet          Starting kubelet.
Normal  NodeHasSufficientMemory  41s                kubelet          Node old-k8s-version-098965 status is now: NodeHasSufficientMemory
Normal  NodeHasNoDiskPressure    41s                kubelet          Node old-k8s-version-098965 status is now: NodeHasNoDiskPressure
Normal  NodeHasSufficientPID     41s                kubelet          Node old-k8s-version-098965 status is now: NodeHasSufficientPID
Normal  NodeAllocatableEnforced  41s                kubelet          Updated Node Allocatable limit across pods
Normal  RegisteredNode           29s                node-controller  Node old-k8s-version-098965 event: Registered Node old-k8s-version-098965 in Controller
Normal  NodeReady                14s                kubelet          Node old-k8s-version-098965 status is now: NodeReady
==> dmesg <==
[Nov24 02:27] overlayfs: idmapped layers are currently not supported
[Nov24 02:28] overlayfs: idmapped layers are currently not supported
[Nov24 02:30] overlayfs: idmapped layers are currently not supported
[ +9.824160] overlayfs: idmapped layers are currently not supported
[Nov24 02:31] overlayfs: idmapped layers are currently not supported
[Nov24 02:32] overlayfs: idmapped layers are currently not supported
[ +27.981383] overlayfs: idmapped layers are currently not supported
[Nov24 02:33] overlayfs: idmapped layers are currently not supported
[Nov24 02:34] overlayfs: idmapped layers are currently not supported
[Nov24 02:35] overlayfs: idmapped layers are currently not supported
[Nov24 02:36] overlayfs: idmapped layers are currently not supported
[Nov24 02:37] overlayfs: idmapped layers are currently not supported
[Nov24 02:38] overlayfs: idmapped layers are currently not supported
[Nov24 02:39] overlayfs: idmapped layers are currently not supported
[ +24.837346] overlayfs: idmapped layers are currently not supported
[Nov24 02:40] overlayfs: idmapped layers are currently not supported
[ +40.823948] overlayfs: idmapped layers are currently not supported
[ +1.705989] overlayfs: idmapped layers are currently not supported
[Nov24 02:42] overlayfs: idmapped layers are currently not supported
[ +21.661904] overlayfs: idmapped layers are currently not supported
[Nov24 02:44] overlayfs: idmapped layers are currently not supported
[ +1.074777] overlayfs: idmapped layers are currently not supported
[Nov24 02:46] overlayfs: idmapped layers are currently not supported
[ +19.120392] overlayfs: idmapped layers are currently not supported
[Nov24 02:48] kauditd_printk_skb: 8 callbacks suppressed
==> etcd [8fb25b361e0239913db0778bdfb64d93fee6d1a16be3fd7f4f316e46a892bbde] <==
{"level":"info","ts":"2025-11-24T03:37:25.43693Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"9f0758e1c58a86ed switched to configuration voters=(11459225503572592365)"}
{"level":"info","ts":"2025-11-24T03:37:25.437104Z","caller":"membership/cluster.go:421","msg":"added member","cluster-id":"68eaea490fab4e05","local-member-id":"9f0758e1c58a86ed","added-peer-id":"9f0758e1c58a86ed","added-peer-peer-urls":["https://192.168.85.2:2380"]}
{"level":"info","ts":"2025-11-24T03:37:25.441421Z","caller":"embed/etcd.go:726","msg":"starting with client TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/server.crt, key = /var/lib/minikube/certs/etcd/server.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
{"level":"info","ts":"2025-11-24T03:37:25.44164Z","caller":"embed/etcd.go:597","msg":"serving peer traffic","address":"192.168.85.2:2380"}
{"level":"info","ts":"2025-11-24T03:37:25.441821Z","caller":"embed/etcd.go:569","msg":"cmux::serve","address":"192.168.85.2:2380"}
{"level":"info","ts":"2025-11-24T03:37:25.445077Z","caller":"embed/etcd.go:278","msg":"now serving peer/client/metrics","local-member-id":"9f0758e1c58a86ed","initial-advertise-peer-urls":["https://192.168.85.2:2380"],"listen-peer-urls":["https://192.168.85.2:2380"],"advertise-client-urls":["https://192.168.85.2:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.85.2:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
{"level":"info","ts":"2025-11-24T03:37:25.445165Z","caller":"embed/etcd.go:855","msg":"serving metrics","address":"http://127.0.0.1:2381"}
{"level":"info","ts":"2025-11-24T03:37:25.495636Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"9f0758e1c58a86ed is starting a new election at term 1"}
{"level":"info","ts":"2025-11-24T03:37:25.495889Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"9f0758e1c58a86ed became pre-candidate at term 1"}
{"level":"info","ts":"2025-11-24T03:37:25.496002Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"9f0758e1c58a86ed received MsgPreVoteResp from 9f0758e1c58a86ed at term 1"}
{"level":"info","ts":"2025-11-24T03:37:25.496143Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"9f0758e1c58a86ed became candidate at term 2"}
{"level":"info","ts":"2025-11-24T03:37:25.496408Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"9f0758e1c58a86ed received MsgVoteResp from 9f0758e1c58a86ed at term 2"}
{"level":"info","ts":"2025-11-24T03:37:25.497335Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"9f0758e1c58a86ed became leader at term 2"}
{"level":"info","ts":"2025-11-24T03:37:25.497479Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: 9f0758e1c58a86ed elected leader 9f0758e1c58a86ed at term 2"}
{"level":"info","ts":"2025-11-24T03:37:25.498979Z","caller":"etcdserver/server.go:2062","msg":"published local member to cluster through raft","local-member-id":"9f0758e1c58a86ed","local-member-attributes":"{Name:old-k8s-version-098965 ClientURLs:[https://192.168.85.2:2379]}","request-path":"/0/members/9f0758e1c58a86ed/attributes","cluster-id":"68eaea490fab4e05","publish-timeout":"7s"}
{"level":"info","ts":"2025-11-24T03:37:25.499256Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-24T03:37:25.50078Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.85.2:2379"}
{"level":"info","ts":"2025-11-24T03:37:25.500939Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-24T03:37:25.501272Z","caller":"etcdserver/server.go:2571","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-24T03:37:25.503825Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2025-11-24T03:37:25.502905Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2025-11-24T03:37:25.504027Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2025-11-24T03:37:25.506444Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"68eaea490fab4e05","local-member-id":"9f0758e1c58a86ed","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-24T03:37:25.506667Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-24T03:37:25.506735Z","caller":"etcdserver/server.go:2595","msg":"cluster version is updated","cluster-version":"3.5"}
==> kernel <==
03:38:15 up 2:20, 0 user, load average: 2.55, 3.20, 2.77
Linux old-k8s-version-098965 5.15.0-1084-aws #91~20.04.1-Ubuntu SMP Fri May 2 07:00:04 UTC 2025 aarch64 GNU/Linux
PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"
==> kindnet [37f20e76ffbc24c2b929d70181ec4667f979dd10e9528ae0a376dca755a608bd] <==
I1124 03:37:49.827895 1 main.go:109] connected to apiserver: https://10.96.0.1:443
I1124 03:37:49.828142 1 main.go:139] hostIP = 192.168.85.2
podIP = 192.168.85.2
I1124 03:37:49.828290 1 main.go:148] setting mtu 1500 for CNI
I1124 03:37:49.828302 1 main.go:178] kindnetd IP family: "ipv4"
I1124 03:37:49.828312 1 main.go:182] noMask IPv4 subnets: [10.244.0.0/16]
time="2025-11-24T03:37:50Z" level=info msg="Created plugin 10-kube-network-policies (kindnetd, handles RunPodSandbox,RemovePodSandbox)"
I1124 03:37:50.033133 1 controller.go:377] "Starting controller" name="kube-network-policies"
I1124 03:37:50.033241 1 controller.go:381] "Waiting for informer caches to sync"
I1124 03:37:50.033288 1 shared_informer.go:350] "Waiting for caches to sync" controller="kube-network-policies"
I1124 03:37:50.034681 1 controller.go:390] nri plugin exited: failed to connect to NRI service: dial unix /var/run/nri/nri.sock: connect: no such file or directory
I1124 03:37:50.324571 1 shared_informer.go:357] "Caches are synced" controller="kube-network-policies"
I1124 03:37:50.324658 1 metrics.go:72] Registering metrics
I1124 03:37:50.324749 1 controller.go:711] "Syncing nftables rules"
I1124 03:38:00.040225 1 main.go:297] Handling node with IPs: map[192.168.85.2:{}]
I1124 03:38:00.040277 1 main.go:301] handling current node
I1124 03:38:10.032819 1 main.go:297] Handling node with IPs: map[192.168.85.2:{}]
I1124 03:38:10.033055 1 main.go:301] handling current node
==> kube-apiserver [666ad3b5bbcc57cef3344095ab7c6a95424fcdae77e237214b172a62b87abb2e] <==
I1124 03:37:29.817316 1 apf_controller.go:380] Running API Priority and Fairness periodic rebalancing process
I1124 03:37:29.821018 1 shared_informer.go:318] Caches are synced for cluster_authentication_trust_controller
I1124 03:37:29.821061 1 aggregator.go:166] initial CRD sync complete...
I1124 03:37:29.821069 1 autoregister_controller.go:141] Starting autoregister controller
I1124 03:37:29.821233 1 cache.go:32] Waiting for caches to sync for autoregister controller
I1124 03:37:29.821307 1 cache.go:39] Caches are synced for autoregister controller
I1124 03:37:29.822681 1 cache.go:39] Caches are synced for APIServiceRegistrationController controller
I1124 03:37:29.854244 1 controller.go:624] quota admission added evaluator for: leases.coordination.k8s.io
I1124 03:37:29.879343 1 shared_informer.go:318] Caches are synced for node_authorizer
I1124 03:37:29.891373 1 cache.go:39] Caches are synced for AvailableConditionController controller
I1124 03:37:30.501665 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I1124 03:37:30.515842 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I1124 03:37:30.515870 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I1124 03:37:31.168083 1 controller.go:624] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1124 03:37:31.220692 1 controller.go:624] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1124 03:37:31.327576 1 alloc.go:330] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W1124 03:37:31.335539 1 lease.go:263] Resetting endpoints for master service "kubernetes" to [192.168.85.2]
I1124 03:37:31.336837 1 controller.go:624] quota admission added evaluator for: endpoints
I1124 03:37:31.342035 1 controller.go:624] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1124 03:37:31.795879 1 controller.go:624] quota admission added evaluator for: serviceaccounts
I1124 03:37:32.895003 1 controller.go:624] quota admission added evaluator for: deployments.apps
I1124 03:37:32.910864 1 alloc.go:330] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I1124 03:37:32.928122 1 controller.go:624] quota admission added evaluator for: daemonsets.apps
I1124 03:37:45.687691 1 controller.go:624] quota admission added evaluator for: replicasets.apps
I1124 03:37:46.683285 1 controller.go:624] quota admission added evaluator for: controllerrevisions.apps
==> kube-controller-manager [94d7bde87dab52f8ec3b1763043f2afa14f31bf91ba4ddd110aa3c091eb1f236] <==
I1124 03:37:45.830060 1 node_lifecycle_controller.go:1225] "Initializing eviction metric for zone" zone=""
I1124 03:37:45.830629 1 node_lifecycle_controller.go:877] "Missing timestamp for Node. Assuming now as a timestamp" node="old-k8s-version-098965"
I1124 03:37:45.832023 1 node_lifecycle_controller.go:1029] "Controller detected that all Nodes are not-Ready. Entering master disruption mode"
I1124 03:37:45.830221 1 event.go:307] "Event occurred" object="old-k8s-version-098965" fieldPath="" kind="Node" apiVersion="v1" type="Normal" reason="RegisteredNode" message="Node old-k8s-version-098965 event: Registered Node old-k8s-version-098965 in Controller"
I1124 03:37:46.234151 1 shared_informer.go:318] Caches are synced for garbage collector
I1124 03:37:46.277193 1 shared_informer.go:318] Caches are synced for garbage collector
I1124 03:37:46.277382 1 garbagecollector.go:166] "All resource monitors have synced. Proceeding to collect garbage"
I1124 03:37:46.504268 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-xqjm9"
I1124 03:37:46.531473 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-2kmf2"
I1124 03:37:46.546744 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="854.357749ms"
I1124 03:37:46.566310 1 event.go:307] "Event occurred" object="kube-dns" fieldPath="" kind="Endpoints" apiVersion="v1" type="Warning" reason="FailedToCreateEndpoint" message="Failed to create endpoint for service kube-system/kube-dns: endpoints \"kube-dns\" already exists"
I1124 03:37:46.584884 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="38.082715ms"
I1124 03:37:46.585113 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="149.391µs"
I1124 03:37:46.696833 1 event.go:307] "Event occurred" object="kube-system/kube-proxy" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-5t7nq"
I1124 03:37:46.703751 1 event.go:307] "Event occurred" object="kube-system/kindnet" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kindnet-mctv9"
I1124 03:37:48.352432 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set coredns-5dd5756b68 to 1 from 2"
I1124 03:37:48.387265 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: coredns-5dd5756b68-xqjm9"
I1124 03:37:48.403262 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="51.894657ms"
I1124 03:37:48.414134 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="10.824275ms"
I1124 03:37:48.414238 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="43.528µs"
I1124 03:38:00.391250 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="131.267µs"
I1124 03:38:00.449093 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="133.589µs"
I1124 03:38:00.836415 1 node_lifecycle_controller.go:1048] "Controller detected that some Nodes are Ready. Exiting master disruption mode"
I1124 03:38:01.292788 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="13.437073ms"
I1124 03:38:01.294027 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="1.155904ms"
==> kube-proxy [4baa8c107b38cc2761e31cd050e33ec89802d4aa44bd4f1d1d031950a9d835ec] <==
I1124 03:37:47.752353 1 server_others.go:69] "Using iptables proxy"
I1124 03:37:47.775066 1 node.go:141] Successfully retrieved node IP: 192.168.85.2
I1124 03:37:47.844709 1 server.go:632] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I1124 03:37:47.849188 1 server_others.go:152] "Using iptables Proxier"
I1124 03:37:47.849234 1 server_others.go:421] "Detect-local-mode set to ClusterCIDR, but no cluster CIDR for family" ipFamily="IPv6"
I1124 03:37:47.849286 1 server_others.go:438] "Defaulting to no-op detect-local"
I1124 03:37:47.849319 1 proxier.go:251] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
I1124 03:37:47.849526 1 server.go:846] "Version info" version="v1.28.0"
I1124 03:37:47.849543 1 server.go:848] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1124 03:37:47.851283 1 config.go:188] "Starting service config controller"
I1124 03:37:47.851308 1 shared_informer.go:311] Waiting for caches to sync for service config
I1124 03:37:47.851328 1 config.go:97] "Starting endpoint slice config controller"
I1124 03:37:47.851333 1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
I1124 03:37:47.851909 1 config.go:315] "Starting node config controller"
I1124 03:37:47.851919 1 shared_informer.go:311] Waiting for caches to sync for node config
I1124 03:37:47.952223 1 shared_informer.go:318] Caches are synced for node config
I1124 03:37:47.952255 1 shared_informer.go:318] Caches are synced for service config
I1124 03:37:47.952281 1 shared_informer.go:318] Caches are synced for endpoint slice config
==> kube-scheduler [95905c97af2e4e393feeaef2edf3e1c7c5fc6dcb11cccf3554a17255c56bd15d] <==
W1124 03:37:29.836095 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E1124 03:37:29.836123 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
W1124 03:37:30.637685 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E1124 03:37:30.637945 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
W1124 03:37:30.642715 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E1124 03:37:30.642753 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
W1124 03:37:30.708776 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E1124 03:37:30.708817 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
W1124 03:37:30.711532 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E1124 03:37:30.711569 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
W1124 03:37:30.717417 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E1124 03:37:30.717460 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
W1124 03:37:30.738383 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E1124 03:37:30.738423 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
W1124 03:37:30.770745 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E1124 03:37:30.770991 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
W1124 03:37:30.836552 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E1124 03:37:30.836594 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
W1124 03:37:30.842629 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E1124 03:37:30.842859 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
W1124 03:37:30.843777 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E1124 03:37:30.843981 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
W1124 03:37:30.921894 1 reflector.go:535] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E1124 03:37:30.922102 1 reflector.go:147] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
I1124 03:37:33.702680 1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Nov 24 03:37:45 old-k8s-version-098965 kubelet[1540]: I1124 03:37:45.681266 1540 kubelet_network.go:61] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24"
Nov 24 03:37:46 old-k8s-version-098965 kubelet[1540]: I1124 03:37:46.713561 1540 topology_manager.go:215] "Topology Admit Handler" podUID="6050bdb0-6390-48c7-863f-520ef6277ad8" podNamespace="kube-system" podName="kube-proxy-5t7nq"
Nov 24 03:37:46 old-k8s-version-098965 kubelet[1540]: I1124 03:37:46.716233 1540 topology_manager.go:215] "Topology Admit Handler" podUID="0f0d91cd-7d64-482e-b33c-383b20f5bd79" podNamespace="kube-system" podName="kindnet-mctv9"
Nov 24 03:37:46 old-k8s-version-098965 kubelet[1540]: I1124 03:37:46.767542 1540 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-cfg\" (UniqueName: \"kubernetes.io/host-path/0f0d91cd-7d64-482e-b33c-383b20f5bd79-cni-cfg\") pod \"kindnet-mctv9\" (UID: \"0f0d91cd-7d64-482e-b33c-383b20f5bd79\") " pod="kube-system/kindnet-mctv9"
Nov 24 03:37:46 old-k8s-version-098965 kubelet[1540]: I1124 03:37:46.767756 1540 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/0f0d91cd-7d64-482e-b33c-383b20f5bd79-xtables-lock\") pod \"kindnet-mctv9\" (UID: \"0f0d91cd-7d64-482e-b33c-383b20f5bd79\") " pod="kube-system/kindnet-mctv9"
Nov 24 03:37:46 old-k8s-version-098965 kubelet[1540]: I1124 03:37:46.767863 1540 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tgdbr\" (UniqueName: \"kubernetes.io/projected/0f0d91cd-7d64-482e-b33c-383b20f5bd79-kube-api-access-tgdbr\") pod \"kindnet-mctv9\" (UID: \"0f0d91cd-7d64-482e-b33c-383b20f5bd79\") " pod="kube-system/kindnet-mctv9"
Nov 24 03:37:46 old-k8s-version-098965 kubelet[1540]: I1124 03:37:46.767964 1540 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/6050bdb0-6390-48c7-863f-520ef6277ad8-xtables-lock\") pod \"kube-proxy-5t7nq\" (UID: \"6050bdb0-6390-48c7-863f-520ef6277ad8\") " pod="kube-system/kube-proxy-5t7nq"
Nov 24 03:37:46 old-k8s-version-098965 kubelet[1540]: I1124 03:37:46.768057 1540 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/6050bdb0-6390-48c7-863f-520ef6277ad8-lib-modules\") pod \"kube-proxy-5t7nq\" (UID: \"6050bdb0-6390-48c7-863f-520ef6277ad8\") " pod="kube-system/kube-proxy-5t7nq"
Nov 24 03:37:46 old-k8s-version-098965 kubelet[1540]: I1124 03:37:46.768153 1540 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dnmtw\" (UniqueName: \"kubernetes.io/projected/6050bdb0-6390-48c7-863f-520ef6277ad8-kube-api-access-dnmtw\") pod \"kube-proxy-5t7nq\" (UID: \"6050bdb0-6390-48c7-863f-520ef6277ad8\") " pod="kube-system/kube-proxy-5t7nq"
Nov 24 03:37:46 old-k8s-version-098965 kubelet[1540]: I1124 03:37:46.768259 1540 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/6050bdb0-6390-48c7-863f-520ef6277ad8-kube-proxy\") pod \"kube-proxy-5t7nq\" (UID: \"6050bdb0-6390-48c7-863f-520ef6277ad8\") " pod="kube-system/kube-proxy-5t7nq"
Nov 24 03:37:46 old-k8s-version-098965 kubelet[1540]: I1124 03:37:46.768359 1540 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/0f0d91cd-7d64-482e-b33c-383b20f5bd79-lib-modules\") pod \"kindnet-mctv9\" (UID: \"0f0d91cd-7d64-482e-b33c-383b20f5bd79\") " pod="kube-system/kindnet-mctv9"
Nov 24 03:37:50 old-k8s-version-098965 kubelet[1540]: I1124 03:37:50.218063 1540 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kube-proxy-5t7nq" podStartSLOduration=4.2180200469999996 podCreationTimestamp="2025-11-24 03:37:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 03:37:48.208401476 +0000 UTC m=+15.367432854" watchObservedRunningTime="2025-11-24 03:37:50.218020047 +0000 UTC m=+17.377051399"
Nov 24 03:38:00 old-k8s-version-098965 kubelet[1540]: I1124 03:38:00.190884 1540 kubelet_node_status.go:493] "Fast updating node status as it just became ready"
Nov 24 03:38:00 old-k8s-version-098965 kubelet[1540]: I1124 03:38:00.379008 1540 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kindnet-mctv9" podStartSLOduration=12.155727494 podCreationTimestamp="2025-11-24 03:37:46 +0000 UTC" firstStartedPulling="2025-11-24 03:37:47.330956178 +0000 UTC m=+14.489987539" lastFinishedPulling="2025-11-24 03:37:49.554182857 +0000 UTC m=+16.713214218" observedRunningTime="2025-11-24 03:37:50.219024146 +0000 UTC m=+17.378055507" watchObservedRunningTime="2025-11-24 03:38:00.378954173 +0000 UTC m=+27.537985543"
Nov 24 03:38:00 old-k8s-version-098965 kubelet[1540]: I1124 03:38:00.379210 1540 topology_manager.go:215] "Topology Admit Handler" podUID="9ede1da5-704c-4aab-93e0-77ce93158129" podNamespace="kube-system" podName="storage-provisioner"
Nov 24 03:38:00 old-k8s-version-098965 kubelet[1540]: I1124 03:38:00.394275 1540 topology_manager.go:215] "Topology Admit Handler" podUID="9c6642fb-17b7-4199-b927-eb63b9a58260" podNamespace="kube-system" podName="coredns-5dd5756b68-2kmf2"
Nov 24 03:38:00 old-k8s-version-098965 kubelet[1540]: I1124 03:38:00.504386 1540 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fgg48\" (UniqueName: \"kubernetes.io/projected/9c6642fb-17b7-4199-b927-eb63b9a58260-kube-api-access-fgg48\") pod \"coredns-5dd5756b68-2kmf2\" (UID: \"9c6642fb-17b7-4199-b927-eb63b9a58260\") " pod="kube-system/coredns-5dd5756b68-2kmf2"
Nov 24 03:38:00 old-k8s-version-098965 kubelet[1540]: I1124 03:38:00.504451 1540 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9c6642fb-17b7-4199-b927-eb63b9a58260-config-volume\") pod \"coredns-5dd5756b68-2kmf2\" (UID: \"9c6642fb-17b7-4199-b927-eb63b9a58260\") " pod="kube-system/coredns-5dd5756b68-2kmf2"
Nov 24 03:38:00 old-k8s-version-098965 kubelet[1540]: I1124 03:38:00.504532 1540 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8snrh\" (UniqueName: \"kubernetes.io/projected/9ede1da5-704c-4aab-93e0-77ce93158129-kube-api-access-8snrh\") pod \"storage-provisioner\" (UID: \"9ede1da5-704c-4aab-93e0-77ce93158129\") " pod="kube-system/storage-provisioner"
Nov 24 03:38:00 old-k8s-version-098965 kubelet[1540]: I1124 03:38:00.504567 1540 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/9ede1da5-704c-4aab-93e0-77ce93158129-tmp\") pod \"storage-provisioner\" (UID: \"9ede1da5-704c-4aab-93e0-77ce93158129\") " pod="kube-system/storage-provisioner"
Nov 24 03:38:01 old-k8s-version-098965 kubelet[1540]: I1124 03:38:01.277737 1540 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=13.277693839 podCreationTimestamp="2025-11-24 03:37:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 03:38:01.256250253 +0000 UTC m=+28.415281605" watchObservedRunningTime="2025-11-24 03:38:01.277693839 +0000 UTC m=+28.436725192"
Nov 24 03:38:03 old-k8s-version-098965 kubelet[1540]: I1124 03:38:03.329633 1540 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/coredns-5dd5756b68-2kmf2" podStartSLOduration=17.329588381 podCreationTimestamp="2025-11-24 03:37:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 03:38:01.279992944 +0000 UTC m=+28.439024297" watchObservedRunningTime="2025-11-24 03:38:03.329588381 +0000 UTC m=+30.488619734"
Nov 24 03:38:03 old-k8s-version-098965 kubelet[1540]: I1124 03:38:03.329845 1540 topology_manager.go:215] "Topology Admit Handler" podUID="b377806c-ae20-44d2-9d0f-07b097026328" podNamespace="default" podName="busybox"
Nov 24 03:38:03 old-k8s-version-098965 kubelet[1540]: I1124 03:38:03.426801 1540 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wn2qh\" (UniqueName: \"kubernetes.io/projected/b377806c-ae20-44d2-9d0f-07b097026328-kube-api-access-wn2qh\") pod \"busybox\" (UID: \"b377806c-ae20-44d2-9d0f-07b097026328\") " pod="default/busybox"
Nov 24 03:38:06 old-k8s-version-098965 kubelet[1540]: I1124 03:38:06.274643 1540 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="default/busybox" podStartSLOduration=1.222747006 podCreationTimestamp="2025-11-24 03:38:03 +0000 UTC" firstStartedPulling="2025-11-24 03:38:03.75886784 +0000 UTC m=+30.917899193" lastFinishedPulling="2025-11-24 03:38:05.810715943 +0000 UTC m=+32.969747296" observedRunningTime="2025-11-24 03:38:06.27449371 +0000 UTC m=+33.433525071" watchObservedRunningTime="2025-11-24 03:38:06.274595109 +0000 UTC m=+33.433626495"
==> storage-provisioner [32ab776c7affb85bd5965dee0104d1470d0553d2b7a80e479ea0fc030ea67388] <==
I1124 03:38:01.039603 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1124 03:38:01.054106 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1124 03:38:01.054328 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I1124 03:38:01.064918 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I1124 03:38:01.065095 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_old-k8s-version-098965_e3e8caf0-85bd-4d0b-af08-80a33b7d616e!
I1124 03:38:01.066102 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"c304fee8-eb73-4695-8997-27ec70001b31", APIVersion:"v1", ResourceVersion:"438", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' old-k8s-version-098965_e3e8caf0-85bd-4d0b-af08-80a33b7d616e became leader
I1124 03:38:01.165252 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_old-k8s-version-098965_e3e8caf0-85bd-4d0b-af08-80a33b7d616e!
-- /stdout --
helpers_test.go:262: (dbg) Run: out/minikube-linux-arm64 status --format={{.APIServer}} -p old-k8s-version-098965 -n old-k8s-version-098965
helpers_test.go:269: (dbg) Run: kubectl --context old-k8s-version-098965 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:293: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: end of post-mortem logs <<<
helpers_test.go:294: ---------------------/post-mortem---------------------------------
--- FAIL: TestStartStop/group/old-k8s-version/serial/DeployApp (12.85s)