=== RUN TestStartStop/group/old-k8s-version/serial/DeployApp
start_stop_delete_test.go:194: (dbg) Run: kubectl --context old-k8s-version-092258 create -f testdata/busybox.yaml
start_stop_delete_test.go:194: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:352: "busybox" [4fd396a4-7f86-4bac-b99a-f7427bb5deb9] Pending
helpers_test.go:352: "busybox" [4fd396a4-7f86-4bac-b99a-f7427bb5deb9] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
helpers_test.go:352: "busybox" [4fd396a4-7f86-4bac-b99a-f7427bb5deb9] Running
start_stop_delete_test.go:194: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: integration-test=busybox healthy within 9.004230521s
start_stop_delete_test.go:194: (dbg) Run: kubectl --context old-k8s-version-092258 exec busybox -- /bin/sh -c "ulimit -n"
start_stop_delete_test.go:194: 'ulimit -n' returned 1024, expected 1048576
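Note: the assertion that fails here checks the soft open-file limit (RLIMIT_NOFILE, read via ulimit -n) inside the busybox pod. It came back as the classic conservative default of 1024 instead of the 1048576 the test expects, meaning the raised limit did not propagate from the node's container runtime into the pod. A minimal sketch for narrowing down where the 1024 originates, reusing the profile name from this run and assuming systemd is PID 1 inside the kicbase node (its entrypoint runs /sbin/init per the docker inspect output below):

    # What the pod actually sees (the same command the test ran):
    kubectl --context old-k8s-version-092258 exec busybox -- /bin/sh -c "ulimit -n"
    # What the containerd service on the minikube node is allowed to hand out:
    docker exec old-k8s-version-092258 systemctl show containerd --property=LimitNOFILE

If the second command already reports a low LimitNOFILE, the limit was capped at the node service level rather than in the pod spec.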
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: docker inspect <======
helpers_test.go:239: (dbg) Run: docker inspect old-k8s-version-092258
helpers_test.go:243: (dbg) docker inspect old-k8s-version-092258:
-- stdout --
[
    {
        "Id": "06d5dd86afe167e6eb08ea3044c0d6356df71a87f2b7acc2267f870459c4f2cd",
        "Created": "2025-11-21T14:43:42.553015288Z",
        "Path": "/usr/local/bin/entrypoint",
        "Args": [
            "/sbin/init"
        ],
        "State": {
            "Status": "running",
            "Running": true,
            "Paused": false,
            "Restarting": false,
            "OOMKilled": false,
            "Dead": false,
            "Pid": 2835562,
            "ExitCode": 0,
            "Error": "",
            "StartedAt": "2025-11-21T14:43:42.615467666Z",
            "FinishedAt": "0001-01-01T00:00:00Z"
        },
        "Image": "sha256:6dfeb5329cc83d126555f88f43b09eef7a09c7f546c9166b94d33747df91b6df",
        "ResolvConfPath": "/var/lib/docker/containers/06d5dd86afe167e6eb08ea3044c0d6356df71a87f2b7acc2267f870459c4f2cd/resolv.conf",
        "HostnamePath": "/var/lib/docker/containers/06d5dd86afe167e6eb08ea3044c0d6356df71a87f2b7acc2267f870459c4f2cd/hostname",
        "HostsPath": "/var/lib/docker/containers/06d5dd86afe167e6eb08ea3044c0d6356df71a87f2b7acc2267f870459c4f2cd/hosts",
        "LogPath": "/var/lib/docker/containers/06d5dd86afe167e6eb08ea3044c0d6356df71a87f2b7acc2267f870459c4f2cd/06d5dd86afe167e6eb08ea3044c0d6356df71a87f2b7acc2267f870459c4f2cd-json.log",
        "Name": "/old-k8s-version-092258",
        "RestartCount": 0,
        "Driver": "overlay2",
        "Platform": "linux",
        "MountLabel": "",
        "ProcessLabel": "",
        "AppArmorProfile": "unconfined",
        "ExecIDs": null,
        "HostConfig": {
            "Binds": [
                "old-k8s-version-092258:/var",
                "/lib/modules:/lib/modules:ro"
            ],
            "ContainerIDFile": "",
            "LogConfig": {
                "Type": "json-file",
                "Config": {}
            },
            "NetworkMode": "old-k8s-version-092258",
            "PortBindings": {
                "22/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": ""
                    }
                ],
                "2376/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": ""
                    }
                ],
                "32443/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": ""
                    }
                ],
                "5000/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": ""
                    }
                ],
                "8443/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": ""
                    }
                ]
            },
            "RestartPolicy": {
                "Name": "no",
                "MaximumRetryCount": 0
            },
            "AutoRemove": false,
            "VolumeDriver": "",
            "VolumesFrom": null,
            "ConsoleSize": [
                0,
                0
            ],
            "CapAdd": null,
            "CapDrop": null,
            "CgroupnsMode": "host",
            "Dns": [],
            "DnsOptions": [],
            "DnsSearch": [],
            "ExtraHosts": null,
            "GroupAdd": null,
            "IpcMode": "private",
            "Cgroup": "",
            "Links": null,
            "OomScoreAdj": 0,
            "PidMode": "",
            "Privileged": true,
            "PublishAllPorts": false,
            "ReadonlyRootfs": false,
            "SecurityOpt": [
                "seccomp=unconfined",
                "apparmor=unconfined",
                "label=disable"
            ],
            "Tmpfs": {
                "/run": "",
                "/tmp": ""
            },
            "UTSMode": "",
            "UsernsMode": "",
            "ShmSize": 67108864,
            "Runtime": "runc",
            "Isolation": "",
            "CpuShares": 0,
            "Memory": 3221225472,
            "NanoCpus": 2000000000,
            "CgroupParent": "",
            "BlkioWeight": 0,
            "BlkioWeightDevice": [],
            "BlkioDeviceReadBps": [],
            "BlkioDeviceWriteBps": [],
            "BlkioDeviceReadIOps": [],
            "BlkioDeviceWriteIOps": [],
            "CpuPeriod": 0,
            "CpuQuota": 0,
            "CpuRealtimePeriod": 0,
            "CpuRealtimeRuntime": 0,
            "CpusetCpus": "",
            "CpusetMems": "",
            "Devices": [],
            "DeviceCgroupRules": null,
            "DeviceRequests": null,
            "MemoryReservation": 0,
            "MemorySwap": 6442450944,
            "MemorySwappiness": null,
            "OomKillDisable": false,
            "PidsLimit": null,
            "Ulimits": [],
            "CpuCount": 0,
            "CpuPercent": 0,
            "IOMaximumIOps": 0,
            "IOMaximumBandwidth": 0,
            "MaskedPaths": null,
            "ReadonlyPaths": null
        },
        "GraphDriver": {
            "Data": {
                "ID": "06d5dd86afe167e6eb08ea3044c0d6356df71a87f2b7acc2267f870459c4f2cd",
                "LowerDir": "/var/lib/docker/overlay2/8452e0c048f2d0756f64c494882e5db8b7ecd5ac7b4b99aa190200898d89fa81-init/diff:/var/lib/docker/overlay2/789a4b9f9866e585907664b1eaf98d94438dbf699e0511f3ca5ba5ea682b005e/diff",
                "MergedDir": "/var/lib/docker/overlay2/8452e0c048f2d0756f64c494882e5db8b7ecd5ac7b4b99aa190200898d89fa81/merged",
                "UpperDir": "/var/lib/docker/overlay2/8452e0c048f2d0756f64c494882e5db8b7ecd5ac7b4b99aa190200898d89fa81/diff",
                "WorkDir": "/var/lib/docker/overlay2/8452e0c048f2d0756f64c494882e5db8b7ecd5ac7b4b99aa190200898d89fa81/work"
            },
            "Name": "overlay2"
        },
        "Mounts": [
            {
                "Type": "volume",
                "Name": "old-k8s-version-092258",
                "Source": "/var/lib/docker/volumes/old-k8s-version-092258/_data",
                "Destination": "/var",
                "Driver": "local",
                "Mode": "z",
                "RW": true,
                "Propagation": ""
            },
            {
                "Type": "bind",
                "Source": "/lib/modules",
                "Destination": "/lib/modules",
                "Mode": "ro",
                "RW": false,
                "Propagation": "rprivate"
            }
        ],
        "Config": {
            "Hostname": "old-k8s-version-092258",
            "Domainname": "",
            "User": "",
            "AttachStdin": false,
            "AttachStdout": false,
            "AttachStderr": false,
            "ExposedPorts": {
                "22/tcp": {},
                "2376/tcp": {},
                "32443/tcp": {},
                "5000/tcp": {},
                "8443/tcp": {}
            },
            "Tty": true,
            "OpenStdin": false,
            "StdinOnce": false,
            "Env": [
                "container=docker",
                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
            ],
            "Cmd": null,
            "Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a",
            "Volumes": null,
            "WorkingDir": "/",
            "Entrypoint": [
                "/usr/local/bin/entrypoint",
                "/sbin/init"
            ],
            "OnBuild": null,
            "Labels": {
                "created_by.minikube.sigs.k8s.io": "true",
                "mode.minikube.sigs.k8s.io": "old-k8s-version-092258",
                "name.minikube.sigs.k8s.io": "old-k8s-version-092258",
                "role.minikube.sigs.k8s.io": ""
            },
            "StopSignal": "SIGRTMIN+3"
        },
        "NetworkSettings": {
            "Bridge": "",
            "SandboxID": "858034611e5cf97ef820625d4dcf77e9b3d1510529f8fc62d29cb6c8391e9b31",
            "SandboxKey": "/var/run/docker/netns/858034611e5c",
            "Ports": {
                "22/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": "36720"
                    }
                ],
                "2376/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": "36721"
                    }
                ],
                "32443/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": "36724"
                    }
                ],
                "5000/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": "36722"
                    }
                ],
                "8443/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": "36723"
                    }
                ]
            },
            "HairpinMode": false,
            "LinkLocalIPv6Address": "",
            "LinkLocalIPv6PrefixLen": 0,
            "SecondaryIPAddresses": null,
            "SecondaryIPv6Addresses": null,
            "EndpointID": "",
            "Gateway": "",
            "GlobalIPv6Address": "",
            "GlobalIPv6PrefixLen": 0,
            "IPAddress": "",
            "IPPrefixLen": 0,
            "IPv6Gateway": "",
            "MacAddress": "",
            "Networks": {
                "old-k8s-version-092258": {
                    "IPAMConfig": {
                        "IPv4Address": "192.168.76.2"
                    },
                    "Links": null,
                    "Aliases": null,
                    "MacAddress": "66:d3:7f:90:c5:6d",
                    "DriverOpts": null,
                    "GwPriority": 0,
                    "NetworkID": "02cac79c841e49103d05ede51175e2b52d6dd809de4e55337963bb73586b9563",
                    "EndpointID": "d33cdf4d1c16a9f5945ab24f1a11153f9cd4665673d1a383064fc3d56825842f",
                    "Gateway": "192.168.76.1",
                    "IPAddress": "192.168.76.2",
                    "IPPrefixLen": 24,
                    "IPv6Gateway": "",
                    "GlobalIPv6Address": "",
                    "GlobalIPv6PrefixLen": 0,
                    "DNSNames": [
                        "old-k8s-version-092258",
                        "06d5dd86afe1"
                    ]
                }
            }
        }
    }
]
-- /stdout --
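Note: one field worth flagging in the inspect output above is HostConfig.Ulimits, which is the empty list, i.e. minikube passed no explicit --ulimit to docker run, so the node container inherits the Docker daemon's default NOFILE limit. A sketch for pulling just that field, assuming jq is available on the host:

    docker inspect old-k8s-version-092258 | jq '.[0].HostConfig.Ulimits'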
helpers_test.go:247: (dbg) Run: out/minikube-linux-arm64 status --format={{.Host}} -p old-k8s-version-092258 -n old-k8s-version-092258
helpers_test.go:252: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-arm64 -p old-k8s-version-092258 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-arm64 -p old-k8s-version-092258 logs -n 25: (1.187170709s)
helpers_test.go:260: TestStartStop/group/old-k8s-version/serial/DeployApp logs:
-- stdout --
==> Audit <==
┌─────────┬─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬──────────────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼──────────────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ ssh │ -p cilium-650772 sudo cat /etc/docker/daemon.json │ cilium-650772 │ jenkins │ v1.37.0 │ 21 Nov 25 14:42 UTC │ │
│ ssh │ -p cilium-650772 sudo docker system info │ cilium-650772 │ jenkins │ v1.37.0 │ 21 Nov 25 14:42 UTC │ │
│ ssh │ -p cilium-650772 sudo systemctl status cri-docker --all --full --no-pager │ cilium-650772 │ jenkins │ v1.37.0 │ 21 Nov 25 14:42 UTC │ │
│ ssh │ -p cilium-650772 sudo systemctl cat cri-docker --no-pager │ cilium-650772 │ jenkins │ v1.37.0 │ 21 Nov 25 14:42 UTC │ │
│ ssh │ -p cilium-650772 sudo cat /etc/systemd/system/cri-docker.service.d/10-cni.conf │ cilium-650772 │ jenkins │ v1.37.0 │ 21 Nov 25 14:42 UTC │ │
│ ssh │ -p cilium-650772 sudo cat /usr/lib/systemd/system/cri-docker.service │ cilium-650772 │ jenkins │ v1.37.0 │ 21 Nov 25 14:42 UTC │ │
│ ssh │ -p cilium-650772 sudo cri-dockerd --version │ cilium-650772 │ jenkins │ v1.37.0 │ 21 Nov 25 14:42 UTC │ │
│ ssh │ -p cilium-650772 sudo systemctl status containerd --all --full --no-pager │ cilium-650772 │ jenkins │ v1.37.0 │ 21 Nov 25 14:42 UTC │ │
│ ssh │ -p cilium-650772 sudo systemctl cat containerd --no-pager │ cilium-650772 │ jenkins │ v1.37.0 │ 21 Nov 25 14:42 UTC │ │
│ ssh │ -p cilium-650772 sudo cat /lib/systemd/system/containerd.service │ cilium-650772 │ jenkins │ v1.37.0 │ 21 Nov 25 14:42 UTC │ │
│ ssh │ -p cilium-650772 sudo cat /etc/containerd/config.toml │ cilium-650772 │ jenkins │ v1.37.0 │ 21 Nov 25 14:42 UTC │ │
│ ssh │ -p cilium-650772 sudo containerd config dump │ cilium-650772 │ jenkins │ v1.37.0 │ 21 Nov 25 14:42 UTC │ │
│ ssh │ -p cilium-650772 sudo systemctl status crio --all --full --no-pager │ cilium-650772 │ jenkins │ v1.37.0 │ 21 Nov 25 14:42 UTC │ │
│ ssh │ -p cilium-650772 sudo systemctl cat crio --no-pager │ cilium-650772 │ jenkins │ v1.37.0 │ 21 Nov 25 14:42 UTC │ │
│ ssh │ -p cilium-650772 sudo find /etc/crio -type f -exec sh -c 'echo {}; cat {}' \; │ cilium-650772 │ jenkins │ v1.37.0 │ 21 Nov 25 14:42 UTC │ │
│ ssh │ -p cilium-650772 sudo crio config │ cilium-650772 │ jenkins │ v1.37.0 │ 21 Nov 25 14:42 UTC │ │
│ delete │ -p cilium-650772 │ cilium-650772 │ jenkins │ v1.37.0 │ 21 Nov 25 14:42 UTC │ 21 Nov 25 14:42 UTC │
│ start │ -p cert-expiration-184410 --memory=3072 --cert-expiration=3m --driver=docker --container-runtime=containerd │ cert-expiration-184410 │ jenkins │ v1.37.0 │ 21 Nov 25 14:42 UTC │ 21 Nov 25 14:42 UTC │
│ ssh │ force-systemd-env-041746 ssh cat /etc/containerd/config.toml │ force-systemd-env-041746 │ jenkins │ v1.37.0 │ 21 Nov 25 14:42 UTC │ 21 Nov 25 14:42 UTC │
│ delete │ -p force-systemd-env-041746 │ force-systemd-env-041746 │ jenkins │ v1.37.0 │ 21 Nov 25 14:42 UTC │ 21 Nov 25 14:43 UTC │
│ start │ -p cert-options-035007 --memory=3072 --apiserver-ips=127.0.0.1 --apiserver-ips=192.168.15.15 --apiserver-names=localhost --apiserver-names=www.google.com --apiserver-port=8555 --driver=docker --container-runtime=containerd │ cert-options-035007 │ jenkins │ v1.37.0 │ 21 Nov 25 14:43 UTC │ 21 Nov 25 14:43 UTC │
│ ssh │ cert-options-035007 ssh openssl x509 -text -noout -in /var/lib/minikube/certs/apiserver.crt │ cert-options-035007 │ jenkins │ v1.37.0 │ 21 Nov 25 14:43 UTC │ 21 Nov 25 14:43 UTC │
│ ssh │ -p cert-options-035007 -- sudo cat /etc/kubernetes/admin.conf │ cert-options-035007 │ jenkins │ v1.37.0 │ 21 Nov 25 14:43 UTC │ 21 Nov 25 14:43 UTC │
│ delete │ -p cert-options-035007 │ cert-options-035007 │ jenkins │ v1.37.0 │ 21 Nov 25 14:43 UTC │ 21 Nov 25 14:43 UTC │
│ start │ -p old-k8s-version-092258 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.28.0 │ old-k8s-version-092258 │ jenkins │ v1.37.0 │ 21 Nov 25 14:43 UTC │ 21 Nov 25 14:44 UTC │
└─────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/11/21 14:43:36
Running on machine: ip-172-31-21-244
Binary: Built with gc go1.24.6 for linux/arm64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1121 14:43:36.383330 2835167 out.go:360] Setting OutFile to fd 1 ...
I1121 14:43:36.383513 2835167 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1121 14:43:36.383524 2835167 out.go:374] Setting ErrFile to fd 2...
I1121 14:43:36.383530 2835167 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1121 14:43:36.383828 2835167 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21847-2633933/.minikube/bin
I1121 14:43:36.384345 2835167 out.go:368] Setting JSON to false
I1121 14:43:36.385436 2835167 start.go:133] hostinfo: {"hostname":"ip-172-31-21-244","uptime":69965,"bootTime":1763666252,"procs":190,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1084-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"da8ac1fd-6236-412a-a346-95873c98230d"}
I1121 14:43:36.385513 2835167 start.go:143] virtualization:
I1121 14:43:36.390376 2835167 out.go:179] * [old-k8s-version-092258] minikube v1.37.0 on Ubuntu 20.04 (arm64)
I1121 14:43:36.394191 2835167 notify.go:221] Checking for updates...
I1121 14:43:36.397425 2835167 out.go:179] - MINIKUBE_LOCATION=21847
I1121 14:43:36.400714 2835167 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1121 14:43:36.403761 2835167 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21847-2633933/kubeconfig
I1121 14:43:36.406876 2835167 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21847-2633933/.minikube
I1121 14:43:36.409847 2835167 out.go:179] - MINIKUBE_BIN=out/minikube-linux-arm64
I1121 14:43:36.413120 2835167 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1121 14:43:36.416775 2835167 config.go:182] Loaded profile config "cert-expiration-184410": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1121 14:43:36.416939 2835167 driver.go:422] Setting default libvirt URI to qemu:///system
I1121 14:43:36.450855 2835167 docker.go:124] docker version: linux-28.1.1:Docker Engine - Community
I1121 14:43:36.450991 2835167 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1121 14:43:36.516624 2835167 info.go:266] docker info: {ID:5FDH:SA5P:5GCT:NLAS:B73P:SGDQ:PBG5:UBVH:UZY3:RXGO:CI7S:WAIH Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:36 OomKillDisable:true NGoroutines:52 SystemTime:2025-11-21 14:43:36.506387596 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214831104 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-21-244 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I1121 14:43:36.516732 2835167 docker.go:319] overlay module found
I1121 14:43:36.519985 2835167 out.go:179] * Using the docker driver based on user configuration
I1121 14:43:36.522932 2835167 start.go:309] selected driver: docker
I1121 14:43:36.522956 2835167 start.go:930] validating driver "docker" against <nil>
I1121 14:43:36.522972 2835167 start.go:941] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1121 14:43:36.523794 2835167 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1121 14:43:36.580334 2835167 info.go:266] docker info: {ID:5FDH:SA5P:5GCT:NLAS:B73P:SGDQ:PBG5:UBVH:UZY3:RXGO:CI7S:WAIH Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:36 OomKillDisable:true NGoroutines:52 SystemTime:2025-11-21 14:43:36.571381735 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214831104 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-21-244 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I1121 14:43:36.580506 2835167 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I1121 14:43:36.580737 2835167 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1121 14:43:36.583854 2835167 out.go:179] * Using Docker driver with root privileges
I1121 14:43:36.586764 2835167 cni.go:84] Creating CNI manager for ""
I1121 14:43:36.586838 2835167 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1121 14:43:36.586852 2835167 start_flags.go:336] Found "CNI" CNI - setting NetworkPlugin=cni
I1121 14:43:36.586941 2835167 start.go:353] cluster config:
{Name:old-k8s-version-092258 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-092258 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1121 14:43:36.590049 2835167 out.go:179] * Starting "old-k8s-version-092258" primary control-plane node in "old-k8s-version-092258" cluster
I1121 14:43:36.592891 2835167 cache.go:134] Beginning downloading kic base image for docker with containerd
I1121 14:43:36.595918 2835167 out.go:179] * Pulling base image v0.0.48-1763507788-21924 ...
I1121 14:43:36.598784 2835167 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1121 14:43:36.598825 2835167 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a in local docker daemon
I1121 14:43:36.598850 2835167 preload.go:203] Found local preload: /home/jenkins/minikube-integration/21847-2633933/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-arm64.tar.lz4
I1121 14:43:36.598866 2835167 cache.go:65] Caching tarball of preloaded images
I1121 14:43:36.598958 2835167 preload.go:238] Found /home/jenkins/minikube-integration/21847-2633933/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-arm64.tar.lz4 in cache, skipping download
I1121 14:43:36.598968 2835167 cache.go:68] Finished verifying existence of preloaded tar for v1.28.0 on containerd
I1121 14:43:36.599136 2835167 profile.go:143] Saving config to /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/config.json ...
I1121 14:43:36.599165 2835167 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/config.json: {Name:mk03fe35747f6c73b79e2daee9ca9c7b13210439 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:43:36.618070 2835167 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a in local docker daemon, skipping pull
I1121 14:43:36.618098 2835167 cache.go:158] gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a exists in daemon, skipping load
I1121 14:43:36.618116 2835167 cache.go:243] Successfully downloaded all kic artifacts
I1121 14:43:36.618138 2835167 start.go:360] acquireMachinesLock for old-k8s-version-092258: {Name:mkf21290144e8164ceda2548005b3a6e3ed2df4c Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1121 14:43:36.618251 2835167 start.go:364] duration metric: took 91.969µs to acquireMachinesLock for "old-k8s-version-092258"
I1121 14:43:36.618280 2835167 start.go:93] Provisioning new machine with config: &{Name:old-k8s-version-092258 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-092258 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1121 14:43:36.618362 2835167 start.go:125] createHost starting for "" (driver="docker")
I1121 14:43:36.621674 2835167 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I1121 14:43:36.621900 2835167 start.go:159] libmachine.API.Create for "old-k8s-version-092258" (driver="docker")
I1121 14:43:36.621945 2835167 client.go:173] LocalClient.Create starting
I1121 14:43:36.622015 2835167 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21847-2633933/.minikube/certs/ca.pem
I1121 14:43:36.622055 2835167 main.go:143] libmachine: Decoding PEM data...
I1121 14:43:36.622071 2835167 main.go:143] libmachine: Parsing certificate...
I1121 14:43:36.622122 2835167 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21847-2633933/.minikube/certs/cert.pem
I1121 14:43:36.622144 2835167 main.go:143] libmachine: Decoding PEM data...
I1121 14:43:36.622155 2835167 main.go:143] libmachine: Parsing certificate...
I1121 14:43:36.622504 2835167 cli_runner.go:164] Run: docker network inspect old-k8s-version-092258 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1121 14:43:36.638581 2835167 cli_runner.go:211] docker network inspect old-k8s-version-092258 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1121 14:43:36.638673 2835167 network_create.go:284] running [docker network inspect old-k8s-version-092258] to gather additional debugging logs...
I1121 14:43:36.638695 2835167 cli_runner.go:164] Run: docker network inspect old-k8s-version-092258
W1121 14:43:36.654171 2835167 cli_runner.go:211] docker network inspect old-k8s-version-092258 returned with exit code 1
I1121 14:43:36.654200 2835167 network_create.go:287] error running [docker network inspect old-k8s-version-092258]: docker network inspect old-k8s-version-092258: exit status 1
stdout:
[]
stderr:
Error response from daemon: network old-k8s-version-092258 not found
I1121 14:43:36.654221 2835167 network_create.go:289] output of [docker network inspect old-k8s-version-092258]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network old-k8s-version-092258 not found
** /stderr **
I1121 14:43:36.654336 2835167 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1121 14:43:36.670217 2835167 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-c13a3bee40ff IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:02:9f:8e:c6:2a:d6} reservation:<nil>}
I1121 14:43:36.670512 2835167 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-1859e8fd5584 IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:c6:c6:00:f6:5b:96} reservation:<nil>}
I1121 14:43:36.670770 2835167 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-44a9b6062c4d IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:82:b5:31:a5:3d:f0} reservation:<nil>}
I1121 14:43:36.671175 2835167 network.go:206] using free private subnet 192.168.76.0/24: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0x4001a1a410}
I1121 14:43:36.671200 2835167 network_create.go:124] attempt to create docker network old-k8s-version-092258 192.168.76.0/24 with gateway 192.168.76.1 and MTU of 1500 ...
I1121 14:43:36.671260 2835167 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.76.0/24 --gateway=192.168.76.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=old-k8s-version-092258 old-k8s-version-092258
I1121 14:43:36.731269 2835167 network_create.go:108] docker network old-k8s-version-092258 192.168.76.0/24 created
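Note: the "skipping subnet" lines above show how minikube picks a network, walking its candidate private /24 ranges (192.168.49.0, 192.168.58.0, 192.168.67.0, ...) until one is unclaimed, here 192.168.76.0/24. A sketch for confirming what a profile network ended up with, using standard docker network inspect Go templating:

    # Expected output for this run: 192.168.76.0/24 gw 192.168.76.1
    docker network inspect old-k8s-version-092258 --format '{{(index .IPAM.Config 0).Subnet}} gw {{(index .IPAM.Config 0).Gateway}}'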
I1121 14:43:36.731302 2835167 kic.go:121] calculated static IP "192.168.76.2" for the "old-k8s-version-092258" container
I1121 14:43:36.731379 2835167 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1121 14:43:36.747231 2835167 cli_runner.go:164] Run: docker volume create old-k8s-version-092258 --label name.minikube.sigs.k8s.io=old-k8s-version-092258 --label created_by.minikube.sigs.k8s.io=true
I1121 14:43:36.766444 2835167 oci.go:103] Successfully created a docker volume old-k8s-version-092258
I1121 14:43:36.766529 2835167 cli_runner.go:164] Run: docker run --rm --name old-k8s-version-092258-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=old-k8s-version-092258 --entrypoint /usr/bin/test -v old-k8s-version-092258:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a -d /var/lib
I1121 14:43:37.323084 2835167 oci.go:107] Successfully prepared a docker volume old-k8s-version-092258
I1121 14:43:37.323160 2835167 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1121 14:43:37.323176 2835167 kic.go:194] Starting extracting preloaded images to volume ...
I1121 14:43:37.323249 2835167 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21847-2633933/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v old-k8s-version-092258:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a -I lz4 -xf /preloaded.tar -C /extractDir
I1121 14:43:42.479471 2835167 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21847-2633933/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v old-k8s-version-092258:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a -I lz4 -xf /preloaded.tar -C /extractDir: (5.156180728s)
I1121 14:43:42.479504 2835167 kic.go:203] duration metric: took 5.156324945s to extract preloaded images to volume ...
W1121 14:43:42.479641 2835167 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I1121 14:43:42.479761 2835167 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1121 14:43:42.537202 2835167 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname old-k8s-version-092258 --name old-k8s-version-092258 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=old-k8s-version-092258 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=old-k8s-version-092258 --network old-k8s-version-092258 --ip 192.168.76.2 --volume old-k8s-version-092258:/var --security-opt apparmor=unconfined --memory=3072mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a
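Note: this docker run is where the node's resource caps are fixed; --memory=3072mb and --cpus=2 are what surface in the earlier docker inspect output as Memory 3221225472 (3072 * 1024 * 1024 bytes) and NanoCpus 2000000000. A sketch for re-checking them on a live profile:

    docker inspect old-k8s-version-092258 --format 'mem={{.HostConfig.Memory}} nanocpus={{.HostConfig.NanoCpus}}'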
I1121 14:43:42.837167 2835167 cli_runner.go:164] Run: docker container inspect old-k8s-version-092258 --format={{.State.Running}}
I1121 14:43:42.855671 2835167 cli_runner.go:164] Run: docker container inspect old-k8s-version-092258 --format={{.State.Status}}
I1121 14:43:42.878990 2835167 cli_runner.go:164] Run: docker exec old-k8s-version-092258 stat /var/lib/dpkg/alternatives/iptables
I1121 14:43:42.930669 2835167 oci.go:144] the created container "old-k8s-version-092258" has a running status.
I1121 14:43:42.930708 2835167 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21847-2633933/.minikube/machines/old-k8s-version-092258/id_rsa...
I1121 14:43:43.641994 2835167 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21847-2633933/.minikube/machines/old-k8s-version-092258/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1121 14:43:43.662600 2835167 cli_runner.go:164] Run: docker container inspect old-k8s-version-092258 --format={{.State.Status}}
I1121 14:43:43.680831 2835167 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1121 14:43:43.680859 2835167 kic_runner.go:114] Args: [docker exec --privileged old-k8s-version-092258 chown docker:docker /home/docker/.ssh/authorized_keys]
I1121 14:43:43.723054 2835167 cli_runner.go:164] Run: docker container inspect old-k8s-version-092258 --format={{.State.Status}}
I1121 14:43:43.743125 2835167 machine.go:94] provisionDockerMachine start ...
I1121 14:43:43.743225 2835167 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-092258
I1121 14:43:43.760910 2835167 main.go:143] libmachine: Using SSH client type: native
I1121 14:43:43.761356 2835167 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3eefe0] 0x3f1790 <nil> [] 0s} 127.0.0.1 36720 <nil> <nil>}
I1121 14:43:43.761377 2835167 main.go:143] libmachine: About to run SSH command:
hostname
I1121 14:43:43.762044 2835167 main.go:143] libmachine: Error dialing TCP: ssh: handshake failed: EOF
I1121 14:43:46.904947 2835167 main.go:143] libmachine: SSH cmd err, output: <nil>: old-k8s-version-092258
I1121 14:43:46.904969 2835167 ubuntu.go:182] provisioning hostname "old-k8s-version-092258"
I1121 14:43:46.905064 2835167 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-092258
I1121 14:43:46.922185 2835167 main.go:143] libmachine: Using SSH client type: native
I1121 14:43:46.922512 2835167 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3eefe0] 0x3f1790 <nil> [] 0s} 127.0.0.1 36720 <nil> <nil>}
I1121 14:43:46.922533 2835167 main.go:143] libmachine: About to run SSH command:
sudo hostname old-k8s-version-092258 && echo "old-k8s-version-092258" | sudo tee /etc/hostname
I1121 14:43:47.078378 2835167 main.go:143] libmachine: SSH cmd err, output: <nil>: old-k8s-version-092258
I1121 14:43:47.078461 2835167 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-092258
I1121 14:43:47.102253 2835167 main.go:143] libmachine: Using SSH client type: native
I1121 14:43:47.102562 2835167 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3eefe0] 0x3f1790 <nil> [] 0s} 127.0.0.1 36720 <nil> <nil>}
I1121 14:43:47.102584 2835167 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sold-k8s-version-092258' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 old-k8s-version-092258/g' /etc/hosts;
else
echo '127.0.1.1 old-k8s-version-092258' | sudo tee -a /etc/hosts;
fi
fi
I1121 14:43:47.245266 2835167 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1121 14:43:47.245351 2835167 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21847-2633933/.minikube CaCertPath:/home/jenkins/minikube-integration/21847-2633933/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21847-2633933/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21847-2633933/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21847-2633933/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21847-2633933/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21847-2633933/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21847-2633933/.minikube}
I1121 14:43:47.245378 2835167 ubuntu.go:190] setting up certificates
I1121 14:43:47.245387 2835167 provision.go:84] configureAuth start
I1121 14:43:47.245451 2835167 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-092258
I1121 14:43:47.263540 2835167 provision.go:143] copyHostCerts
I1121 14:43:47.263612 2835167 exec_runner.go:144] found /home/jenkins/minikube-integration/21847-2633933/.minikube/ca.pem, removing ...
I1121 14:43:47.263626 2835167 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21847-2633933/.minikube/ca.pem
I1121 14:43:47.263706 2835167 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21847-2633933/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21847-2633933/.minikube/ca.pem (1082 bytes)
I1121 14:43:47.263811 2835167 exec_runner.go:144] found /home/jenkins/minikube-integration/21847-2633933/.minikube/cert.pem, removing ...
I1121 14:43:47.263822 2835167 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21847-2633933/.minikube/cert.pem
I1121 14:43:47.263853 2835167 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21847-2633933/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21847-2633933/.minikube/cert.pem (1123 bytes)
I1121 14:43:47.263922 2835167 exec_runner.go:144] found /home/jenkins/minikube-integration/21847-2633933/.minikube/key.pem, removing ...
I1121 14:43:47.263932 2835167 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21847-2633933/.minikube/key.pem
I1121 14:43:47.263960 2835167 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21847-2633933/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21847-2633933/.minikube/key.pem (1679 bytes)
I1121 14:43:47.264022 2835167 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21847-2633933/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21847-2633933/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21847-2633933/.minikube/certs/ca-key.pem org=jenkins.old-k8s-version-092258 san=[127.0.0.1 192.168.76.2 localhost minikube old-k8s-version-092258]
I1121 14:43:48.319202 2835167 provision.go:177] copyRemoteCerts
I1121 14:43:48.319298 2835167 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1121 14:43:48.319410 2835167 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-092258
I1121 14:43:48.336010 2835167 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:36720 SSHKeyPath:/home/jenkins/minikube-integration/21847-2633933/.minikube/machines/old-k8s-version-092258/id_rsa Username:docker}
I1121 14:43:48.436628 2835167 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-2633933/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1121 14:43:48.454108 2835167 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-2633933/.minikube/machines/server.pem --> /etc/docker/server.pem (1233 bytes)
I1121 14:43:48.472424 2835167 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-2633933/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1121 14:43:48.490030 2835167 provision.go:87] duration metric: took 1.244618966s to configureAuth
I1121 14:43:48.490068 2835167 ubuntu.go:206] setting minikube options for container-runtime
I1121 14:43:48.490248 2835167 config.go:182] Loaded profile config "old-k8s-version-092258": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1121 14:43:48.490256 2835167 machine.go:97] duration metric: took 4.747108297s to provisionDockerMachine
I1121 14:43:48.490263 2835167 client.go:176] duration metric: took 11.868306871s to LocalClient.Create
I1121 14:43:48.490277 2835167 start.go:167] duration metric: took 11.868378746s to libmachine.API.Create "old-k8s-version-092258"
I1121 14:43:48.490284 2835167 start.go:293] postStartSetup for "old-k8s-version-092258" (driver="docker")
I1121 14:43:48.490298 2835167 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1121 14:43:48.490349 2835167 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1121 14:43:48.490386 2835167 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-092258
I1121 14:43:48.506758 2835167 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:36720 SSHKeyPath:/home/jenkins/minikube-integration/21847-2633933/.minikube/machines/old-k8s-version-092258/id_rsa Username:docker}
I1121 14:43:48.608899 2835167 ssh_runner.go:195] Run: cat /etc/os-release
I1121 14:43:48.612079 2835167 main.go:143] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1121 14:43:48.612112 2835167 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1121 14:43:48.612141 2835167 filesync.go:126] Scanning /home/jenkins/minikube-integration/21847-2633933/.minikube/addons for local assets ...
I1121 14:43:48.612212 2835167 filesync.go:126] Scanning /home/jenkins/minikube-integration/21847-2633933/.minikube/files for local assets ...
I1121 14:43:48.612293 2835167 filesync.go:149] local asset: /home/jenkins/minikube-integration/21847-2633933/.minikube/files/etc/ssl/certs/26357852.pem -> 26357852.pem in /etc/ssl/certs
I1121 14:43:48.612406 2835167 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1121 14:43:48.619568 2835167 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-2633933/.minikube/files/etc/ssl/certs/26357852.pem --> /etc/ssl/certs/26357852.pem (1708 bytes)
I1121 14:43:48.636807 2835167 start.go:296] duration metric: took 146.508249ms for postStartSetup
I1121 14:43:48.637286 2835167 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-092258
I1121 14:43:48.653983 2835167 profile.go:143] Saving config to /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/config.json ...
I1121 14:43:48.654267 2835167 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1121 14:43:48.654326 2835167 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-092258
I1121 14:43:48.671301 2835167 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:36720 SSHKeyPath:/home/jenkins/minikube-integration/21847-2633933/.minikube/machines/old-k8s-version-092258/id_rsa Username:docker}
I1121 14:43:48.770419 2835167 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1121 14:43:48.775323 2835167 start.go:128] duration metric: took 12.156945443s to createHost
I1121 14:43:48.775348 2835167 start.go:83] releasing machines lock for "old-k8s-version-092258", held for 12.157085763s
I1121 14:43:48.775420 2835167 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-092258
I1121 14:43:48.792336 2835167 ssh_runner.go:195] Run: cat /version.json
I1121 14:43:48.792390 2835167 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-092258
I1121 14:43:48.792660 2835167 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1121 14:43:48.792747 2835167 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-092258
I1121 14:43:48.822571 2835167 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:36720 SSHKeyPath:/home/jenkins/minikube-integration/21847-2633933/.minikube/machines/old-k8s-version-092258/id_rsa Username:docker}
I1121 14:43:48.823165 2835167 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:36720 SSHKeyPath:/home/jenkins/minikube-integration/21847-2633933/.minikube/machines/old-k8s-version-092258/id_rsa Username:docker}
I1121 14:43:49.010028 2835167 ssh_runner.go:195] Run: systemctl --version
I1121 14:43:49.017657 2835167 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1121 14:43:49.023491 2835167 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1121 14:43:49.023609 2835167 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1121 14:43:49.052303 2835167 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/10-crio-bridge.conflist.disabled] bridge cni config(s)
I1121 14:43:49.052375 2835167 start.go:496] detecting cgroup driver to use...
I1121 14:43:49.052424 2835167 detect.go:187] detected "cgroupfs" cgroup driver on host os
I1121 14:43:49.052491 2835167 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1121 14:43:49.069013 2835167 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1121 14:43:49.083455 2835167 docker.go:218] disabling cri-docker service (if available) ...
I1121 14:43:49.083552 2835167 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1121 14:43:49.106526 2835167 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1121 14:43:49.129811 2835167 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1121 14:43:49.256720 2835167 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1121 14:43:49.389600 2835167 docker.go:234] disabling docker service ...
I1121 14:43:49.389683 2835167 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1121 14:43:49.410151 2835167 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1121 14:43:49.423211 2835167 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1121 14:43:49.545870 2835167 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1121 14:43:49.674290 2835167 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1121 14:43:49.688105 2835167 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1121 14:43:49.704461 2835167 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.9"|' /etc/containerd/config.toml"
I1121 14:43:49.715375 2835167 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1121 14:43:49.725179 2835167 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I1121 14:43:49.725253 2835167 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I1121 14:43:49.734991 2835167 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1121 14:43:49.745235 2835167 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1121 14:43:49.754840 2835167 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1121 14:43:49.764086 2835167 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1121 14:43:49.773152 2835167 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1121 14:43:49.781841 2835167 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1121 14:43:49.791026 2835167 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
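Note: taken together, the sed runs above normalize /etc/containerd/config.toml for this cluster: SystemdCgroup = false to match the host's cgroupfs driver, the io.containerd.runc.v2 shim, sandbox_image registry.k8s.io/pause:3.9, conf_dir /etc/cni/net.d, and enable_unprivileged_ports = true. A sketch for spot-checking the rewritten file from the host:

    docker exec old-k8s-version-092258 grep -E 'SystemdCgroup|sandbox_image|enable_unprivileged_ports|conf_dir' /etc/containerd/config.toml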
I1121 14:43:49.800527 2835167 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1121 14:43:49.808398 2835167 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1121 14:43:49.815956 2835167 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1121 14:43:49.931886 2835167 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1121 14:43:50.066706 2835167 start.go:543] Will wait 60s for socket path /run/containerd/containerd.sock
I1121 14:43:50.066831 2835167 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I1121 14:43:50.071006 2835167 start.go:564] Will wait 60s for crictl version
I1121 14:43:50.071125 2835167 ssh_runner.go:195] Run: which crictl
I1121 14:43:50.075393 2835167 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1121 14:43:50.118651 2835167 start.go:580] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: v2.1.5
RuntimeApiVersion: v1
I1121 14:43:50.118773 2835167 ssh_runner.go:195] Run: containerd --version
I1121 14:43:50.141212 2835167 ssh_runner.go:195] Run: containerd --version
I1121 14:43:50.169638 2835167 out.go:179] * Preparing Kubernetes v1.28.0 on containerd 2.1.5 ...
I1121 14:43:50.172499 2835167 cli_runner.go:164] Run: docker network inspect old-k8s-version-092258 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1121 14:43:50.189507 2835167 ssh_runner.go:195] Run: grep 192.168.76.1 host.minikube.internal$ /etc/hosts
I1121 14:43:50.198370 2835167 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.76.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1121 14:43:50.208962 2835167 kubeadm.go:884] updating cluster {Name:old-k8s-version-092258 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-092258 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1121 14:43:50.209203 2835167 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1121 14:43:50.209272 2835167 ssh_runner.go:195] Run: sudo crictl images --output json
I1121 14:43:50.234384 2835167 containerd.go:627] all images are preloaded for containerd runtime.
I1121 14:43:50.234409 2835167 containerd.go:534] Images already preloaded, skipping extraction
I1121 14:43:50.234475 2835167 ssh_runner.go:195] Run: sudo crictl images --output json
I1121 14:43:50.259395 2835167 containerd.go:627] all images are preloaded for containerd runtime.
I1121 14:43:50.259421 2835167 cache_images.go:86] Images are preloaded, skipping loading
I1121 14:43:50.259430 2835167 kubeadm.go:935] updating node { 192.168.76.2 8443 v1.28.0 containerd true true} ...
I1121 14:43:50.259536 2835167 kubeadm.go:947] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.28.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=old-k8s-version-092258 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.76.2
[Install]
config:
{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-092258 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
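The kubelet unit fragment above uses the standard systemd drop-in idiom: the bare ExecStart= first clears the ExecStart inherited from the base kubelet.service, then the second ExecStart line installs minikube's full command line. The merged result can be inspected with:
systemctl cat kubelet.service   # prints the base unit plus the 10-kubeadm.conf drop-in, in merge order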
I1121 14:43:50.259609 2835167 ssh_runner.go:195] Run: sudo crictl info
I1121 14:43:50.287070 2835167 cni.go:84] Creating CNI manager for ""
I1121 14:43:50.287095 2835167 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1121 14:43:50.287115 2835167 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1121 14:43:50.287139 2835167 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.76.2 APIServerPort:8443 KubernetesVersion:v1.28.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:old-k8s-version-092258 NodeName:old-k8s-version-092258 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.76.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.76.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt
StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1121 14:43:50.287271 2835167 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.76.2
  bindPort: 8443
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
nodeRegistration:
  criSocket: unix:///run/containerd/containerd.sock
  name: "old-k8s-version-092258"
  kubeletExtraArgs:
    node-ip: 192.168.76.2
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
  certSANs: ["127.0.0.1", "localhost", "192.168.76.2"]
  extraArgs:
    enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
  extraArgs:
    allocate-node-cidrs: "true"
    leader-elect: "false"
scheduler:
  extraArgs:
    leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd
    extraArgs:
      proxy-refresh-interval: "70000"
kubernetesVersion: v1.28.0
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  x509:
    clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
  maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
  tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
  tcpCloseWaitTimeout: 0s
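This rendered manifest is what gets copied to /var/tmp/minikube/kubeadm.yaml a few lines below. As a sketch of an offline sanity check (assuming kubeadm >= v1.26, where the validate subcommand was added):
sudo /var/lib/minikube/binaries/v1.28.0/kubeadm config validate --config /var/tmp/minikube/kubeadm.yaml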
I1121 14:43:50.287342 2835167 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.28.0
I1121 14:43:50.295386 2835167 binaries.go:51] Found k8s binaries, skipping transfer
I1121 14:43:50.295454 2835167 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1121 14:43:50.303213 2835167 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (326 bytes)
I1121 14:43:50.317127 2835167 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1121 14:43:50.331240 2835167 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2176 bytes)
I1121 14:43:50.344296 2835167 ssh_runner.go:195] Run: grep 192.168.76.2 control-plane.minikube.internal$ /etc/hosts
I1121 14:43:50.347919 2835167 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.76.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1121 14:43:50.357793 2835167 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1121 14:43:50.483017 2835167 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1121 14:43:50.499630 2835167 certs.go:69] Setting up /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258 for IP: 192.168.76.2
I1121 14:43:50.499697 2835167 certs.go:195] generating shared ca certs ...
I1121 14:43:50.499729 2835167 certs.go:227] acquiring lock for ca certs: {Name:mk0a1b8efa9f1d453751b4f7afafeea16d7243a3 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:43:50.499912 2835167 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21847-2633933/.minikube/ca.key
I1121 14:43:50.499982 2835167 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21847-2633933/.minikube/proxy-client-ca.key
I1121 14:43:50.500020 2835167 certs.go:257] generating profile certs ...
I1121 14:43:50.500125 2835167 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/client.key
I1121 14:43:50.500157 2835167 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/client.crt with IP's: []
I1121 14:43:50.881389 2835167 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/client.crt ...
I1121 14:43:50.881423 2835167 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/client.crt: {Name:mkd66b37bd8f68df88ee391b1c0ae406d24100dc Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:43:50.881622 2835167 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/client.key ...
I1121 14:43:50.881638 2835167 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/client.key: {Name:mk87497e50632ba54cdc705e25ae82f0b49d923a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:43:50.881733 2835167 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/apiserver.key.fe0fc8ce
I1121 14:43:50.881751 2835167 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/apiserver.crt.fe0fc8ce with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.76.2]
I1121 14:43:51.368107 2835167 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/apiserver.crt.fe0fc8ce ...
I1121 14:43:51.368141 2835167 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/apiserver.crt.fe0fc8ce: {Name:mke3412122bd471676c09fe30765bbb879486748 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:43:51.368348 2835167 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/apiserver.key.fe0fc8ce ...
I1121 14:43:51.368363 2835167 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/apiserver.key.fe0fc8ce: {Name:mkb54d05c24cffdedc4d0fc59e5780f32a7a4815 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:43:51.368463 2835167 certs.go:382] copying /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/apiserver.crt.fe0fc8ce -> /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/apiserver.crt
I1121 14:43:51.368560 2835167 certs.go:386] copying /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/apiserver.key.fe0fc8ce -> /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/apiserver.key
I1121 14:43:51.368628 2835167 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/proxy-client.key
I1121 14:43:51.368647 2835167 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/proxy-client.crt with IP's: []
I1121 14:43:51.447326 2835167 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/proxy-client.crt ...
I1121 14:43:51.447360 2835167 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/proxy-client.crt: {Name:mk8fd112c818af834b5d68c83f8c92f6291ef45d Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:43:51.447577 2835167 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/proxy-client.key ...
I1121 14:43:51.447598 2835167 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/proxy-client.key: {Name:mk461afd810cf943501ef59a65730b33eecea0e3 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
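With the profile certs generated, the apiserver certificate's SANs should match the IP list logged above (10.96.0.1 is the in-cluster service VIP, 192.168.76.2 the node IP). A hedged inspection — the path assumes a default $HOME/.minikube layout; this CI run uses its own MINIKUBE_HOME:
openssl x509 -noout -text \
  -in "$HOME/.minikube/profiles/old-k8s-version-092258/apiserver.crt" \
  | grep -A1 'Subject Alternative Name'
# expect IP Address:10.96.0.1, 127.0.0.1, 10.0.0.1, 192.168.76.2 plus the DNS SANs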
I1121 14:43:51.447801 2835167 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-2633933/.minikube/certs/2635785.pem (1338 bytes)
W1121 14:43:51.447843 2835167 certs.go:480] ignoring /home/jenkins/minikube-integration/21847-2633933/.minikube/certs/2635785_empty.pem, impossibly tiny 0 bytes
I1121 14:43:51.447858 2835167 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-2633933/.minikube/certs/ca-key.pem (1675 bytes)
I1121 14:43:51.447889 2835167 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-2633933/.minikube/certs/ca.pem (1082 bytes)
I1121 14:43:51.447920 2835167 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-2633933/.minikube/certs/cert.pem (1123 bytes)
I1121 14:43:51.447947 2835167 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-2633933/.minikube/certs/key.pem (1679 bytes)
I1121 14:43:51.447993 2835167 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-2633933/.minikube/files/etc/ssl/certs/26357852.pem (1708 bytes)
I1121 14:43:51.448566 2835167 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-2633933/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1121 14:43:51.466854 2835167 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-2633933/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I1121 14:43:51.484620 2835167 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-2633933/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1121 14:43:51.502526 2835167 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-2633933/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I1121 14:43:51.521430 2835167 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1432 bytes)
I1121 14:43:51.540179 2835167 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I1121 14:43:51.560949 2835167 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1121 14:43:51.579376 2835167 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I1121 14:43:51.598197 2835167 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-2633933/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1121 14:43:51.615934 2835167 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-2633933/.minikube/certs/2635785.pem --> /usr/share/ca-certificates/2635785.pem (1338 bytes)
I1121 14:43:51.634213 2835167 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-2633933/.minikube/files/etc/ssl/certs/26357852.pem --> /usr/share/ca-certificates/26357852.pem (1708 bytes)
I1121 14:43:51.652095 2835167 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1121 14:43:51.664901 2835167 ssh_runner.go:195] Run: openssl version
I1121 14:43:51.671463 2835167 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1121 14:43:51.680109 2835167 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1121 14:43:51.684127 2835167 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 21 13:57 /usr/share/ca-certificates/minikubeCA.pem
I1121 14:43:51.684210 2835167 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1121 14:43:51.727682 2835167 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1121 14:43:51.736178 2835167 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/2635785.pem && ln -fs /usr/share/ca-certificates/2635785.pem /etc/ssl/certs/2635785.pem"
I1121 14:43:51.744471 2835167 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/2635785.pem
I1121 14:43:51.748815 2835167 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 21 14:03 /usr/share/ca-certificates/2635785.pem
I1121 14:43:51.748886 2835167 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/2635785.pem
I1121 14:43:51.790035 2835167 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/2635785.pem /etc/ssl/certs/51391683.0"
I1121 14:43:51.798184 2835167 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/26357852.pem && ln -fs /usr/share/ca-certificates/26357852.pem /etc/ssl/certs/26357852.pem"
I1121 14:43:51.806524 2835167 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/26357852.pem
I1121 14:43:51.810447 2835167 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 21 14:03 /usr/share/ca-certificates/26357852.pem
I1121 14:43:51.810539 2835167 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/26357852.pem
I1121 14:43:51.851288 2835167 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/26357852.pem /etc/ssl/certs/3ec20f2e.0"
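The three test-and-link blocks above all follow OpenSSL's subject-hash convention: openssl x509 -hash prints the 8-hex-digit lookup name (b5213941, 51391683, 3ec20f2e here), and a <hash>.0 symlink in /etc/ssl/certs makes the CA visible to anything using the system trust store. The same step by hand:
h=$(openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem)
sudo ln -fs /etc/ssl/certs/minikubeCA.pem "/etc/ssl/certs/${h}.0"   # e.g. b5213941.0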
I1121 14:43:51.859608 2835167 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1121 14:43:51.863193 2835167 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1121 14:43:51.863292 2835167 kubeadm.go:401] StartCluster: {Name:old-k8s-version-092258 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-092258 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1121 14:43:51.863366 2835167 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I1121 14:43:51.863446 2835167 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I1121 14:43:51.891859 2835167 cri.go:89] found id: ""
I1121 14:43:51.891941 2835167 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1121 14:43:51.900527 2835167 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1121 14:43:51.909151 2835167 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I1121 14:43:51.909241 2835167 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1121 14:43:51.919737 2835167 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1121 14:43:51.919760 2835167 kubeadm.go:158] found existing configuration files:
I1121 14:43:51.919826 2835167 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1121 14:43:51.928877 2835167 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1121 14:43:51.928996 2835167 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1121 14:43:51.936697 2835167 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1121 14:43:51.944973 2835167 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1121 14:43:51.945066 2835167 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1121 14:43:51.952856 2835167 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1121 14:43:51.960547 2835167 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1121 14:43:51.960686 2835167 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1121 14:43:51.968472 2835167 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1121 14:43:51.976805 2835167 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1121 14:43:51.976888 2835167 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1121 14:43:51.984345 2835167 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.28.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1121 14:43:52.080198 2835167 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1084-aws\n", err: exit status 1
I1121 14:43:52.183959 2835167 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1121 14:44:07.478855 2835167 kubeadm.go:319] [init] Using Kubernetes version: v1.28.0
I1121 14:44:07.478915 2835167 kubeadm.go:319] [preflight] Running pre-flight checks
I1121 14:44:07.479009 2835167 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I1121 14:44:07.479066 2835167 kubeadm.go:319] KERNEL_VERSION: 5.15.0-1084-aws
I1121 14:44:07.479102 2835167 kubeadm.go:319] OS: Linux
I1121 14:44:07.479150 2835167 kubeadm.go:319] CGROUPS_CPU: enabled
I1121 14:44:07.479201 2835167 kubeadm.go:319] CGROUPS_CPUACCT: enabled
I1121 14:44:07.479250 2835167 kubeadm.go:319] CGROUPS_CPUSET: enabled
I1121 14:44:07.479300 2835167 kubeadm.go:319] CGROUPS_DEVICES: enabled
I1121 14:44:07.479351 2835167 kubeadm.go:319] CGROUPS_FREEZER: enabled
I1121 14:44:07.479413 2835167 kubeadm.go:319] CGROUPS_MEMORY: enabled
I1121 14:44:07.479461 2835167 kubeadm.go:319] CGROUPS_PIDS: enabled
I1121 14:44:07.479511 2835167 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I1121 14:44:07.479559 2835167 kubeadm.go:319] CGROUPS_BLKIO: enabled
I1121 14:44:07.479634 2835167 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1121 14:44:07.479732 2835167 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1121 14:44:07.479828 2835167 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I1121 14:44:07.479893 2835167 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1121 14:44:07.482963 2835167 out.go:252] - Generating certificates and keys ...
I1121 14:44:07.483064 2835167 kubeadm.go:319] [certs] Using existing ca certificate authority
I1121 14:44:07.483132 2835167 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1121 14:44:07.483202 2835167 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I1121 14:44:07.483261 2835167 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I1121 14:44:07.483324 2835167 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1121 14:44:07.483377 2835167 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1121 14:44:07.483433 2835167 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1121 14:44:07.483564 2835167 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [localhost old-k8s-version-092258] and IPs [192.168.76.2 127.0.0.1 ::1]
I1121 14:44:07.483619 2835167 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1121 14:44:07.483749 2835167 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [localhost old-k8s-version-092258] and IPs [192.168.76.2 127.0.0.1 ::1]
I1121 14:44:07.483818 2835167 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1121 14:44:07.483886 2835167 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1121 14:44:07.483933 2835167 kubeadm.go:319] [certs] Generating "sa" key and public key
I1121 14:44:07.483992 2835167 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1121 14:44:07.484045 2835167 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1121 14:44:07.484101 2835167 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1121 14:44:07.484169 2835167 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1121 14:44:07.484226 2835167 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1121 14:44:07.484312 2835167 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1121 14:44:07.484381 2835167 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1121 14:44:07.487428 2835167 out.go:252] - Booting up control plane ...
I1121 14:44:07.487608 2835167 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1121 14:44:07.487710 2835167 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1121 14:44:07.487786 2835167 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1121 14:44:07.487905 2835167 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1121 14:44:07.488000 2835167 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1121 14:44:07.488044 2835167 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1121 14:44:07.488215 2835167 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
I1121 14:44:07.488300 2835167 kubeadm.go:319] [apiclient] All control plane components are healthy after 7.502585 seconds
I1121 14:44:07.488418 2835167 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1121 14:44:07.488571 2835167 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1121 14:44:07.488637 2835167 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1121 14:44:07.488849 2835167 kubeadm.go:319] [mark-control-plane] Marking the node old-k8s-version-092258 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1121 14:44:07.488911 2835167 kubeadm.go:319] [bootstrap-token] Using token: szaotk.n52uxpmszzhbby9z
I1121 14:44:07.491820 2835167 out.go:252] - Configuring RBAC rules ...
I1121 14:44:07.491949 2835167 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1121 14:44:07.492037 2835167 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1121 14:44:07.492184 2835167 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1121 14:44:07.492318 2835167 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1121 14:44:07.492450 2835167 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1121 14:44:07.492566 2835167 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1121 14:44:07.492691 2835167 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1121 14:44:07.492737 2835167 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1121 14:44:07.492785 2835167 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1121 14:44:07.492789 2835167 kubeadm.go:319]
I1121 14:44:07.492852 2835167 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1121 14:44:07.492857 2835167 kubeadm.go:319]
I1121 14:44:07.492938 2835167 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1121 14:44:07.492942 2835167 kubeadm.go:319]
I1121 14:44:07.492968 2835167 kubeadm.go:319] mkdir -p $HOME/.kube
I1121 14:44:07.493047 2835167 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1121 14:44:07.493101 2835167 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1121 14:44:07.493105 2835167 kubeadm.go:319]
I1121 14:44:07.493162 2835167 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1121 14:44:07.493166 2835167 kubeadm.go:319]
I1121 14:44:07.493217 2835167 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1121 14:44:07.493221 2835167 kubeadm.go:319]
I1121 14:44:07.493276 2835167 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1121 14:44:07.493355 2835167 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1121 14:44:07.493428 2835167 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1121 14:44:07.493432 2835167 kubeadm.go:319]
I1121 14:44:07.493521 2835167 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1121 14:44:07.493601 2835167 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1121 14:44:07.493607 2835167 kubeadm.go:319]
I1121 14:44:07.493695 2835167 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token szaotk.n52uxpmszzhbby9z \
I1121 14:44:07.493804 2835167 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:d756a1c258e082bbc06f965046f24233900a8e069c2a9d29a764f0b68af739ae \
I1121 14:44:07.493826 2835167 kubeadm.go:319] --control-plane
I1121 14:44:07.493830 2835167 kubeadm.go:319]
I1121 14:44:07.493920 2835167 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1121 14:44:07.493924 2835167 kubeadm.go:319]
I1121 14:44:07.494010 2835167 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token szaotk.n52uxpmszzhbby9z \
I1121 14:44:07.494129 2835167 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:d756a1c258e082bbc06f965046f24233900a8e069c2a9d29a764f0b68af739ae
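The --discovery-token-ca-cert-hash printed above is the SHA-256 of the cluster CA's DER-encoded public key. A joining node can recompute it with kubeadm's documented recipe, here with minikube's cert path substituted for the stock /etc/kubernetes/pki/ca.crt:
openssl x509 -pubkey -in /var/lib/minikube/certs/ca.crt \
  | openssl rsa -pubin -outform der 2>/dev/null \
  | openssl dgst -sha256 -hex | sed 's/^.* //'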
I1121 14:44:07.494138 2835167 cni.go:84] Creating CNI manager for ""
I1121 14:44:07.494145 2835167 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1121 14:44:07.497166 2835167 out.go:179] * Configuring CNI (Container Networking Interface) ...
I1121 14:44:07.500216 2835167 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I1121 14:44:07.505987 2835167 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.28.0/kubectl ...
I1121 14:44:07.506006 2835167 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I1121 14:44:07.546131 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
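cni.go chose kindnet for the docker-driver/containerd combination; applying the manifest creates a DaemonSet in kube-system. A sketch of a readiness check (daemonset name per the kindnet manifest minikube ships, matching the kindnet-tfn5q pod later in this log):
sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig \
  -n kube-system rollout status daemonset kindnet --timeout=2m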
I1121 14:44:08.532445 2835167 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1121 14:44:08.532548 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:08.532605 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes old-k8s-version-092258 minikube.k8s.io/updated_at=2025_11_21T14_44_08_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=29e0798733fefbdc471fd2bbb38f6a7ae2a26162 minikube.k8s.io/name=old-k8s-version-092258 minikube.k8s.io/primary=true
I1121 14:44:08.674460 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:08.674568 2835167 ops.go:34] apiserver oom_adj: -16
I1121 14:44:09.175394 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:09.675368 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:10.174563 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:10.675545 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:11.174568 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:11.675237 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:12.175098 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:12.675136 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:13.175409 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:13.674508 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:14.175483 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:14.674955 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:15.174638 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:15.674566 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:16.174919 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:16.674946 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:17.174624 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:17.675110 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:18.174609 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:18.674810 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:19.174819 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:19.675503 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:19.793386 2835167 kubeadm.go:1114] duration metric: took 11.260904779s to wait for elevateKubeSystemPrivileges
I1121 14:44:19.793428 2835167 kubeadm.go:403] duration metric: took 27.930140359s to StartCluster
I1121 14:44:19.793447 2835167 settings.go:142] acquiring lock: {Name:mkd6064915932eca5a3b1d70feb4ec8240f340da Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:44:19.793514 2835167 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21847-2633933/kubeconfig
I1121 14:44:19.794554 2835167 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-2633933/kubeconfig: {Name:mkd905aaf74d26e32c0b3e46a7edfbf13f4b98ec Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:44:19.794781 2835167 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1121 14:44:19.794907 2835167 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1121 14:44:19.795160 2835167 config.go:182] Loaded profile config "old-k8s-version-092258": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1121 14:44:19.795206 2835167 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1121 14:44:19.795273 2835167 addons.go:70] Setting storage-provisioner=true in profile "old-k8s-version-092258"
I1121 14:44:19.795287 2835167 addons.go:239] Setting addon storage-provisioner=true in "old-k8s-version-092258"
I1121 14:44:19.795308 2835167 host.go:66] Checking if "old-k8s-version-092258" exists ...
I1121 14:44:19.795815 2835167 cli_runner.go:164] Run: docker container inspect old-k8s-version-092258 --format={{.State.Status}}
I1121 14:44:19.795979 2835167 addons.go:70] Setting default-storageclass=true in profile "old-k8s-version-092258"
I1121 14:44:19.795995 2835167 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "old-k8s-version-092258"
I1121 14:44:19.796255 2835167 cli_runner.go:164] Run: docker container inspect old-k8s-version-092258 --format={{.State.Status}}
I1121 14:44:19.798851 2835167 out.go:179] * Verifying Kubernetes components...
I1121 14:44:19.806315 2835167 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1121 14:44:19.841770 2835167 addons.go:239] Setting addon default-storageclass=true in "old-k8s-version-092258"
I1121 14:44:19.841810 2835167 host.go:66] Checking if "old-k8s-version-092258" exists ...
I1121 14:44:19.842215 2835167 cli_runner.go:164] Run: docker container inspect old-k8s-version-092258 --format={{.State.Status}}
I1121 14:44:19.843021 2835167 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1121 14:44:19.845981 2835167 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1121 14:44:19.846003 2835167 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1121 14:44:19.846070 2835167 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-092258
I1121 14:44:19.880288 2835167 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1121 14:44:19.880310 2835167 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1121 14:44:19.880371 2835167 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-092258
I1121 14:44:19.888389 2835167 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:36720 SSHKeyPath:/home/jenkins/minikube-integration/21847-2633933/.minikube/machines/old-k8s-version-092258/id_rsa Username:docker}
I1121 14:44:19.916676 2835167 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:36720 SSHKeyPath:/home/jenkins/minikube-integration/21847-2633933/.minikube/machines/old-k8s-version-092258/id_rsa Username:docker}
I1121 14:44:20.244082 2835167 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1121 14:44:20.285170 2835167 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.76.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
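The pipeline above rewrites the live Corefile in-stream: sed inserts a hosts block (the gateway IP mapped to host.minikube.internal) ahead of the forward plugin and a log directive ahead of errors, then kubectl replace pushes the edited ConfigMap back. To inspect the result:
sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig \
  -n kube-system get configmap coredns -o jsonpath='{.data.Corefile}'
# expect a 'hosts { 192.168.76.1 host.minikube.internal ... fallthrough }' block before 'forward'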
I1121 14:44:20.285362 2835167 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1121 14:44:20.332070 2835167 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1121 14:44:21.059715 2835167 node_ready.go:35] waiting up to 6m0s for node "old-k8s-version-092258" to be "Ready" ...
I1121 14:44:21.059828 2835167 start.go:977] {"host.minikube.internal": 192.168.76.1} host record injected into CoreDNS's ConfigMap
I1121 14:44:21.566332 2835167 kapi.go:214] "coredns" deployment in "kube-system" namespace and "old-k8s-version-092258" context rescaled to 1 replicas
I1121 14:44:21.605475 2835167 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (1.273369049s)
I1121 14:44:21.608818 2835167 out.go:179] * Enabled addons: default-storageclass, storage-provisioner
I1121 14:44:21.611888 2835167 addons.go:530] duration metric: took 1.816656544s for enable addons: enabled=[default-storageclass storage-provisioner]
W1121 14:44:23.063813 2835167 node_ready.go:57] node "old-k8s-version-092258" has "Ready":"False" status (will retry)
W1121 14:44:25.563129 2835167 node_ready.go:57] node "old-k8s-version-092258" has "Ready":"False" status (will retry)
W1121 14:44:27.564011 2835167 node_ready.go:57] node "old-k8s-version-092258" has "Ready":"False" status (will retry)
W1121 14:44:30.063612 2835167 node_ready.go:57] node "old-k8s-version-092258" has "Ready":"False" status (will retry)
W1121 14:44:32.562923 2835167 node_ready.go:57] node "old-k8s-version-092258" has "Ready":"False" status (will retry)
I1121 14:44:33.563363 2835167 node_ready.go:49] node "old-k8s-version-092258" is "Ready"
I1121 14:44:33.563395 2835167 node_ready.go:38] duration metric: took 12.503648731s for node "old-k8s-version-092258" to be "Ready" ...
I1121 14:44:33.563409 2835167 api_server.go:52] waiting for apiserver process to appear ...
I1121 14:44:33.563474 2835167 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1121 14:44:33.580001 2835167 api_server.go:72] duration metric: took 13.7851816s to wait for apiserver process to appear ...
I1121 14:44:33.580026 2835167 api_server.go:88] waiting for apiserver healthz status ...
I1121 14:44:33.580045 2835167 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1121 14:44:33.589120 2835167 api_server.go:279] https://192.168.76.2:8443/healthz returned 200:
ok
I1121 14:44:33.590563 2835167 api_server.go:141] control plane version: v1.28.0
I1121 14:44:33.590586 2835167 api_server.go:131] duration metric: took 10.553339ms to wait for apiserver health ...
I1121 14:44:33.590594 2835167 system_pods.go:43] waiting for kube-system pods to appear ...
I1121 14:44:33.595235 2835167 system_pods.go:59] 8 kube-system pods found
I1121 14:44:33.595322 2835167 system_pods.go:61] "coredns-5dd5756b68-86stv" [6a48c3f2-f439-40e1-885b-5850f95d1ffc] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1121 14:44:33.595346 2835167 system_pods.go:61] "etcd-old-k8s-version-092258" [bbb172b1-cd74-44e9-ba24-92155ea08be4] Running
I1121 14:44:33.595390 2835167 system_pods.go:61] "kindnet-tfn5q" [6bec8380-6059-40d0-b0ed-6c3906f84591] Running
I1121 14:44:33.595417 2835167 system_pods.go:61] "kube-apiserver-old-k8s-version-092258" [adf091a2-7b6d-4ba0-a537-9c5f7f93c471] Running
I1121 14:44:33.595442 2835167 system_pods.go:61] "kube-controller-manager-old-k8s-version-092258" [01f77916-588d-469c-b175-3bbcdfe34ce8] Running
I1121 14:44:33.595479 2835167 system_pods.go:61] "kube-proxy-tdwt5" [94e025a3-f19d-40ce-b6a6-9e2eb3b8f998] Running
I1121 14:44:33.595506 2835167 system_pods.go:61] "kube-scheduler-old-k8s-version-092258" [7dfed185-93bd-4218-9c17-a6105d34022f] Running
I1121 14:44:33.595532 2835167 system_pods.go:61] "storage-provisioner" [a31c361f-8fb6-4726-a554-e70884e4d16e] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1121 14:44:33.595572 2835167 system_pods.go:74] duration metric: took 4.969827ms to wait for pod list to return data ...
I1121 14:44:33.595601 2835167 default_sa.go:34] waiting for default service account to be created ...
I1121 14:44:33.599253 2835167 default_sa.go:45] found service account: "default"
I1121 14:44:33.599325 2835167 default_sa.go:55] duration metric: took 3.703418ms for default service account to be created ...
I1121 14:44:33.599363 2835167 system_pods.go:116] waiting for k8s-apps to be running ...
I1121 14:44:33.603344 2835167 system_pods.go:86] 8 kube-system pods found
I1121 14:44:33.603423 2835167 system_pods.go:89] "coredns-5dd5756b68-86stv" [6a48c3f2-f439-40e1-885b-5850f95d1ffc] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1121 14:44:33.603457 2835167 system_pods.go:89] "etcd-old-k8s-version-092258" [bbb172b1-cd74-44e9-ba24-92155ea08be4] Running
I1121 14:44:33.603486 2835167 system_pods.go:89] "kindnet-tfn5q" [6bec8380-6059-40d0-b0ed-6c3906f84591] Running
I1121 14:44:33.603513 2835167 system_pods.go:89] "kube-apiserver-old-k8s-version-092258" [adf091a2-7b6d-4ba0-a537-9c5f7f93c471] Running
I1121 14:44:33.603549 2835167 system_pods.go:89] "kube-controller-manager-old-k8s-version-092258" [01f77916-588d-469c-b175-3bbcdfe34ce8] Running
I1121 14:44:33.603576 2835167 system_pods.go:89] "kube-proxy-tdwt5" [94e025a3-f19d-40ce-b6a6-9e2eb3b8f998] Running
I1121 14:44:33.603600 2835167 system_pods.go:89] "kube-scheduler-old-k8s-version-092258" [7dfed185-93bd-4218-9c17-a6105d34022f] Running
I1121 14:44:33.603640 2835167 system_pods.go:89] "storage-provisioner" [a31c361f-8fb6-4726-a554-e70884e4d16e] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1121 14:44:33.603680 2835167 retry.go:31] will retry after 248.130267ms: missing components: kube-dns
I1121 14:44:33.863548 2835167 system_pods.go:86] 8 kube-system pods found
I1121 14:44:33.863646 2835167 system_pods.go:89] "coredns-5dd5756b68-86stv" [6a48c3f2-f439-40e1-885b-5850f95d1ffc] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1121 14:44:33.863677 2835167 system_pods.go:89] "etcd-old-k8s-version-092258" [bbb172b1-cd74-44e9-ba24-92155ea08be4] Running
I1121 14:44:33.863699 2835167 system_pods.go:89] "kindnet-tfn5q" [6bec8380-6059-40d0-b0ed-6c3906f84591] Running
I1121 14:44:33.863735 2835167 system_pods.go:89] "kube-apiserver-old-k8s-version-092258" [adf091a2-7b6d-4ba0-a537-9c5f7f93c471] Running
I1121 14:44:33.863762 2835167 system_pods.go:89] "kube-controller-manager-old-k8s-version-092258" [01f77916-588d-469c-b175-3bbcdfe34ce8] Running
I1121 14:44:33.863787 2835167 system_pods.go:89] "kube-proxy-tdwt5" [94e025a3-f19d-40ce-b6a6-9e2eb3b8f998] Running
I1121 14:44:33.863827 2835167 system_pods.go:89] "kube-scheduler-old-k8s-version-092258" [7dfed185-93bd-4218-9c17-a6105d34022f] Running
I1121 14:44:33.863857 2835167 system_pods.go:89] "storage-provisioner" [a31c361f-8fb6-4726-a554-e70884e4d16e] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1121 14:44:33.863904 2835167 retry.go:31] will retry after 379.807267ms: missing components: kube-dns
I1121 14:44:34.248297 2835167 system_pods.go:86] 8 kube-system pods found
I1121 14:44:34.248331 2835167 system_pods.go:89] "coredns-5dd5756b68-86stv" [6a48c3f2-f439-40e1-885b-5850f95d1ffc] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1121 14:44:34.248338 2835167 system_pods.go:89] "etcd-old-k8s-version-092258" [bbb172b1-cd74-44e9-ba24-92155ea08be4] Running
I1121 14:44:34.248344 2835167 system_pods.go:89] "kindnet-tfn5q" [6bec8380-6059-40d0-b0ed-6c3906f84591] Running
I1121 14:44:34.248348 2835167 system_pods.go:89] "kube-apiserver-old-k8s-version-092258" [adf091a2-7b6d-4ba0-a537-9c5f7f93c471] Running
I1121 14:44:34.248352 2835167 system_pods.go:89] "kube-controller-manager-old-k8s-version-092258" [01f77916-588d-469c-b175-3bbcdfe34ce8] Running
I1121 14:44:34.248356 2835167 system_pods.go:89] "kube-proxy-tdwt5" [94e025a3-f19d-40ce-b6a6-9e2eb3b8f998] Running
I1121 14:44:34.248360 2835167 system_pods.go:89] "kube-scheduler-old-k8s-version-092258" [7dfed185-93bd-4218-9c17-a6105d34022f] Running
I1121 14:44:34.248365 2835167 system_pods.go:89] "storage-provisioner" [a31c361f-8fb6-4726-a554-e70884e4d16e] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1121 14:44:34.248380 2835167 retry.go:31] will retry after 418.10052ms: missing components: kube-dns
I1121 14:44:34.670581 2835167 system_pods.go:86] 8 kube-system pods found
I1121 14:44:34.670670 2835167 system_pods.go:89] "coredns-5dd5756b68-86stv" [6a48c3f2-f439-40e1-885b-5850f95d1ffc] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1121 14:44:34.670687 2835167 system_pods.go:89] "etcd-old-k8s-version-092258" [bbb172b1-cd74-44e9-ba24-92155ea08be4] Running
I1121 14:44:34.670694 2835167 system_pods.go:89] "kindnet-tfn5q" [6bec8380-6059-40d0-b0ed-6c3906f84591] Running
I1121 14:44:34.670698 2835167 system_pods.go:89] "kube-apiserver-old-k8s-version-092258" [adf091a2-7b6d-4ba0-a537-9c5f7f93c471] Running
I1121 14:44:34.670703 2835167 system_pods.go:89] "kube-controller-manager-old-k8s-version-092258" [01f77916-588d-469c-b175-3bbcdfe34ce8] Running
I1121 14:44:34.670707 2835167 system_pods.go:89] "kube-proxy-tdwt5" [94e025a3-f19d-40ce-b6a6-9e2eb3b8f998] Running
I1121 14:44:34.670711 2835167 system_pods.go:89] "kube-scheduler-old-k8s-version-092258" [7dfed185-93bd-4218-9c17-a6105d34022f] Running
I1121 14:44:34.670736 2835167 system_pods.go:89] "storage-provisioner" [a31c361f-8fb6-4726-a554-e70884e4d16e] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1121 14:44:34.670759 2835167 retry.go:31] will retry after 454.42102ms: missing components: kube-dns
I1121 14:44:35.130522 2835167 system_pods.go:86] 8 kube-system pods found
I1121 14:44:35.130555 2835167 system_pods.go:89] "coredns-5dd5756b68-86stv" [6a48c3f2-f439-40e1-885b-5850f95d1ffc] Running
I1121 14:44:35.130563 2835167 system_pods.go:89] "etcd-old-k8s-version-092258" [bbb172b1-cd74-44e9-ba24-92155ea08be4] Running
I1121 14:44:35.130568 2835167 system_pods.go:89] "kindnet-tfn5q" [6bec8380-6059-40d0-b0ed-6c3906f84591] Running
I1121 14:44:35.130573 2835167 system_pods.go:89] "kube-apiserver-old-k8s-version-092258" [adf091a2-7b6d-4ba0-a537-9c5f7f93c471] Running
I1121 14:44:35.130579 2835167 system_pods.go:89] "kube-controller-manager-old-k8s-version-092258" [01f77916-588d-469c-b175-3bbcdfe34ce8] Running
I1121 14:44:35.130582 2835167 system_pods.go:89] "kube-proxy-tdwt5" [94e025a3-f19d-40ce-b6a6-9e2eb3b8f998] Running
I1121 14:44:35.130586 2835167 system_pods.go:89] "kube-scheduler-old-k8s-version-092258" [7dfed185-93bd-4218-9c17-a6105d34022f] Running
I1121 14:44:35.130590 2835167 system_pods.go:89] "storage-provisioner" [a31c361f-8fb6-4726-a554-e70884e4d16e] Running
I1121 14:44:35.130598 2835167 system_pods.go:126] duration metric: took 1.531191935s to wait for k8s-apps to be running ...
I1121 14:44:35.130606 2835167 system_svc.go:44] waiting for kubelet service to be running ....
I1121 14:44:35.130663 2835167 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1121 14:44:35.145395 2835167 system_svc.go:56] duration metric: took 14.776546ms WaitForService to wait for kubelet
I1121 14:44:35.145455 2835167 kubeadm.go:587] duration metric: took 15.350619907s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1121 14:44:35.145475 2835167 node_conditions.go:102] verifying NodePressure condition ...
I1121 14:44:35.148334 2835167 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
I1121 14:44:35.148369 2835167 node_conditions.go:123] node cpu capacity is 2
I1121 14:44:35.148382 2835167 node_conditions.go:105] duration metric: took 2.896581ms to run NodePressure ...
I1121 14:44:35.148393 2835167 start.go:242] waiting for startup goroutines ...
I1121 14:44:35.148401 2835167 start.go:247] waiting for cluster config update ...
I1121 14:44:35.148412 2835167 start.go:256] writing updated cluster config ...
I1121 14:44:35.148743 2835167 ssh_runner.go:195] Run: rm -f paused
I1121 14:44:35.152681 2835167 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1121 14:44:35.157000 2835167 pod_ready.go:83] waiting for pod "coredns-5dd5756b68-86stv" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:44:35.162556 2835167 pod_ready.go:94] pod "coredns-5dd5756b68-86stv" is "Ready"
I1121 14:44:35.162601 2835167 pod_ready.go:86] duration metric: took 5.502719ms for pod "coredns-5dd5756b68-86stv" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:44:35.166472 2835167 pod_ready.go:83] waiting for pod "etcd-old-k8s-version-092258" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:44:35.171935 2835167 pod_ready.go:94] pod "etcd-old-k8s-version-092258" is "Ready"
I1121 14:44:35.171965 2835167 pod_ready.go:86] duration metric: took 5.463835ms for pod "etcd-old-k8s-version-092258" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:44:35.175582 2835167 pod_ready.go:83] waiting for pod "kube-apiserver-old-k8s-version-092258" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:44:35.181518 2835167 pod_ready.go:94] pod "kube-apiserver-old-k8s-version-092258" is "Ready"
I1121 14:44:35.181551 2835167 pod_ready.go:86] duration metric: took 5.941771ms for pod "kube-apiserver-old-k8s-version-092258" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:44:35.184926 2835167 pod_ready.go:83] waiting for pod "kube-controller-manager-old-k8s-version-092258" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:44:35.557460 2835167 pod_ready.go:94] pod "kube-controller-manager-old-k8s-version-092258" is "Ready"
I1121 14:44:35.557489 2835167 pod_ready.go:86] duration metric: took 372.537001ms for pod "kube-controller-manager-old-k8s-version-092258" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:44:35.757817 2835167 pod_ready.go:83] waiting for pod "kube-proxy-tdwt5" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:44:36.157592 2835167 pod_ready.go:94] pod "kube-proxy-tdwt5" is "Ready"
I1121 14:44:36.157618 2835167 pod_ready.go:86] duration metric: took 399.771111ms for pod "kube-proxy-tdwt5" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:44:36.357529 2835167 pod_ready.go:83] waiting for pod "kube-scheduler-old-k8s-version-092258" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:44:36.757566 2835167 pod_ready.go:94] pod "kube-scheduler-old-k8s-version-092258" is "Ready"
I1121 14:44:36.757596 2835167 pod_ready.go:86] duration metric: took 400.036784ms for pod "kube-scheduler-old-k8s-version-092258" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:44:36.757610 2835167 pod_ready.go:40] duration metric: took 1.604896006s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1121 14:44:36.818445 2835167 start.go:628] kubectl: 1.33.2, cluster: 1.28.0 (minor skew: 5)
I1121 14:44:36.821296 2835167 out.go:203]
W1121 14:44:36.824281 2835167 out.go:285] ! /usr/local/bin/kubectl is version 1.33.2, which may have incompatibilities with Kubernetes 1.28.0.
I1121 14:44:36.827383 2835167 out.go:179] - Want kubectl v1.28.0? Try 'minikube kubectl -- get pods -A'
I1121 14:44:36.830301 2835167 out.go:179] * Done! kubectl is now configured to use "old-k8s-version-092258" cluster and "default" namespace by default
==> container status <==
CONTAINER      IMAGE          CREATED         STATE    NAME                     ATTEMPT  POD ID         POD                                              NAMESPACE
ab7b2c1339a58  1611cd07b61d5  7 seconds ago   Running  busybox                  0        befa3559e32d9  busybox                                          default
4fa0544fe52cc  97e04611ad434  13 seconds ago  Running  coredns                  0        b58b59f73a24b  coredns-5dd5756b68-86stv                         kube-system
c6ace07879b84  ba04bb24b9575  13 seconds ago  Running  storage-provisioner      0        3680e435bb193  storage-provisioner                              kube-system
495595ef81ee7  b1a8c6f707935  24 seconds ago  Running  kindnet-cni              0        f4ddede8f051f  kindnet-tfn5q                                    kube-system
630ebb9fe56a1  940f54a5bcae9  26 seconds ago  Running  kube-proxy               0        1812faa70a69a  kube-proxy-tdwt5                                 kube-system
331a280f7d8fb  46cc66ccc7c19  46 seconds ago  Running  kube-controller-manager  0        9d7554dad7608  kube-controller-manager-old-k8s-version-092258   kube-system
46391c1bd1fc7  762dce4090c5f  46 seconds ago  Running  kube-scheduler           0        88bf0a72d6a98  kube-scheduler-old-k8s-version-092258            kube-system
32a76684e0ad4  9cdd6470f48c8  46 seconds ago  Running  etcd                     0        edaf6d16372ae  etcd-old-k8s-version-092258                      kube-system
2e1cd1261e99f  00543d2fe5d71  46 seconds ago  Running  kube-apiserver           0        58f4b63de6fd5  kube-apiserver-old-k8s-version-092258            kube-system
==> containerd <==
Nov 21 14:44:33 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:33.681687355Z" level=info msg="CreateContainer within sandbox \"3680e435bb193d749f6cac5ee0a23ca21a777ba606c46a9f454cb42ef4060e47\" for &ContainerMetadata{Name:storage-provisioner,Attempt:0,} returns container id \"c6ace07879b84e705bc8b532f8cd9162404b63746ad9faeae44e245e26539ead\""
Nov 21 14:44:33 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:33.685318192Z" level=info msg="StartContainer for \"c6ace07879b84e705bc8b532f8cd9162404b63746ad9faeae44e245e26539ead\""
Nov 21 14:44:33 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:33.689247614Z" level=info msg="connecting to shim c6ace07879b84e705bc8b532f8cd9162404b63746ad9faeae44e245e26539ead" address="unix:///run/containerd/s/6e69ecb1899b9e75727f8fe7f211e1f82d40f965205bb1565eeae343c2bafd56" protocol=ttrpc version=3
Nov 21 14:44:33 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:33.690732150Z" level=info msg="CreateContainer within sandbox \"b58b59f73a24bb52a5f6c210ec1d0dfbddbbc55dbc0fd609423879994aa0b8ea\" for &ContainerMetadata{Name:coredns,Attempt:0,} returns container id \"4fa0544fe52cc0b0b57fcb28182f1f20dc7c79b3ef53dcb6dc677efecd5a9cc9\""
Nov 21 14:44:33 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:33.693417924Z" level=info msg="StartContainer for \"4fa0544fe52cc0b0b57fcb28182f1f20dc7c79b3ef53dcb6dc677efecd5a9cc9\""
Nov 21 14:44:33 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:33.696432221Z" level=info msg="connecting to shim 4fa0544fe52cc0b0b57fcb28182f1f20dc7c79b3ef53dcb6dc677efecd5a9cc9" address="unix:///run/containerd/s/a5f36c12d3eba8a08addb4ff6f6c45f4b1f35adc7b831563646c8ea27992d003" protocol=ttrpc version=3
Nov 21 14:44:33 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:33.785690082Z" level=info msg="StartContainer for \"4fa0544fe52cc0b0b57fcb28182f1f20dc7c79b3ef53dcb6dc677efecd5a9cc9\" returns successfully"
Nov 21 14:44:33 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:33.815503407Z" level=info msg="StartContainer for \"c6ace07879b84e705bc8b532f8cd9162404b63746ad9faeae44e245e26539ead\" returns successfully"
Nov 21 14:44:37 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:37.378935963Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:4fd396a4-7f86-4bac-b99a-f7427bb5deb9,Namespace:default,Attempt:0,}"
Nov 21 14:44:37 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:37.481124358Z" level=info msg="connecting to shim befa3559e32d903c1abf0bc725ae5f12a26cdbb8b3fb4a57980282d9931d9d26" address="unix:///run/containerd/s/71dcf6bf5df9beb4a3d248e771df5a382c0db1f3a2b82a021424cdeb0bc07ccb" namespace=k8s.io protocol=ttrpc version=3
Nov 21 14:44:37 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:37.544176700Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:4fd396a4-7f86-4bac-b99a-f7427bb5deb9,Namespace:default,Attempt:0,} returns sandbox id \"befa3559e32d903c1abf0bc725ae5f12a26cdbb8b3fb4a57980282d9931d9d26\""
Nov 21 14:44:37 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:37.546307355Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\""
Nov 21 14:44:39 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:39.885153902Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 21 14:44:39 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:39.887206340Z" level=info msg="stop pulling image gcr.io/k8s-minikube/busybox:1.28.4-glibc: active requests=0, bytes read=1937188"
Nov 21 14:44:39 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:39.889567142Z" level=info msg="ImageCreate event name:\"sha256:1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 21 14:44:39 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:39.893770222Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 21 14:44:39 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:39.894536985Z" level=info msg="Pulled image \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" with image id \"sha256:1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c\", repo tag \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\", repo digest \"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\", size \"1935750\" in 2.348184988s"
Nov 21 14:44:39 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:39.894577862Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" returns image reference \"sha256:1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c\""
Nov 21 14:44:39 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:39.898592935Z" level=info msg="CreateContainer within sandbox \"befa3559e32d903c1abf0bc725ae5f12a26cdbb8b3fb4a57980282d9931d9d26\" for container &ContainerMetadata{Name:busybox,Attempt:0,}"
Nov 21 14:44:39 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:39.912721761Z" level=info msg="Container ab7b2c1339a58ca880ca0312fd5f7d62085c7261261bf3758b721a01af22d534: CDI devices from CRI Config.CDIDevices: []"
Nov 21 14:44:39 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:39.926024035Z" level=info msg="CreateContainer within sandbox \"befa3559e32d903c1abf0bc725ae5f12a26cdbb8b3fb4a57980282d9931d9d26\" for &ContainerMetadata{Name:busybox,Attempt:0,} returns container id \"ab7b2c1339a58ca880ca0312fd5f7d62085c7261261bf3758b721a01af22d534\""
Nov 21 14:44:39 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:39.927048695Z" level=info msg="StartContainer for \"ab7b2c1339a58ca880ca0312fd5f7d62085c7261261bf3758b721a01af22d534\""
Nov 21 14:44:39 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:39.928757405Z" level=info msg="connecting to shim ab7b2c1339a58ca880ca0312fd5f7d62085c7261261bf3758b721a01af22d534" address="unix:///run/containerd/s/71dcf6bf5df9beb4a3d248e771df5a382c0db1f3a2b82a021424cdeb0bc07ccb" protocol=ttrpc version=3
Nov 21 14:44:39 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:39.996819622Z" level=info msg="StartContainer for \"ab7b2c1339a58ca880ca0312fd5f7d62085c7261261bf3758b721a01af22d534\" returns successfully"
Nov 21 14:44:46 old-k8s-version-092258 containerd[760]: E1121 14:44:46.197863 760 websocket.go:100] "Unhandled Error" err="unable to upgrade websocket connection: websocket server finished before becoming ready" logger="UnhandledError"
==> coredns [4fa0544fe52cc0b0b57fcb28182f1f20dc7c79b3ef53dcb6dc677efecd5a9cc9] <==
.:53
[INFO] plugin/reload: Running configuration SHA512 = b7aacdf6a6aa730aafe4d018cac9b7b5ecfb346cba84a99f64521f87aef8b4958639c1cf97967716465791d05bd38f372615327b7cb1d93c850bae532744d54d
CoreDNS-1.10.1
linux/arm64, go1.20, 055b2c3
[INFO] 127.0.0.1:36979 - 27014 "HINFO IN 2294269810657567619.5005884824654199478. udp 57 false 512" NXDOMAIN qr,rd,ra 57 0.03500164s
==> describe nodes <==
Name:               old-k8s-version-092258
Roles:              control-plane
Labels:             beta.kubernetes.io/arch=arm64
                    beta.kubernetes.io/os=linux
                    kubernetes.io/arch=arm64
                    kubernetes.io/hostname=old-k8s-version-092258
                    kubernetes.io/os=linux
                    minikube.k8s.io/commit=29e0798733fefbdc471fd2bbb38f6a7ae2a26162
                    minikube.k8s.io/name=old-k8s-version-092258
                    minikube.k8s.io/primary=true
                    minikube.k8s.io/updated_at=2025_11_21T14_44_08_0700
                    minikube.k8s.io/version=v1.37.0
                    node-role.kubernetes.io/control-plane=
                    node.kubernetes.io/exclude-from-external-load-balancers=
Annotations:        kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock
                    node.alpha.kubernetes.io/ttl: 0
                    volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp:  Fri, 21 Nov 2025 14:44:04 +0000
Taints:             <none>
Unschedulable:      false
Lease:
  HolderIdentity:  old-k8s-version-092258
  AcquireTime:     <unset>
  RenewTime:       Fri, 21 Nov 2025 14:44:38 +0000
Conditions:
  Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
  ----             ------  -----------------                 ------------------                ------                       -------
  MemoryPressure   False   Fri, 21 Nov 2025 14:44:37 +0000   Fri, 21 Nov 2025 14:44:00 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
  DiskPressure     False   Fri, 21 Nov 2025 14:44:37 +0000   Fri, 21 Nov 2025 14:44:00 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
  PIDPressure      False   Fri, 21 Nov 2025 14:44:37 +0000   Fri, 21 Nov 2025 14:44:00 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
  Ready            True    Fri, 21 Nov 2025 14:44:37 +0000   Fri, 21 Nov 2025 14:44:33 +0000   KubeletReady                 kubelet is posting ready status
Addresses:
  InternalIP:  192.168.76.2
  Hostname:    old-k8s-version-092258
Capacity:
  cpu:                2
  ephemeral-storage:  203034800Ki
  hugepages-1Gi:      0
  hugepages-2Mi:      0
  hugepages-32Mi:     0
  hugepages-64Ki:     0
  memory:             8022296Ki
  pods:               110
Allocatable:
  cpu:                2
  ephemeral-storage:  203034800Ki
  hugepages-1Gi:      0
  hugepages-2Mi:      0
  hugepages-32Mi:     0
  hugepages-64Ki:     0
  memory:             8022296Ki
  pods:               110
System Info:
  Machine ID:                 952c288fdad5a6f53a4deda5691cff59
  System UUID:                9e4fe947-6f95-4914-9cd3-ccd713480a21
  Boot ID:                    41b0e09d-5a9a-49c9-8980-dca608ba3fce
  Kernel Version:             5.15.0-1084-aws
  OS Image:                   Debian GNU/Linux 12 (bookworm)
  Operating System:           linux
  Architecture:               arm64
  Container Runtime Version:  containerd://2.1.5
  Kubelet Version:            v1.28.0
  Kube-Proxy Version:         v1.28.0
PodCIDR:                      10.244.0.0/24
PodCIDRs:                     10.244.0.0/24
Non-terminated Pods:          (9 in total)
  Namespace    Name                                             CPU Requests  CPU Limits  Memory Requests  Memory Limits  Age
  ---------    ----                                             ------------  ----------  ---------------  -------------  ---
  default      busybox                                          0 (0%)        0 (0%)      0 (0%)           0 (0%)         10s
  kube-system  coredns-5dd5756b68-86stv                         100m (5%)     0 (0%)      70Mi (0%)        170Mi (2%)     27s
  kube-system  etcd-old-k8s-version-092258                      100m (5%)     0 (0%)      100Mi (1%)       0 (0%)         40s
  kube-system  kindnet-tfn5q                                    100m (5%)     100m (5%)   50Mi (0%)        50Mi (0%)      28s
  kube-system  kube-apiserver-old-k8s-version-092258            250m (12%)    0 (0%)      0 (0%)           0 (0%)         40s
  kube-system  kube-controller-manager-old-k8s-version-092258   200m (10%)    0 (0%)      0 (0%)           0 (0%)         40s
  kube-system  kube-proxy-tdwt5                                 0 (0%)        0 (0%)      0 (0%)           0 (0%)         28s
  kube-system  kube-scheduler-old-k8s-version-092258            100m (5%)     0 (0%)      0 (0%)           0 (0%)         40s
  kube-system  storage-provisioner                              0 (0%)        0 (0%)      0 (0%)           0 (0%)         26s
Allocated resources:
  (Total limits may be over 100 percent, i.e., overcommitted.)
  Resource           Requests    Limits
  --------           --------    ------
  cpu                850m (42%)  100m (5%)
  memory             220Mi (2%)  220Mi (2%)
  ephemeral-storage  0 (0%)      0 (0%)
  hugepages-1Gi      0 (0%)      0 (0%)
  hugepages-2Mi      0 (0%)      0 (0%)
  hugepages-32Mi     0 (0%)      0 (0%)
  hugepages-64Ki     0 (0%)      0 (0%)
Events:
  Type    Reason                   Age   From             Message
  ----    ------                   ----  ----             -------
  Normal  Starting                 26s   kube-proxy
  Normal  Starting                 40s   kubelet          Starting kubelet.
  Normal  NodeHasSufficientMemory  40s   kubelet          Node old-k8s-version-092258 status is now: NodeHasSufficientMemory
  Normal  NodeHasNoDiskPressure    40s   kubelet          Node old-k8s-version-092258 status is now: NodeHasNoDiskPressure
  Normal  NodeHasSufficientPID     40s   kubelet          Node old-k8s-version-092258 status is now: NodeHasSufficientPID
  Normal  NodeAllocatableEnforced  40s   kubelet          Updated Node Allocatable limit across pods
  Normal  RegisteredNode           28s   node-controller  Node old-k8s-version-092258 event: Registered Node old-k8s-version-092258 in Controller
  Normal  NodeReady                14s   kubelet          Node old-k8s-version-092258 status is now: NodeReady
==> dmesg <==
[Nov21 13:02] overlayfs: idmapped layers are currently not supported
[Nov21 13:03] overlayfs: idmapped layers are currently not supported
[Nov21 13:06] overlayfs: idmapped layers are currently not supported
[Nov21 13:08] overlayfs: idmapped layers are currently not supported
[Nov21 13:09] overlayfs: idmapped layers are currently not supported
[Nov21 13:10] overlayfs: idmapped layers are currently not supported
[ +19.808801] overlayfs: idmapped layers are currently not supported
[Nov21 13:11] overlayfs: idmapped layers are currently not supported
[Nov21 13:12] overlayfs: idmapped layers are currently not supported
[Nov21 13:13] overlayfs: idmapped layers are currently not supported
[Nov21 13:14] overlayfs: idmapped layers are currently not supported
[Nov21 13:15] overlayfs: idmapped layers are currently not supported
[ +16.772572] overlayfs: idmapped layers are currently not supported
[Nov21 13:16] overlayfs: idmapped layers are currently not supported
[Nov21 13:17] overlayfs: idmapped layers are currently not supported
[ +27.396777] overlayfs: idmapped layers are currently not supported
[Nov21 13:18] overlayfs: idmapped layers are currently not supported
[ +25.430119] overlayfs: idmapped layers are currently not supported
[Nov21 13:19] overlayfs: idmapped layers are currently not supported
[Nov21 13:20] overlayfs: idmapped layers are currently not supported
[Nov21 13:21] overlayfs: idmapped layers are currently not supported
[Nov21 13:22] overlayfs: idmapped layers are currently not supported
[Nov21 13:23] overlayfs: idmapped layers are currently not supported
[Nov21 13:24] overlayfs: idmapped layers are currently not supported
[Nov21 13:55] kauditd_printk_skb: 8 callbacks suppressed
==> etcd [32a76684e0ad48afa24dffa56bbd612225875cea5526f2fe91da5620cdd3737e] <==
{"level":"info","ts":"2025-11-21T14:44:00.857351Z","caller":"fileutil/purge.go:44","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/snap","suffix":"snap.db","max":5,"interval":"30s"}
{"level":"info","ts":"2025-11-21T14:44:00.860773Z","caller":"fileutil/purge.go:44","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/snap","suffix":"snap","max":5,"interval":"30s"}
{"level":"info","ts":"2025-11-21T14:44:00.860806Z","caller":"fileutil/purge.go:44","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/wal","suffix":"wal","max":5,"interval":"30s"}
{"level":"info","ts":"2025-11-21T14:44:00.857529Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 switched to configuration voters=(16896983918768216326)"}
{"level":"info","ts":"2025-11-21T14:44:00.861198Z","caller":"membership/cluster.go:421","msg":"added member","cluster-id":"6f20f2c4b2fb5f8a","local-member-id":"ea7e25599daad906","added-peer-id":"ea7e25599daad906","added-peer-peer-urls":["https://192.168.76.2:2380"]}
{"level":"info","ts":"2025-11-21T14:44:00.857558Z","caller":"embed/etcd.go:278","msg":"now serving peer/client/metrics","local-member-id":"ea7e25599daad906","initial-advertise-peer-urls":["https://192.168.76.2:2380"],"listen-peer-urls":["https://192.168.76.2:2380"],"advertise-client-urls":["https://192.168.76.2:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.76.2:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
{"level":"info","ts":"2025-11-21T14:44:00.857577Z","caller":"embed/etcd.go:855","msg":"serving metrics","address":"http://127.0.0.1:2381"}
{"level":"info","ts":"2025-11-21T14:44:01.029245Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 is starting a new election at term 1"}
{"level":"info","ts":"2025-11-21T14:44:01.029466Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 became pre-candidate at term 1"}
{"level":"info","ts":"2025-11-21T14:44:01.02957Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 received MsgPreVoteResp from ea7e25599daad906 at term 1"}
{"level":"info","ts":"2025-11-21T14:44:01.029683Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 became candidate at term 2"}
{"level":"info","ts":"2025-11-21T14:44:01.029774Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 received MsgVoteResp from ea7e25599daad906 at term 2"}
{"level":"info","ts":"2025-11-21T14:44:01.029858Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 became leader at term 2"}
{"level":"info","ts":"2025-11-21T14:44:01.029946Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: ea7e25599daad906 elected leader ea7e25599daad906 at term 2"}
{"level":"info","ts":"2025-11-21T14:44:01.033213Z","caller":"etcdserver/server.go:2062","msg":"published local member to cluster through raft","local-member-id":"ea7e25599daad906","local-member-attributes":"{Name:old-k8s-version-092258 ClientURLs:[https://192.168.76.2:2379]}","request-path":"/0/members/ea7e25599daad906/attributes","cluster-id":"6f20f2c4b2fb5f8a","publish-timeout":"7s"}
{"level":"info","ts":"2025-11-21T14:44:01.033415Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-21T14:44:01.034523Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2025-11-21T14:44:01.034765Z","caller":"etcdserver/server.go:2571","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-21T14:44:01.035192Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-21T14:44:01.036885Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"6f20f2c4b2fb5f8a","local-member-id":"ea7e25599daad906","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-21T14:44:01.04112Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-21T14:44:01.041303Z","caller":"etcdserver/server.go:2595","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-21T14:44:01.055053Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.76.2:2379"}
{"level":"info","ts":"2025-11-21T14:44:01.060057Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2025-11-21T14:44:01.060254Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
==> kernel <==
14:44:47 up 19:27, 0 user, load average: 2.19, 3.09, 2.75
Linux old-k8s-version-092258 5.15.0-1084-aws #91~20.04.1-Ubuntu SMP Fri May 2 07:00:04 UTC 2025 aarch64 GNU/Linux
PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"
==> kindnet [495595ef81ee7d983a4b62890080114a468713ef14bf361720fb1ef51e30f35d] <==
I1121 14:44:22.827794 1 main.go:109] connected to apiserver: https://10.96.0.1:443
I1121 14:44:22.828022 1 main.go:139] hostIP = 192.168.76.2
podIP = 192.168.76.2
I1121 14:44:22.828147 1 main.go:148] setting mtu 1500 for CNI
I1121 14:44:22.828164 1 main.go:178] kindnetd IP family: "ipv4"
I1121 14:44:22.828175 1 main.go:182] noMask IPv4 subnets: [10.244.0.0/16]
time="2025-11-21T14:44:23Z" level=info msg="Created plugin 10-kube-network-policies (kindnetd, handles RunPodSandbox,RemovePodSandbox)"
I1121 14:44:23.024438 1 controller.go:377] "Starting controller" name="kube-network-policies"
I1121 14:44:23.024516 1 controller.go:381] "Waiting for informer caches to sync"
I1121 14:44:23.024545 1 shared_informer.go:350] "Waiting for caches to sync" controller="kube-network-policies"
I1121 14:44:23.025736 1 controller.go:390] nri plugin exited: failed to connect to NRI service: dial unix /var/run/nri/nri.sock: connect: no such file or directory
I1121 14:44:23.224664 1 shared_informer.go:357] "Caches are synced" controller="kube-network-policies"
I1121 14:44:23.224747 1 metrics.go:72] Registering metrics
I1121 14:44:23.224843 1 controller.go:711] "Syncing nftables rules"
I1121 14:44:33.032002 1 main.go:297] Handling node with IPs: map[192.168.76.2:{}]
I1121 14:44:33.032055 1 main.go:301] handling current node
I1121 14:44:43.024839 1 main.go:297] Handling node with IPs: map[192.168.76.2:{}]
I1121 14:44:43.024870 1 main.go:301] handling current node
==> kube-apiserver [2e1cd1261e99f5cf421f076a966eedd90258d75cd1735ec5e4bc9ae1d5576945] <==
I1121 14:44:04.361764 1 shared_informer.go:318] Caches are synced for cluster_authentication_trust_controller
I1121 14:44:04.361814 1 cache.go:39] Caches are synced for APIServiceRegistrationController controller
I1121 14:44:04.367613 1 shared_informer.go:318] Caches are synced for crd-autoregister
I1121 14:44:04.367684 1 aggregator.go:166] initial CRD sync complete...
I1121 14:44:04.367697 1 autoregister_controller.go:141] Starting autoregister controller
I1121 14:44:04.367705 1 cache.go:32] Waiting for caches to sync for autoregister controller
I1121 14:44:04.367714 1 cache.go:39] Caches are synced for autoregister controller
I1121 14:44:04.374026 1 controller.go:624] quota admission added evaluator for: namespaces
I1121 14:44:04.403168 1 controller.go:624] quota admission added evaluator for: leases.coordination.k8s.io
I1121 14:44:04.422675 1 shared_informer.go:318] Caches are synced for node_authorizer
I1121 14:44:04.968994 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I1121 14:44:04.974442 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I1121 14:44:04.974470 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I1121 14:44:05.722694 1 controller.go:624] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1121 14:44:05.789801 1 controller.go:624] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1121 14:44:05.889316 1 alloc.go:330] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W1121 14:44:05.896471 1 lease.go:263] Resetting endpoints for master service "kubernetes" to [192.168.76.2]
I1121 14:44:05.897760 1 controller.go:624] quota admission added evaluator for: endpoints
I1121 14:44:05.902883 1 controller.go:624] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1121 14:44:06.203440 1 controller.go:624] quota admission added evaluator for: serviceaccounts
I1121 14:44:07.370770 1 controller.go:624] quota admission added evaluator for: deployments.apps
I1121 14:44:07.383830 1 alloc.go:330] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I1121 14:44:07.398248 1 controller.go:624] quota admission added evaluator for: daemonsets.apps
I1121 14:44:19.834381 1 controller.go:624] quota admission added evaluator for: controllerrevisions.apps
I1121 14:44:20.024025 1 controller.go:624] quota admission added evaluator for: replicasets.apps
==> kube-controller-manager [331a280f7d8fb0893c46a22085825d84571038b23952dd64524b062bc7f08b74] <==
I1121 14:44:19.212645 1 shared_informer.go:318] Caches are synced for endpoint
I1121 14:44:19.212738 1 shared_informer.go:318] Caches are synced for HPA
I1121 14:44:19.212773 1 shared_informer.go:318] Caches are synced for disruption
I1121 14:44:19.212803 1 shared_informer.go:318] Caches are synced for attach detach
I1121 14:44:19.627294 1 shared_informer.go:318] Caches are synced for garbage collector
I1121 14:44:19.659367 1 shared_informer.go:318] Caches are synced for garbage collector
I1121 14:44:19.659575 1 garbagecollector.go:166] "All resource monitors have synced. Proceeding to collect garbage"
I1121 14:44:19.901701 1 event.go:307] "Event occurred" object="kube-system/kube-proxy" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-tdwt5"
I1121 14:44:19.929907 1 event.go:307] "Event occurred" object="kube-system/kindnet" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kindnet-tfn5q"
I1121 14:44:20.044696 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set coredns-5dd5756b68 to 2"
I1121 14:44:20.152846 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-v7mnp"
I1121 14:44:20.183528 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-86stv"
I1121 14:44:20.224114 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="183.103851ms"
I1121 14:44:20.243991 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="19.819807ms"
I1121 14:44:20.244109 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="83.6µs"
I1121 14:44:21.107620 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set coredns-5dd5756b68 to 1 from 2"
I1121 14:44:21.134973 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: coredns-5dd5756b68-v7mnp"
I1121 14:44:21.155140 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="49.168473ms"
I1121 14:44:21.171976 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="16.779877ms"
I1121 14:44:21.172179 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="46.497µs"
I1121 14:44:33.168291 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="88.391µs"
I1121 14:44:33.193460 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="100.346µs"
I1121 14:44:34.128063 1 node_lifecycle_controller.go:1048] "Controller detected that some Nodes are Ready. Exiting master disruption mode"
I1121 14:44:34.848411 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="19.00588ms"
I1121 14:44:34.848685 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="129.432µs"
==> kube-proxy [630ebb9fe56a1bea1ef2dfe24de2086594eb0afbdaf547e41ce7c777d9eb7705] <==
I1121 14:44:20.860188 1 server_others.go:69] "Using iptables proxy"
I1121 14:44:20.878393 1 node.go:141] Successfully retrieved node IP: 192.168.76.2
I1121 14:44:20.931156 1 server.go:632] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I1121 14:44:20.936886 1 server_others.go:152] "Using iptables Proxier"
I1121 14:44:20.936939 1 server_others.go:421] "Detect-local-mode set to ClusterCIDR, but no cluster CIDR for family" ipFamily="IPv6"
I1121 14:44:20.936948 1 server_others.go:438] "Defaulting to no-op detect-local"
I1121 14:44:20.936971 1 proxier.go:251] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
I1121 14:44:20.937761 1 server.go:846] "Version info" version="v1.28.0"
I1121 14:44:20.937784 1 server.go:848] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1121 14:44:20.938544 1 config.go:188] "Starting service config controller"
I1121 14:44:20.938593 1 shared_informer.go:311] Waiting for caches to sync for service config
I1121 14:44:20.938625 1 config.go:97] "Starting endpoint slice config controller"
I1121 14:44:20.938635 1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
I1121 14:44:20.940292 1 config.go:315] "Starting node config controller"
I1121 14:44:20.940306 1 shared_informer.go:311] Waiting for caches to sync for node config
I1121 14:44:21.040184 1 shared_informer.go:318] Caches are synced for endpoint slice config
I1121 14:44:21.040242 1 shared_informer.go:318] Caches are synced for service config
I1121 14:44:21.040487 1 shared_informer.go:318] Caches are synced for node config
==> kube-scheduler [46391c1bd1fc737d22bd847c1d63f9bd14e4d892ef33d465e9204dc377dd6002] <==
W1121 14:44:04.821909 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E1121 14:44:04.822028 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
W1121 14:44:04.822197 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E1121 14:44:04.822220 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
W1121 14:44:04.824601 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E1121 14:44:04.825484 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
W1121 14:44:04.824774 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
W1121 14:44:04.825902 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E1121 14:44:04.826065 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E1121 14:44:04.826044 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
W1121 14:44:04.824991 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E1121 14:44:04.826420 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
W1121 14:44:04.825212 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
W1121 14:44:04.825284 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
W1121 14:44:04.825347 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
W1121 14:44:04.825382 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
W1121 14:44:04.825431 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
W1121 14:44:04.824928 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E1121 14:44:04.826802 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E1121 14:44:04.826945 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E1121 14:44:04.827063 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E1121 14:44:04.827207 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E1121 14:44:04.827355 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E1121 14:44:04.827495 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
I1121 14:44:06.304765 1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Nov 21 14:44:19 old-k8s-version-092258 kubelet[1526]: I1121 14:44:19.149863 1526 kuberuntime_manager.go:1463] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/24"
Nov 21 14:44:19 old-k8s-version-092258 kubelet[1526]: I1121 14:44:19.150498 1526 kubelet_network.go:61] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24"
Nov 21 14:44:19 old-k8s-version-092258 kubelet[1526]: I1121 14:44:19.918591 1526 topology_manager.go:215] "Topology Admit Handler" podUID="94e025a3-f19d-40ce-b6a6-9e2eb3b8f998" podNamespace="kube-system" podName="kube-proxy-tdwt5"
Nov 21 14:44:19 old-k8s-version-092258 kubelet[1526]: I1121 14:44:19.954210 1526 topology_manager.go:215] "Topology Admit Handler" podUID="6bec8380-6059-40d0-b0ed-6c3906f84591" podNamespace="kube-system" podName="kindnet-tfn5q"
Nov 21 14:44:19 old-k8s-version-092258 kubelet[1526]: I1121 14:44:19.980360 1526 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/94e025a3-f19d-40ce-b6a6-9e2eb3b8f998-kube-proxy\") pod \"kube-proxy-tdwt5\" (UID: \"94e025a3-f19d-40ce-b6a6-9e2eb3b8f998\") " pod="kube-system/kube-proxy-tdwt5"
Nov 21 14:44:19 old-k8s-version-092258 kubelet[1526]: I1121 14:44:19.980619 1526 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/94e025a3-f19d-40ce-b6a6-9e2eb3b8f998-xtables-lock\") pod \"kube-proxy-tdwt5\" (UID: \"94e025a3-f19d-40ce-b6a6-9e2eb3b8f998\") " pod="kube-system/kube-proxy-tdwt5"
Nov 21 14:44:19 old-k8s-version-092258 kubelet[1526]: I1121 14:44:19.980760 1526 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/94e025a3-f19d-40ce-b6a6-9e2eb3b8f998-lib-modules\") pod \"kube-proxy-tdwt5\" (UID: \"94e025a3-f19d-40ce-b6a6-9e2eb3b8f998\") " pod="kube-system/kube-proxy-tdwt5"
Nov 21 14:44:19 old-k8s-version-092258 kubelet[1526]: I1121 14:44:19.980886 1526 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-cfg\" (UniqueName: \"kubernetes.io/host-path/6bec8380-6059-40d0-b0ed-6c3906f84591-cni-cfg\") pod \"kindnet-tfn5q\" (UID: \"6bec8380-6059-40d0-b0ed-6c3906f84591\") " pod="kube-system/kindnet-tfn5q"
Nov 21 14:44:19 old-k8s-version-092258 kubelet[1526]: I1121 14:44:19.981004 1526 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/6bec8380-6059-40d0-b0ed-6c3906f84591-xtables-lock\") pod \"kindnet-tfn5q\" (UID: \"6bec8380-6059-40d0-b0ed-6c3906f84591\") " pod="kube-system/kindnet-tfn5q"
Nov 21 14:44:19 old-k8s-version-092258 kubelet[1526]: I1121 14:44:19.981145 1526 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/6bec8380-6059-40d0-b0ed-6c3906f84591-lib-modules\") pod \"kindnet-tfn5q\" (UID: \"6bec8380-6059-40d0-b0ed-6c3906f84591\") " pod="kube-system/kindnet-tfn5q"
Nov 21 14:44:19 old-k8s-version-092258 kubelet[1526]: I1121 14:44:19.981319 1526 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m5lf7\" (UniqueName: \"kubernetes.io/projected/6bec8380-6059-40d0-b0ed-6c3906f84591-kube-api-access-m5lf7\") pod \"kindnet-tfn5q\" (UID: \"6bec8380-6059-40d0-b0ed-6c3906f84591\") " pod="kube-system/kindnet-tfn5q"
Nov 21 14:44:19 old-k8s-version-092258 kubelet[1526]: I1121 14:44:19.981442 1526 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g2rxs\" (UniqueName: \"kubernetes.io/projected/94e025a3-f19d-40ce-b6a6-9e2eb3b8f998-kube-api-access-g2rxs\") pod \"kube-proxy-tdwt5\" (UID: \"94e025a3-f19d-40ce-b6a6-9e2eb3b8f998\") " pod="kube-system/kube-proxy-tdwt5"
Nov 21 14:44:22 old-k8s-version-092258 kubelet[1526]: I1121 14:44:22.794618 1526 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kube-proxy-tdwt5" podStartSLOduration=3.794572825 podCreationTimestamp="2025-11-21 14:44:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:44:21.775006119 +0000 UTC m=+14.440254134" watchObservedRunningTime="2025-11-21 14:44:22.794572825 +0000 UTC m=+15.459820816"
Nov 21 14:44:33 old-k8s-version-092258 kubelet[1526]: I1121 14:44:33.136665 1526 kubelet_node_status.go:493] "Fast updating node status as it just became ready"
Nov 21 14:44:33 old-k8s-version-092258 kubelet[1526]: I1121 14:44:33.168328 1526 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kindnet-tfn5q" podStartSLOduration=12.176504309 podCreationTimestamp="2025-11-21 14:44:19 +0000 UTC" firstStartedPulling="2025-11-21 14:44:20.481898213 +0000 UTC m=+13.147146196" lastFinishedPulling="2025-11-21 14:44:22.473675721 +0000 UTC m=+15.138923704" observedRunningTime="2025-11-21 14:44:22.795889554 +0000 UTC m=+15.461137546" watchObservedRunningTime="2025-11-21 14:44:33.168281817 +0000 UTC m=+25.833529808"
Nov 21 14:44:33 old-k8s-version-092258 kubelet[1526]: I1121 14:44:33.168810 1526 topology_manager.go:215] "Topology Admit Handler" podUID="6a48c3f2-f439-40e1-885b-5850f95d1ffc" podNamespace="kube-system" podName="coredns-5dd5756b68-86stv"
Nov 21 14:44:33 old-k8s-version-092258 kubelet[1526]: I1121 14:44:33.174944 1526 topology_manager.go:215] "Topology Admit Handler" podUID="a31c361f-8fb6-4726-a554-e70884e4d16e" podNamespace="kube-system" podName="storage-provisioner"
Nov 21 14:44:33 old-k8s-version-092258 kubelet[1526]: I1121 14:44:33.200360 1526 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ktnhs\" (UniqueName: \"kubernetes.io/projected/6a48c3f2-f439-40e1-885b-5850f95d1ffc-kube-api-access-ktnhs\") pod \"coredns-5dd5756b68-86stv\" (UID: \"6a48c3f2-f439-40e1-885b-5850f95d1ffc\") " pod="kube-system/coredns-5dd5756b68-86stv"
Nov 21 14:44:33 old-k8s-version-092258 kubelet[1526]: I1121 14:44:33.200594 1526 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/a31c361f-8fb6-4726-a554-e70884e4d16e-tmp\") pod \"storage-provisioner\" (UID: \"a31c361f-8fb6-4726-a554-e70884e4d16e\") " pod="kube-system/storage-provisioner"
Nov 21 14:44:33 old-k8s-version-092258 kubelet[1526]: I1121 14:44:33.200711 1526 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xxf7g\" (UniqueName: \"kubernetes.io/projected/a31c361f-8fb6-4726-a554-e70884e4d16e-kube-api-access-xxf7g\") pod \"storage-provisioner\" (UID: \"a31c361f-8fb6-4726-a554-e70884e4d16e\") " pod="kube-system/storage-provisioner"
Nov 21 14:44:33 old-k8s-version-092258 kubelet[1526]: I1121 14:44:33.200832 1526 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6a48c3f2-f439-40e1-885b-5850f95d1ffc-config-volume\") pod \"coredns-5dd5756b68-86stv\" (UID: \"6a48c3f2-f439-40e1-885b-5850f95d1ffc\") " pod="kube-system/coredns-5dd5756b68-86stv"
Nov 21 14:44:34 old-k8s-version-092258 kubelet[1526]: I1121 14:44:34.812385 1526 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=13.812339422 podCreationTimestamp="2025-11-21 14:44:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:44:34.811999567 +0000 UTC m=+27.477247550" watchObservedRunningTime="2025-11-21 14:44:34.812339422 +0000 UTC m=+27.477587405"
Nov 21 14:44:34 old-k8s-version-092258 kubelet[1526]: I1121 14:44:34.830835 1526 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/coredns-5dd5756b68-86stv" podStartSLOduration=14.83078559 podCreationTimestamp="2025-11-21 14:44:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:44:34.830509339 +0000 UTC m=+27.495757330" watchObservedRunningTime="2025-11-21 14:44:34.83078559 +0000 UTC m=+27.496033581"
Nov 21 14:44:37 old-k8s-version-092258 kubelet[1526]: I1121 14:44:37.064261 1526 topology_manager.go:215] "Topology Admit Handler" podUID="4fd396a4-7f86-4bac-b99a-f7427bb5deb9" podNamespace="default" podName="busybox"
Nov 21 14:44:37 old-k8s-version-092258 kubelet[1526]: I1121 14:44:37.128201 1526 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tmbgq\" (UniqueName: \"kubernetes.io/projected/4fd396a4-7f86-4bac-b99a-f7427bb5deb9-kube-api-access-tmbgq\") pod \"busybox\" (UID: \"4fd396a4-7f86-4bac-b99a-f7427bb5deb9\") " pod="default/busybox"
==> storage-provisioner [c6ace07879b84e705bc8b532f8cd9162404b63746ad9faeae44e245e26539ead] <==
I1121 14:44:33.821827 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1121 14:44:33.835269 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1121 14:44:33.835522 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I1121 14:44:33.844745 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I1121 14:44:33.845108 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_old-k8s-version-092258_43824c0e-5444-4d63-9465-8f0bcb9e3d2b!
I1121 14:44:33.845246 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"d6d6cfaa-85d7-41d0-9ba2-d501adb4d7fd", APIVersion:"v1", ResourceVersion:"394", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' old-k8s-version-092258_43824c0e-5444-4d63-9465-8f0bcb9e3d2b became leader
I1121 14:44:33.946309 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_old-k8s-version-092258_43824c0e-5444-4d63-9465-8f0bcb9e3d2b!
-- /stdout --
helpers_test.go:262: (dbg) Run: out/minikube-linux-arm64 status --format={{.APIServer}} -p old-k8s-version-092258 -n old-k8s-version-092258
helpers_test.go:269: (dbg) Run: kubectl --context old-k8s-version-092258 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:293: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: end of post-mortem logs <<<
helpers_test.go:294: ---------------------/post-mortem---------------------------------
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:247: (dbg) Run: out/minikube-linux-arm64 status --format={{.Host}} -p old-k8s-version-092258 -n old-k8s-version-092258
helpers_test.go:252: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-arm64 -p old-k8s-version-092258 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-arm64 -p old-k8s-version-092258 logs -n 25: (1.17643389s)
helpers_test.go:260: TestStartStop/group/old-k8s-version/serial/DeployApp logs:
-- stdout --
==> Audit <==
┌─────────┬─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬──────────────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼──────────────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ ssh │ -p cilium-650772 sudo cat /etc/docker/daemon.json │ cilium-650772 │ jenkins │ v1.37.0 │ 21 Nov 25 14:42 UTC │ │
│ ssh │ -p cilium-650772 sudo docker system info │ cilium-650772 │ jenkins │ v1.37.0 │ 21 Nov 25 14:42 UTC │ │
│ ssh │ -p cilium-650772 sudo systemctl status cri-docker --all --full --no-pager │ cilium-650772 │ jenkins │ v1.37.0 │ 21 Nov 25 14:42 UTC │ │
│ ssh │ -p cilium-650772 sudo systemctl cat cri-docker --no-pager │ cilium-650772 │ jenkins │ v1.37.0 │ 21 Nov 25 14:42 UTC │ │
│ ssh │ -p cilium-650772 sudo cat /etc/systemd/system/cri-docker.service.d/10-cni.conf │ cilium-650772 │ jenkins │ v1.37.0 │ 21 Nov 25 14:42 UTC │ │
│ ssh │ -p cilium-650772 sudo cat /usr/lib/systemd/system/cri-docker.service │ cilium-650772 │ jenkins │ v1.37.0 │ 21 Nov 25 14:42 UTC │ │
│ ssh │ -p cilium-650772 sudo cri-dockerd --version │ cilium-650772 │ jenkins │ v1.37.0 │ 21 Nov 25 14:42 UTC │ │
│ ssh │ -p cilium-650772 sudo systemctl status containerd --all --full --no-pager │ cilium-650772 │ jenkins │ v1.37.0 │ 21 Nov 25 14:42 UTC │ │
│ ssh │ -p cilium-650772 sudo systemctl cat containerd --no-pager │ cilium-650772 │ jenkins │ v1.37.0 │ 21 Nov 25 14:42 UTC │ │
│ ssh │ -p cilium-650772 sudo cat /lib/systemd/system/containerd.service │ cilium-650772 │ jenkins │ v1.37.0 │ 21 Nov 25 14:42 UTC │ │
│ ssh │ -p cilium-650772 sudo cat /etc/containerd/config.toml │ cilium-650772 │ jenkins │ v1.37.0 │ 21 Nov 25 14:42 UTC │ │
│ ssh │ -p cilium-650772 sudo containerd config dump │ cilium-650772 │ jenkins │ v1.37.0 │ 21 Nov 25 14:42 UTC │ │
│ ssh │ -p cilium-650772 sudo systemctl status crio --all --full --no-pager │ cilium-650772 │ jenkins │ v1.37.0 │ 21 Nov 25 14:42 UTC │ │
│ ssh │ -p cilium-650772 sudo systemctl cat crio --no-pager │ cilium-650772 │ jenkins │ v1.37.0 │ 21 Nov 25 14:42 UTC │ │
│ ssh │ -p cilium-650772 sudo find /etc/crio -type f -exec sh -c 'echo {}; cat {}' \; │ cilium-650772 │ jenkins │ v1.37.0 │ 21 Nov 25 14:42 UTC │ │
│ ssh │ -p cilium-650772 sudo crio config │ cilium-650772 │ jenkins │ v1.37.0 │ 21 Nov 25 14:42 UTC │ │
│ delete │ -p cilium-650772 │ cilium-650772 │ jenkins │ v1.37.0 │ 21 Nov 25 14:42 UTC │ 21 Nov 25 14:42 UTC │
│ start │ -p cert-expiration-184410 --memory=3072 --cert-expiration=3m --driver=docker --container-runtime=containerd │ cert-expiration-184410 │ jenkins │ v1.37.0 │ 21 Nov 25 14:42 UTC │ 21 Nov 25 14:42 UTC │
│ ssh │ force-systemd-env-041746 ssh cat /etc/containerd/config.toml │ force-systemd-env-041746 │ jenkins │ v1.37.0 │ 21 Nov 25 14:42 UTC │ 21 Nov 25 14:42 UTC │
│ delete │ -p force-systemd-env-041746 │ force-systemd-env-041746 │ jenkins │ v1.37.0 │ 21 Nov 25 14:42 UTC │ 21 Nov 25 14:43 UTC │
│ start │ -p cert-options-035007 --memory=3072 --apiserver-ips=127.0.0.1 --apiserver-ips=192.168.15.15 --apiserver-names=localhost --apiserver-names=www.google.com --apiserver-port=8555 --driver=docker --container-runtime=containerd │ cert-options-035007 │ jenkins │ v1.37.0 │ 21 Nov 25 14:43 UTC │ 21 Nov 25 14:43 UTC │
│ ssh │ cert-options-035007 ssh openssl x509 -text -noout -in /var/lib/minikube/certs/apiserver.crt │ cert-options-035007 │ jenkins │ v1.37.0 │ 21 Nov 25 14:43 UTC │ 21 Nov 25 14:43 UTC │
│ ssh │ -p cert-options-035007 -- sudo cat /etc/kubernetes/admin.conf │ cert-options-035007 │ jenkins │ v1.37.0 │ 21 Nov 25 14:43 UTC │ 21 Nov 25 14:43 UTC │
│ delete │ -p cert-options-035007 │ cert-options-035007 │ jenkins │ v1.37.0 │ 21 Nov 25 14:43 UTC │ 21 Nov 25 14:43 UTC │
│ start │ -p old-k8s-version-092258 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.28.0 │ old-k8s-version-092258 │ jenkins │ v1.37.0 │ 21 Nov 25 14:43 UTC │ 21 Nov 25 14:44 UTC │
└─────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/11/21 14:43:36
Running on machine: ip-172-31-21-244
Binary: Built with gc go1.24.6 for linux/arm64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1121 14:43:36.383330 2835167 out.go:360] Setting OutFile to fd 1 ...
I1121 14:43:36.383513 2835167 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1121 14:43:36.383524 2835167 out.go:374] Setting ErrFile to fd 2...
I1121 14:43:36.383530 2835167 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1121 14:43:36.383828 2835167 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21847-2633933/.minikube/bin
I1121 14:43:36.384345 2835167 out.go:368] Setting JSON to false
I1121 14:43:36.385436 2835167 start.go:133] hostinfo: {"hostname":"ip-172-31-21-244","uptime":69965,"bootTime":1763666252,"procs":190,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1084-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"da8ac1fd-6236-412a-a346-95873c98230d"}
I1121 14:43:36.385513 2835167 start.go:143] virtualization:
I1121 14:43:36.390376 2835167 out.go:179] * [old-k8s-version-092258] minikube v1.37.0 on Ubuntu 20.04 (arm64)
I1121 14:43:36.394191 2835167 notify.go:221] Checking for updates...
I1121 14:43:36.397425 2835167 out.go:179] - MINIKUBE_LOCATION=21847
I1121 14:43:36.400714 2835167 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1121 14:43:36.403761 2835167 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21847-2633933/kubeconfig
I1121 14:43:36.406876 2835167 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21847-2633933/.minikube
I1121 14:43:36.409847 2835167 out.go:179] - MINIKUBE_BIN=out/minikube-linux-arm64
I1121 14:43:36.413120 2835167 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1121 14:43:36.416775 2835167 config.go:182] Loaded profile config "cert-expiration-184410": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1121 14:43:36.416939 2835167 driver.go:422] Setting default libvirt URI to qemu:///system
I1121 14:43:36.450855 2835167 docker.go:124] docker version: linux-28.1.1:Docker Engine - Community
I1121 14:43:36.450991 2835167 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1121 14:43:36.516624 2835167 info.go:266] docker info: {ID:5FDH:SA5P:5GCT:NLAS:B73P:SGDQ:PBG5:UBVH:UZY3:RXGO:CI7S:WAIH Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:36 OomKillDisable:true NGoroutines:52 SystemTime:2025-11-21 14:43:36.506387596 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214831104 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-21-244 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I1121 14:43:36.516732 2835167 docker.go:319] overlay module found
I1121 14:43:36.519985 2835167 out.go:179] * Using the docker driver based on user configuration
I1121 14:43:36.522932 2835167 start.go:309] selected driver: docker
I1121 14:43:36.522956 2835167 start.go:930] validating driver "docker" against <nil>
I1121 14:43:36.522972 2835167 start.go:941] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1121 14:43:36.523794 2835167 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1121 14:43:36.580334 2835167 info.go:266] docker info: {ID:5FDH:SA5P:5GCT:NLAS:B73P:SGDQ:PBG5:UBVH:UZY3:RXGO:CI7S:WAIH Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:36 OomKillDisable:true NGoroutines:52 SystemTime:2025-11-21 14:43:36.571381735 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214831104 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-21-244 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I1121 14:43:36.580506 2835167 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I1121 14:43:36.580737 2835167 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1121 14:43:36.583854 2835167 out.go:179] * Using Docker driver with root privileges
I1121 14:43:36.586764 2835167 cni.go:84] Creating CNI manager for ""
I1121 14:43:36.586838 2835167 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1121 14:43:36.586852 2835167 start_flags.go:336] Found "CNI" CNI - setting NetworkPlugin=cni
I1121 14:43:36.586941 2835167 start.go:353] cluster config:
{Name:old-k8s-version-092258 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-092258 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1121 14:43:36.590049 2835167 out.go:179] * Starting "old-k8s-version-092258" primary control-plane node in "old-k8s-version-092258" cluster
I1121 14:43:36.592891 2835167 cache.go:134] Beginning downloading kic base image for docker with containerd
I1121 14:43:36.595918 2835167 out.go:179] * Pulling base image v0.0.48-1763507788-21924 ...
I1121 14:43:36.598784 2835167 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1121 14:43:36.598825 2835167 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a in local docker daemon
I1121 14:43:36.598850 2835167 preload.go:203] Found local preload: /home/jenkins/minikube-integration/21847-2633933/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-arm64.tar.lz4
I1121 14:43:36.598866 2835167 cache.go:65] Caching tarball of preloaded images
I1121 14:43:36.598958 2835167 preload.go:238] Found /home/jenkins/minikube-integration/21847-2633933/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-arm64.tar.lz4 in cache, skipping download
I1121 14:43:36.598968 2835167 cache.go:68] Finished verifying existence of preloaded tar for v1.28.0 on containerd
I1121 14:43:36.599136 2835167 profile.go:143] Saving config to /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/config.json ...
I1121 14:43:36.599165 2835167 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/config.json: {Name:mk03fe35747f6c73b79e2daee9ca9c7b13210439 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:43:36.618070 2835167 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a in local docker daemon, skipping pull
I1121 14:43:36.618098 2835167 cache.go:158] gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a exists in daemon, skipping load
I1121 14:43:36.618116 2835167 cache.go:243] Successfully downloaded all kic artifacts
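Note: the two cache checks above can be reproduced by hand. A minimal shell sketch (the digest is the one from this run; docker image inspect exiting 0 is what lets minikube skip the pull):

  # is the kicbase image already present in the local daemon?
  docker image inspect \
    gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a \
    --format '{{.Id}}' >/dev/null 2>&1 && echo "in daemon, skipping pull"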
I1121 14:43:36.618138 2835167 start.go:360] acquireMachinesLock for old-k8s-version-092258: {Name:mkf21290144e8164ceda2548005b3a6e3ed2df4c Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1121 14:43:36.618251 2835167 start.go:364] duration metric: took 91.969µs to acquireMachinesLock for "old-k8s-version-092258"
I1121 14:43:36.618280 2835167 start.go:93] Provisioning new machine with config: &{Name:old-k8s-version-092258 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-092258 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1121 14:43:36.618362 2835167 start.go:125] createHost starting for "" (driver="docker")
I1121 14:43:36.621674 2835167 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I1121 14:43:36.621900 2835167 start.go:159] libmachine.API.Create for "old-k8s-version-092258" (driver="docker")
I1121 14:43:36.621945 2835167 client.go:173] LocalClient.Create starting
I1121 14:43:36.622015 2835167 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21847-2633933/.minikube/certs/ca.pem
I1121 14:43:36.622055 2835167 main.go:143] libmachine: Decoding PEM data...
I1121 14:43:36.622071 2835167 main.go:143] libmachine: Parsing certificate...
I1121 14:43:36.622122 2835167 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21847-2633933/.minikube/certs/cert.pem
I1121 14:43:36.622144 2835167 main.go:143] libmachine: Decoding PEM data...
I1121 14:43:36.622155 2835167 main.go:143] libmachine: Parsing certificate...
I1121 14:43:36.622504 2835167 cli_runner.go:164] Run: docker network inspect old-k8s-version-092258 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1121 14:43:36.638581 2835167 cli_runner.go:211] docker network inspect old-k8s-version-092258 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1121 14:43:36.638673 2835167 network_create.go:284] running [docker network inspect old-k8s-version-092258] to gather additional debugging logs...
I1121 14:43:36.638695 2835167 cli_runner.go:164] Run: docker network inspect old-k8s-version-092258
W1121 14:43:36.654171 2835167 cli_runner.go:211] docker network inspect old-k8s-version-092258 returned with exit code 1
I1121 14:43:36.654200 2835167 network_create.go:287] error running [docker network inspect old-k8s-version-092258]: docker network inspect old-k8s-version-092258: exit status 1
stdout:
[]
stderr:
Error response from daemon: network old-k8s-version-092258 not found
I1121 14:43:36.654221 2835167 network_create.go:289] output of [docker network inspect old-k8s-version-092258]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network old-k8s-version-092258 not found
** /stderr **
I1121 14:43:36.654336 2835167 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1121 14:43:36.670217 2835167 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-c13a3bee40ff IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:02:9f:8e:c6:2a:d6} reservation:<nil>}
I1121 14:43:36.670512 2835167 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-1859e8fd5584 IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:c6:c6:00:f6:5b:96} reservation:<nil>}
I1121 14:43:36.670770 2835167 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-44a9b6062c4d IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:82:b5:31:a5:3d:f0} reservation:<nil>}
I1121 14:43:36.671175 2835167 network.go:206] using free private subnet 192.168.76.0/24: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0x4001a1a410}
I1121 14:43:36.671200 2835167 network_create.go:124] attempt to create docker network old-k8s-version-092258 192.168.76.0/24 with gateway 192.168.76.1 and MTU of 1500 ...
I1121 14:43:36.671260 2835167 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.76.0/24 --gateway=192.168.76.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=old-k8s-version-092258 old-k8s-version-092258
I1121 14:43:36.731269 2835167 network_create.go:108] docker network old-k8s-version-092258 192.168.76.0/24 created
I1121 14:43:36.731302 2835167 kic.go:121] calculated static IP "192.168.76.2" for the "old-k8s-version-092258" container
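Note: the three "skipping subnet" probes and the final pick of 192.168.76.0/24 above can be approximated from the shell; this is an illustrative sketch of the scan, not minikube's actual Go implementation:

  # subnets already claimed by existing docker networks
  for net in $(docker network ls -q); do
    docker network inspect --format '{{range .IPAM.Config}}{{.Subnet}}{{end}}' "$net"
  done
  # minikube walks candidate private /24s (192.168.49.0, 192.168.58.0,
  # 192.168.67.0, 192.168.76.0, ...) and creates the first one missing
  # from that list.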
I1121 14:43:36.731379 2835167 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1121 14:43:36.747231 2835167 cli_runner.go:164] Run: docker volume create old-k8s-version-092258 --label name.minikube.sigs.k8s.io=old-k8s-version-092258 --label created_by.minikube.sigs.k8s.io=true
I1121 14:43:36.766444 2835167 oci.go:103] Successfully created a docker volume old-k8s-version-092258
I1121 14:43:36.766529 2835167 cli_runner.go:164] Run: docker run --rm --name old-k8s-version-092258-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=old-k8s-version-092258 --entrypoint /usr/bin/test -v old-k8s-version-092258:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a -d /var/lib
I1121 14:43:37.323084 2835167 oci.go:107] Successfully prepared a docker volume old-k8s-version-092258
I1121 14:43:37.323160 2835167 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1121 14:43:37.323176 2835167 kic.go:194] Starting extracting preloaded images to volume ...
I1121 14:43:37.323249 2835167 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21847-2633933/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v old-k8s-version-092258:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a -I lz4 -xf /preloaded.tar -C /extractDir
I1121 14:43:42.479471 2835167 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21847-2633933/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v old-k8s-version-092258:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a -I lz4 -xf /preloaded.tar -C /extractDir: (5.156180728s)
I1121 14:43:42.479504 2835167 kic.go:203] duration metric: took 5.156324945s to extract preloaded images to volume ...
W1121 14:43:42.479641 2835167 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I1121 14:43:42.479761 2835167 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1121 14:43:42.537202 2835167 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname old-k8s-version-092258 --name old-k8s-version-092258 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=old-k8s-version-092258 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=old-k8s-version-092258 --network old-k8s-version-092258 --ip 192.168.76.2 --volume old-k8s-version-092258:/var --security-opt apparmor=unconfined --memory=3072mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a
I1121 14:43:42.837167 2835167 cli_runner.go:164] Run: docker container inspect old-k8s-version-092258 --format={{.State.Running}}
I1121 14:43:42.855671 2835167 cli_runner.go:164] Run: docker container inspect old-k8s-version-092258 --format={{.State.Status}}
I1121 14:43:42.878990 2835167 cli_runner.go:164] Run: docker exec old-k8s-version-092258 stat /var/lib/dpkg/alternatives/iptables
I1121 14:43:42.930669 2835167 oci.go:144] the created container "old-k8s-version-092258" has a running status.
I1121 14:43:42.930708 2835167 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21847-2633933/.minikube/machines/old-k8s-version-092258/id_rsa...
I1121 14:43:43.641994 2835167 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21847-2633933/.minikube/machines/old-k8s-version-092258/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1121 14:43:43.662600 2835167 cli_runner.go:164] Run: docker container inspect old-k8s-version-092258 --format={{.State.Status}}
I1121 14:43:43.680831 2835167 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1121 14:43:43.680859 2835167 kic_runner.go:114] Args: [docker exec --privileged old-k8s-version-092258 chown docker:docker /home/docker/.ssh/authorized_keys]
I1121 14:43:43.723054 2835167 cli_runner.go:164] Run: docker container inspect old-k8s-version-092258 --format={{.State.Status}}
I1121 14:43:43.743125 2835167 machine.go:94] provisionDockerMachine start ...
I1121 14:43:43.743225 2835167 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-092258
I1121 14:43:43.760910 2835167 main.go:143] libmachine: Using SSH client type: native
I1121 14:43:43.761356 2835167 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3eefe0] 0x3f1790 <nil> [] 0s} 127.0.0.1 36720 <nil> <nil>}
I1121 14:43:43.761377 2835167 main.go:143] libmachine: About to run SSH command:
hostname
I1121 14:43:43.762044 2835167 main.go:143] libmachine: Error dialing TCP: ssh: handshake failed: EOF
I1121 14:43:46.904947 2835167 main.go:143] libmachine: SSH cmd err, output: <nil>: old-k8s-version-092258
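Note: every --publish flag in the docker run above bound 127.0.0.1 with an empty host port, so Docker assigned ephemeral host ports (36720 for 22/tcp on this run); the inspect template in the Run line at 14:43:43.743 is how the SSH port is recovered, and it can be replayed as-is:

  docker container inspect \
    -f '{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}' \
    old-k8s-version-092258   # printed 36720 on this run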
I1121 14:43:46.904969 2835167 ubuntu.go:182] provisioning hostname "old-k8s-version-092258"
I1121 14:43:46.905064 2835167 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-092258
I1121 14:43:46.922185 2835167 main.go:143] libmachine: Using SSH client type: native
I1121 14:43:46.922512 2835167 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3eefe0] 0x3f1790 <nil> [] 0s} 127.0.0.1 36720 <nil> <nil>}
I1121 14:43:46.922533 2835167 main.go:143] libmachine: About to run SSH command:
sudo hostname old-k8s-version-092258 && echo "old-k8s-version-092258" | sudo tee /etc/hostname
I1121 14:43:47.078378 2835167 main.go:143] libmachine: SSH cmd err, output: <nil>: old-k8s-version-092258
I1121 14:43:47.078461 2835167 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-092258
I1121 14:43:47.102253 2835167 main.go:143] libmachine: Using SSH client type: native
I1121 14:43:47.102562 2835167 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x3eefe0] 0x3f1790 <nil> [] 0s} 127.0.0.1 36720 <nil> <nil>}
I1121 14:43:47.102584 2835167 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sold-k8s-version-092258' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 old-k8s-version-092258/g' /etc/hosts;
else
echo '127.0.1.1 old-k8s-version-092258' | sudo tee -a /etc/hosts;
fi
fi
I1121 14:43:47.245266 2835167 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1121 14:43:47.245351 2835167 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21847-2633933/.minikube CaCertPath:/home/jenkins/minikube-integration/21847-2633933/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21847-2633933/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21847-2633933/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21847-2633933/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21847-2633933/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21847-2633933/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21847-2633933/.minikube}
I1121 14:43:47.245378 2835167 ubuntu.go:190] setting up certificates
I1121 14:43:47.245387 2835167 provision.go:84] configureAuth start
I1121 14:43:47.245451 2835167 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-092258
I1121 14:43:47.263540 2835167 provision.go:143] copyHostCerts
I1121 14:43:47.263612 2835167 exec_runner.go:144] found /home/jenkins/minikube-integration/21847-2633933/.minikube/ca.pem, removing ...
I1121 14:43:47.263626 2835167 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21847-2633933/.minikube/ca.pem
I1121 14:43:47.263706 2835167 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21847-2633933/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21847-2633933/.minikube/ca.pem (1082 bytes)
I1121 14:43:47.263811 2835167 exec_runner.go:144] found /home/jenkins/minikube-integration/21847-2633933/.minikube/cert.pem, removing ...
I1121 14:43:47.263822 2835167 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21847-2633933/.minikube/cert.pem
I1121 14:43:47.263853 2835167 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21847-2633933/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21847-2633933/.minikube/cert.pem (1123 bytes)
I1121 14:43:47.263922 2835167 exec_runner.go:144] found /home/jenkins/minikube-integration/21847-2633933/.minikube/key.pem, removing ...
I1121 14:43:47.263932 2835167 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21847-2633933/.minikube/key.pem
I1121 14:43:47.263960 2835167 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21847-2633933/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21847-2633933/.minikube/key.pem (1679 bytes)
I1121 14:43:47.264022 2835167 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21847-2633933/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21847-2633933/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21847-2633933/.minikube/certs/ca-key.pem org=jenkins.old-k8s-version-092258 san=[127.0.0.1 192.168.76.2 localhost minikube old-k8s-version-092258]
I1121 14:43:48.319202 2835167 provision.go:177] copyRemoteCerts
I1121 14:43:48.319298 2835167 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1121 14:43:48.319410 2835167 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-092258
I1121 14:43:48.336010 2835167 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:36720 SSHKeyPath:/home/jenkins/minikube-integration/21847-2633933/.minikube/machines/old-k8s-version-092258/id_rsa Username:docker}
I1121 14:43:48.436628 2835167 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-2633933/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1121 14:43:48.454108 2835167 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-2633933/.minikube/machines/server.pem --> /etc/docker/server.pem (1233 bytes)
I1121 14:43:48.472424 2835167 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-2633933/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1121 14:43:48.490030 2835167 provision.go:87] duration metric: took 1.244618966s to configureAuth
I1121 14:43:48.490068 2835167 ubuntu.go:206] setting minikube options for container-runtime
I1121 14:43:48.490248 2835167 config.go:182] Loaded profile config "old-k8s-version-092258": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1121 14:43:48.490256 2835167 machine.go:97] duration metric: took 4.747108297s to provisionDockerMachine
I1121 14:43:48.490263 2835167 client.go:176] duration metric: took 11.868306871s to LocalClient.Create
I1121 14:43:48.490277 2835167 start.go:167] duration metric: took 11.868378746s to libmachine.API.Create "old-k8s-version-092258"
I1121 14:43:48.490284 2835167 start.go:293] postStartSetup for "old-k8s-version-092258" (driver="docker")
I1121 14:43:48.490298 2835167 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1121 14:43:48.490349 2835167 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1121 14:43:48.490386 2835167 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-092258
I1121 14:43:48.506758 2835167 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:36720 SSHKeyPath:/home/jenkins/minikube-integration/21847-2633933/.minikube/machines/old-k8s-version-092258/id_rsa Username:docker}
I1121 14:43:48.608899 2835167 ssh_runner.go:195] Run: cat /etc/os-release
I1121 14:43:48.612079 2835167 main.go:143] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1121 14:43:48.612112 2835167 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1121 14:43:48.612141 2835167 filesync.go:126] Scanning /home/jenkins/minikube-integration/21847-2633933/.minikube/addons for local assets ...
I1121 14:43:48.612212 2835167 filesync.go:126] Scanning /home/jenkins/minikube-integration/21847-2633933/.minikube/files for local assets ...
I1121 14:43:48.612293 2835167 filesync.go:149] local asset: /home/jenkins/minikube-integration/21847-2633933/.minikube/files/etc/ssl/certs/26357852.pem -> 26357852.pem in /etc/ssl/certs
I1121 14:43:48.612406 2835167 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1121 14:43:48.619568 2835167 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-2633933/.minikube/files/etc/ssl/certs/26357852.pem --> /etc/ssl/certs/26357852.pem (1708 bytes)
I1121 14:43:48.636807 2835167 start.go:296] duration metric: took 146.508249ms for postStartSetup
I1121 14:43:48.637286 2835167 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-092258
I1121 14:43:48.653983 2835167 profile.go:143] Saving config to /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/config.json ...
I1121 14:43:48.654267 2835167 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1121 14:43:48.654326 2835167 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-092258
I1121 14:43:48.671301 2835167 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:36720 SSHKeyPath:/home/jenkins/minikube-integration/21847-2633933/.minikube/machines/old-k8s-version-092258/id_rsa Username:docker}
I1121 14:43:48.770419 2835167 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1121 14:43:48.775323 2835167 start.go:128] duration metric: took 12.156945443s to createHost
I1121 14:43:48.775348 2835167 start.go:83] releasing machines lock for "old-k8s-version-092258", held for 12.157085763s
I1121 14:43:48.775420 2835167 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-092258
I1121 14:43:48.792336 2835167 ssh_runner.go:195] Run: cat /version.json
I1121 14:43:48.792390 2835167 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-092258
I1121 14:43:48.792660 2835167 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1121 14:43:48.792747 2835167 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-092258
I1121 14:43:48.822571 2835167 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:36720 SSHKeyPath:/home/jenkins/minikube-integration/21847-2633933/.minikube/machines/old-k8s-version-092258/id_rsa Username:docker}
I1121 14:43:48.823165 2835167 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:36720 SSHKeyPath:/home/jenkins/minikube-integration/21847-2633933/.minikube/machines/old-k8s-version-092258/id_rsa Username:docker}
I1121 14:43:49.010028 2835167 ssh_runner.go:195] Run: systemctl --version
I1121 14:43:49.017657 2835167 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1121 14:43:49.023491 2835167 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1121 14:43:49.023609 2835167 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1121 14:43:49.052303 2835167 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/10-crio-bridge.conflist.disabled] bridge cni config(s)
I1121 14:43:49.052375 2835167 start.go:496] detecting cgroup driver to use...
I1121 14:43:49.052424 2835167 detect.go:187] detected "cgroupfs" cgroup driver on host os
I1121 14:43:49.052491 2835167 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1121 14:43:49.069013 2835167 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1121 14:43:49.083455 2835167 docker.go:218] disabling cri-docker service (if available) ...
I1121 14:43:49.083552 2835167 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1121 14:43:49.106526 2835167 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1121 14:43:49.129811 2835167 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1121 14:43:49.256720 2835167 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1121 14:43:49.389600 2835167 docker.go:234] disabling docker service ...
I1121 14:43:49.389683 2835167 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1121 14:43:49.410151 2835167 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1121 14:43:49.423211 2835167 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1121 14:43:49.545870 2835167 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1121 14:43:49.674290 2835167 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1121 14:43:49.688105 2835167 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1121 14:43:49.704461 2835167 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.9"|' /etc/containerd/config.toml"
I1121 14:43:49.715375 2835167 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1121 14:43:49.725179 2835167 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I1121 14:43:49.725253 2835167 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I1121 14:43:49.734991 2835167 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1121 14:43:49.745235 2835167 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1121 14:43:49.754840 2835167 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1121 14:43:49.764086 2835167 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1121 14:43:49.773152 2835167 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1121 14:43:49.781841 2835167 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1121 14:43:49.791026 2835167 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1121 14:43:49.800527 2835167 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1121 14:43:49.808398 2835167 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1121 14:43:49.815956 2835167 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1121 14:43:49.931886 2835167 ssh_runner.go:195] Run: sudo systemctl restart containerd
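Note: a quick way to confirm that the sed rewrites above survived the restart; the path and keys are the ones edited in this run, but the check itself is an editorial suggestion, not part of the harness flow:

  sudo grep -nE 'SystemdCgroup|sandbox_image|conf_dir' /etc/containerd/config.toml
  sudo systemctl is-active containerd   # expect "active" after the restart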
I1121 14:43:50.066706 2835167 start.go:543] Will wait 60s for socket path /run/containerd/containerd.sock
I1121 14:43:50.066831 2835167 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I1121 14:43:50.071006 2835167 start.go:564] Will wait 60s for crictl version
I1121 14:43:50.071125 2835167 ssh_runner.go:195] Run: which crictl
I1121 14:43:50.075393 2835167 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1121 14:43:50.118651 2835167 start.go:580] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: v2.1.5
RuntimeApiVersion: v1
I1121 14:43:50.118773 2835167 ssh_runner.go:195] Run: containerd --version
I1121 14:43:50.141212 2835167 ssh_runner.go:195] Run: containerd --version
I1121 14:43:50.169638 2835167 out.go:179] * Preparing Kubernetes v1.28.0 on containerd 2.1.5 ...
I1121 14:43:50.172499 2835167 cli_runner.go:164] Run: docker network inspect old-k8s-version-092258 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1121 14:43:50.189507 2835167 ssh_runner.go:195] Run: grep 192.168.76.1 host.minikube.internal$ /etc/hosts
I1121 14:43:50.198370 2835167 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.76.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1121 14:43:50.208962 2835167 kubeadm.go:884] updating cluster {Name:old-k8s-version-092258 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-092258 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1121 14:43:50.209203 2835167 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1121 14:43:50.209272 2835167 ssh_runner.go:195] Run: sudo crictl images --output json
I1121 14:43:50.234384 2835167 containerd.go:627] all images are preloaded for containerd runtime.
I1121 14:43:50.234409 2835167 containerd.go:534] Images already preloaded, skipping extraction
I1121 14:43:50.234475 2835167 ssh_runner.go:195] Run: sudo crictl images --output json
I1121 14:43:50.259395 2835167 containerd.go:627] all images are preloaded for containerd runtime.
I1121 14:43:50.259421 2835167 cache_images.go:86] Images are preloaded, skipping loading
I1121 14:43:50.259430 2835167 kubeadm.go:935] updating node { 192.168.76.2 8443 v1.28.0 containerd true true} ...
I1121 14:43:50.259536 2835167 kubeadm.go:947] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.28.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=old-k8s-version-092258 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.76.2
[Install]
config:
{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-092258 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1121 14:43:50.259609 2835167 ssh_runner.go:195] Run: sudo crictl info
I1121 14:43:50.287070 2835167 cni.go:84] Creating CNI manager for ""
I1121 14:43:50.287095 2835167 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1121 14:43:50.287115 2835167 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1121 14:43:50.287139 2835167 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.76.2 APIServerPort:8443 KubernetesVersion:v1.28.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:old-k8s-version-092258 NodeName:old-k8s-version-092258 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.76.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.76.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1121 14:43:50.287271 2835167 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.76.2
  bindPort: 8443
bootstrapTokens:
  - groups:
      - system:bootstrappers:kubeadm:default-node-token
    ttl: 24h0m0s
    usages:
      - signing
      - authentication
nodeRegistration:
  criSocket: unix:///run/containerd/containerd.sock
  name: "old-k8s-version-092258"
  kubeletExtraArgs:
    node-ip: 192.168.76.2
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
  certSANs: ["127.0.0.1", "localhost", "192.168.76.2"]
  extraArgs:
    enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
  extraArgs:
    allocate-node-cidrs: "true"
    leader-elect: "false"
scheduler:
  extraArgs:
    leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd
    extraArgs:
      proxy-refresh-interval: "70000"
kubernetesVersion: v1.28.0
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  x509:
    clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
  maxPerCore: 0
  # Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
  tcpEstablishedTimeout: 0s
  # Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
  tcpCloseWaitTimeout: 0s
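The four YAML documents above are what minikube writes out as kubeadm.yaml (scp'd to /var/tmp/minikube/kubeadm.yaml.new a few lines below). As a minimal sketch, a config like this can be exercised without mutating the node via kubeadm's standard dry-run mode; the file path here is the one from this log, and kubeadm v1.28 is assumed on the PATH:

    # Run the init phases against the generated config without persisting anything:
    kubeadm init --config /var/tmp/minikube/kubeadm.yaml --dry-run
    # For comparison, print the upstream defaults this config overrides:
    kubeadm config print init-defaults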
I1121 14:43:50.287342 2835167 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.28.0
I1121 14:43:50.295386 2835167 binaries.go:51] Found k8s binaries, skipping transfer
I1121 14:43:50.295454 2835167 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1121 14:43:50.303213 2835167 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (326 bytes)
I1121 14:43:50.317127 2835167 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1121 14:43:50.331240 2835167 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2176 bytes)
I1121 14:43:50.344296 2835167 ssh_runner.go:195] Run: grep 192.168.76.2 control-plane.minikube.internal$ /etc/hosts
I1121 14:43:50.347919 2835167 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.76.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
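The bash one-liner above is minikube's idempotent /etc/hosts update: filter out any stale control-plane.minikube.internal entry, append the current mapping, and install the result with sudo cp (a plain > redirect would not run as root). The same idiom as a standalone sketch, with the name and IP as placeholder values:

    NAME=control-plane.minikube.internal; IP=192.168.76.2   # illustrative values
    { grep -v $'\t'"$NAME"'$' /etc/hosts; echo "$IP $NAME"; } > /tmp/hosts.$$
    sudo cp /tmp/hosts.$$ /etc/hosts && rm -f /tmp/hosts.$$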
I1121 14:43:50.357793 2835167 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1121 14:43:50.483017 2835167 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1121 14:43:50.499630 2835167 certs.go:69] Setting up /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258 for IP: 192.168.76.2
I1121 14:43:50.499697 2835167 certs.go:195] generating shared ca certs ...
I1121 14:43:50.499729 2835167 certs.go:227] acquiring lock for ca certs: {Name:mk0a1b8efa9f1d453751b4f7afafeea16d7243a3 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:43:50.499912 2835167 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21847-2633933/.minikube/ca.key
I1121 14:43:50.499982 2835167 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21847-2633933/.minikube/proxy-client-ca.key
I1121 14:43:50.500020 2835167 certs.go:257] generating profile certs ...
I1121 14:43:50.500125 2835167 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/client.key
I1121 14:43:50.500157 2835167 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/client.crt with IP's: []
I1121 14:43:50.881389 2835167 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/client.crt ...
I1121 14:43:50.881423 2835167 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/client.crt: {Name:mkd66b37bd8f68df88ee391b1c0ae406d24100dc Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:43:50.881622 2835167 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/client.key ...
I1121 14:43:50.881638 2835167 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/client.key: {Name:mk87497e50632ba54cdc705e25ae82f0b49d923a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:43:50.881733 2835167 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/apiserver.key.fe0fc8ce
I1121 14:43:50.881751 2835167 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/apiserver.crt.fe0fc8ce with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.76.2]
I1121 14:43:51.368107 2835167 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/apiserver.crt.fe0fc8ce ...
I1121 14:43:51.368141 2835167 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/apiserver.crt.fe0fc8ce: {Name:mke3412122bd471676c09fe30765bbb879486748 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:43:51.368348 2835167 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/apiserver.key.fe0fc8ce ...
I1121 14:43:51.368363 2835167 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/apiserver.key.fe0fc8ce: {Name:mkb54d05c24cffdedc4d0fc59e5780f32a7a4815 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:43:51.368463 2835167 certs.go:382] copying /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/apiserver.crt.fe0fc8ce -> /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/apiserver.crt
I1121 14:43:51.368560 2835167 certs.go:386] copying /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/apiserver.key.fe0fc8ce -> /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/apiserver.key
I1121 14:43:51.368628 2835167 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/proxy-client.key
I1121 14:43:51.368647 2835167 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/proxy-client.crt with IP's: []
I1121 14:43:51.447326 2835167 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/proxy-client.crt ...
I1121 14:43:51.447360 2835167 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/proxy-client.crt: {Name:mk8fd112c818af834b5d68c83f8c92f6291ef45d Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:43:51.447577 2835167 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/proxy-client.key ...
I1121 14:43:51.447598 2835167 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/proxy-client.key: {Name:mk461afd810cf943501ef59a65730b33eecea0e3 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:43:51.447801 2835167 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-2633933/.minikube/certs/2635785.pem (1338 bytes)
W1121 14:43:51.447843 2835167 certs.go:480] ignoring /home/jenkins/minikube-integration/21847-2633933/.minikube/certs/2635785_empty.pem, impossibly tiny: 0 bytes
I1121 14:43:51.447858 2835167 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-2633933/.minikube/certs/ca-key.pem (1675 bytes)
I1121 14:43:51.447889 2835167 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-2633933/.minikube/certs/ca.pem (1082 bytes)
I1121 14:43:51.447920 2835167 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-2633933/.minikube/certs/cert.pem (1123 bytes)
I1121 14:43:51.447947 2835167 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-2633933/.minikube/certs/key.pem (1679 bytes)
I1121 14:43:51.447993 2835167 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-2633933/.minikube/files/etc/ssl/certs/26357852.pem (1708 bytes)
I1121 14:43:51.448566 2835167 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-2633933/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1121 14:43:51.466854 2835167 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-2633933/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I1121 14:43:51.484620 2835167 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-2633933/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1121 14:43:51.502526 2835167 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-2633933/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I1121 14:43:51.521430 2835167 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1432 bytes)
I1121 14:43:51.540179 2835167 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I1121 14:43:51.560949 2835167 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1121 14:43:51.579376 2835167 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-2633933/.minikube/profiles/old-k8s-version-092258/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I1121 14:43:51.598197 2835167 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-2633933/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1121 14:43:51.615934 2835167 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-2633933/.minikube/certs/2635785.pem --> /usr/share/ca-certificates/2635785.pem (1338 bytes)
I1121 14:43:51.634213 2835167 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-2633933/.minikube/files/etc/ssl/certs/26357852.pem --> /usr/share/ca-certificates/26357852.pem (1708 bytes)
I1121 14:43:51.652095 2835167 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1121 14:43:51.664901 2835167 ssh_runner.go:195] Run: openssl version
I1121 14:43:51.671463 2835167 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1121 14:43:51.680109 2835167 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1121 14:43:51.684127 2835167 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 21 13:57 /usr/share/ca-certificates/minikubeCA.pem
I1121 14:43:51.684210 2835167 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1121 14:43:51.727682 2835167 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1121 14:43:51.736178 2835167 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/2635785.pem && ln -fs /usr/share/ca-certificates/2635785.pem /etc/ssl/certs/2635785.pem"
I1121 14:43:51.744471 2835167 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/2635785.pem
I1121 14:43:51.748815 2835167 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 21 14:03 /usr/share/ca-certificates/2635785.pem
I1121 14:43:51.748886 2835167 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/2635785.pem
I1121 14:43:51.790035 2835167 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/2635785.pem /etc/ssl/certs/51391683.0"
I1121 14:43:51.798184 2835167 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/26357852.pem && ln -fs /usr/share/ca-certificates/26357852.pem /etc/ssl/certs/26357852.pem"
I1121 14:43:51.806524 2835167 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/26357852.pem
I1121 14:43:51.810447 2835167 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 21 14:03 /usr/share/ca-certificates/26357852.pem
I1121 14:43:51.810539 2835167 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/26357852.pem
I1121 14:43:51.851288 2835167 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/26357852.pem /etc/ssl/certs/3ec20f2e.0"
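The b5213941.0, 51391683.0 and 3ec20f2e.0 symlinks above follow OpenSSL's trust-store convention: certificates in /etc/ssl/certs are looked up by subject-hash filenames of the form <hash>.0, and the hash is exactly what the openssl x509 -hash calls in this log print. A sketch of the same registration for an arbitrary CA certificate:

    CERT=/usr/share/ca-certificates/minikubeCA.pem   # any PEM CA certificate
    HASH=$(openssl x509 -hash -noout -in "$CERT")    # prints e.g. b5213941
    sudo ln -fs "$CERT" "/etc/ssl/certs/$HASH.0"     # name OpenSSL resolves at verify time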
I1121 14:43:51.859608 2835167 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1121 14:43:51.863193 2835167 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1121 14:43:51.863292 2835167 kubeadm.go:401] StartCluster: {Name:old-k8s-version-092258 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-092258 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1121 14:43:51.863366 2835167 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I1121 14:43:51.863446 2835167 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I1121 14:43:51.891859 2835167 cri.go:89] found id: ""
I1121 14:43:51.891941 2835167 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1121 14:43:51.900527 2835167 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1121 14:43:51.909151 2835167 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I1121 14:43:51.909241 2835167 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1121 14:43:51.919737 2835167 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1121 14:43:51.919760 2835167 kubeadm.go:158] found existing configuration files:
I1121 14:43:51.919826 2835167 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1121 14:43:51.928877 2835167 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1121 14:43:51.928996 2835167 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1121 14:43:51.936697 2835167 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1121 14:43:51.944973 2835167 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1121 14:43:51.945066 2835167 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1121 14:43:51.952856 2835167 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1121 14:43:51.960547 2835167 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1121 14:43:51.960686 2835167 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1121 14:43:51.968472 2835167 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1121 14:43:51.976805 2835167 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1121 14:43:51.976888 2835167 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1121 14:43:51.984345 2835167 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.28.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1121 14:43:52.080198 2835167 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1084-aws\n", err: exit status 1
I1121 14:43:52.183959 2835167 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1121 14:44:07.478855 2835167 kubeadm.go:319] [init] Using Kubernetes version: v1.28.0
I1121 14:44:07.478915 2835167 kubeadm.go:319] [preflight] Running pre-flight checks
I1121 14:44:07.479009 2835167 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I1121 14:44:07.479066 2835167 kubeadm.go:319] KERNEL_VERSION: 5.15.0-1084-aws
I1121 14:44:07.479102 2835167 kubeadm.go:319] OS: Linux
I1121 14:44:07.479150 2835167 kubeadm.go:319] CGROUPS_CPU: enabled
I1121 14:44:07.479201 2835167 kubeadm.go:319] CGROUPS_CPUACCT: enabled
I1121 14:44:07.479250 2835167 kubeadm.go:319] CGROUPS_CPUSET: enabled
I1121 14:44:07.479300 2835167 kubeadm.go:319] CGROUPS_DEVICES: enabled
I1121 14:44:07.479351 2835167 kubeadm.go:319] CGROUPS_FREEZER: enabled
I1121 14:44:07.479413 2835167 kubeadm.go:319] CGROUPS_MEMORY: enabled
I1121 14:44:07.479461 2835167 kubeadm.go:319] CGROUPS_PIDS: enabled
I1121 14:44:07.479511 2835167 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I1121 14:44:07.479559 2835167 kubeadm.go:319] CGROUPS_BLKIO: enabled
I1121 14:44:07.479634 2835167 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1121 14:44:07.479732 2835167 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1121 14:44:07.479828 2835167 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I1121 14:44:07.479893 2835167 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1121 14:44:07.482963 2835167 out.go:252] - Generating certificates and keys ...
I1121 14:44:07.483064 2835167 kubeadm.go:319] [certs] Using existing ca certificate authority
I1121 14:44:07.483132 2835167 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1121 14:44:07.483202 2835167 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I1121 14:44:07.483261 2835167 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I1121 14:44:07.483324 2835167 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1121 14:44:07.483377 2835167 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1121 14:44:07.483433 2835167 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1121 14:44:07.483564 2835167 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [localhost old-k8s-version-092258] and IPs [192.168.76.2 127.0.0.1 ::1]
I1121 14:44:07.483619 2835167 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1121 14:44:07.483749 2835167 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [localhost old-k8s-version-092258] and IPs [192.168.76.2 127.0.0.1 ::1]
I1121 14:44:07.483818 2835167 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1121 14:44:07.483886 2835167 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1121 14:44:07.483933 2835167 kubeadm.go:319] [certs] Generating "sa" key and public key
I1121 14:44:07.483992 2835167 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1121 14:44:07.484045 2835167 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1121 14:44:07.484101 2835167 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1121 14:44:07.484169 2835167 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1121 14:44:07.484226 2835167 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1121 14:44:07.484312 2835167 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1121 14:44:07.484381 2835167 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1121 14:44:07.487428 2835167 out.go:252] - Booting up control plane ...
I1121 14:44:07.487608 2835167 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1121 14:44:07.487710 2835167 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1121 14:44:07.487786 2835167 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1121 14:44:07.487905 2835167 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1121 14:44:07.488000 2835167 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1121 14:44:07.488044 2835167 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1121 14:44:07.488215 2835167 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
I1121 14:44:07.488300 2835167 kubeadm.go:319] [apiclient] All control plane components are healthy after 7.502585 seconds
I1121 14:44:07.488418 2835167 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1121 14:44:07.488571 2835167 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1121 14:44:07.488637 2835167 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1121 14:44:07.488849 2835167 kubeadm.go:319] [mark-control-plane] Marking the node old-k8s-version-092258 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1121 14:44:07.488911 2835167 kubeadm.go:319] [bootstrap-token] Using token: szaotk.n52uxpmszzhbby9z
I1121 14:44:07.491820 2835167 out.go:252] - Configuring RBAC rules ...
I1121 14:44:07.491949 2835167 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1121 14:44:07.492037 2835167 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1121 14:44:07.492184 2835167 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1121 14:44:07.492318 2835167 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller to automatically approve CSRs from a Node Bootstrap Token
I1121 14:44:07.492450 2835167 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1121 14:44:07.492566 2835167 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1121 14:44:07.492691 2835167 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1121 14:44:07.492737 2835167 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1121 14:44:07.492785 2835167 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1121 14:44:07.492789 2835167 kubeadm.go:319]
I1121 14:44:07.492852 2835167 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1121 14:44:07.492857 2835167 kubeadm.go:319]
I1121 14:44:07.492938 2835167 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1121 14:44:07.492942 2835167 kubeadm.go:319]
I1121 14:44:07.492968 2835167 kubeadm.go:319] mkdir -p $HOME/.kube
I1121 14:44:07.493047 2835167 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1121 14:44:07.493101 2835167 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1121 14:44:07.493105 2835167 kubeadm.go:319]
I1121 14:44:07.493162 2835167 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1121 14:44:07.493166 2835167 kubeadm.go:319]
I1121 14:44:07.493217 2835167 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1121 14:44:07.493221 2835167 kubeadm.go:319]
I1121 14:44:07.493276 2835167 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1121 14:44:07.493355 2835167 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1121 14:44:07.493428 2835167 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1121 14:44:07.493432 2835167 kubeadm.go:319]
I1121 14:44:07.493521 2835167 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1121 14:44:07.493601 2835167 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1121 14:44:07.493607 2835167 kubeadm.go:319]
I1121 14:44:07.493695 2835167 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token szaotk.n52uxpmszzhbby9z \
I1121 14:44:07.493804 2835167 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:d756a1c258e082bbc06f965046f24233900a8e069c2a9d29a764f0b68af739ae \
I1121 14:44:07.493826 2835167 kubeadm.go:319] --control-plane
I1121 14:44:07.493830 2835167 kubeadm.go:319]
I1121 14:44:07.493920 2835167 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1121 14:44:07.493924 2835167 kubeadm.go:319]
I1121 14:44:07.494010 2835167 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token szaotk.n52uxpmszzhbby9z \
I1121 14:44:07.494129 2835167 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:d756a1c258e082bbc06f965046f24233900a8e069c2a9d29a764f0b68af739ae
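Per the InitConfiguration earlier in this log, the bootstrap token in the join commands above carries a 24h TTL, so the printed commands go stale. A fresh, equivalent join command can be regenerated on the control plane with the standard kubeadm subcommand:

    kubeadm token create --print-join-command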
I1121 14:44:07.494138 2835167 cni.go:84] Creating CNI manager for ""
I1121 14:44:07.494145 2835167 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1121 14:44:07.497166 2835167 out.go:179] * Configuring CNI (Container Networking Interface) ...
I1121 14:44:07.500216 2835167 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I1121 14:44:07.505987 2835167 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.28.0/kubectl ...
I1121 14:44:07.506006 2835167 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I1121 14:44:07.546131 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I1121 14:44:08.532445 2835167 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1121 14:44:08.532548 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:08.532605 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes old-k8s-version-092258 minikube.k8s.io/updated_at=2025_11_21T14_44_08_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=29e0798733fefbdc471fd2bbb38f6a7ae2a26162 minikube.k8s.io/name=old-k8s-version-092258 minikube.k8s.io/primary=true
I1121 14:44:08.674460 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:08.674568 2835167 ops.go:34] apiserver oom_adj: -16
I1121 14:44:09.175394 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:09.675368 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:10.174563 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:10.675545 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:11.174568 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:11.675237 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:12.175098 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:12.675136 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:13.175409 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:13.674508 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:14.175483 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:14.674955 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:15.174638 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:15.674566 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:16.174919 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:16.674946 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:17.174624 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:17.675110 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:18.174609 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:18.674810 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:19.174819 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:19.675503 2835167 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:44:19.793386 2835167 kubeadm.go:1114] duration metric: took 11.260904779s to wait for elevateKubeSystemPrivileges
I1121 14:44:19.793428 2835167 kubeadm.go:403] duration metric: took 27.930140359s to StartCluster
I1121 14:44:19.793447 2835167 settings.go:142] acquiring lock: {Name:mkd6064915932eca5a3b1d70feb4ec8240f340da Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:44:19.793514 2835167 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21847-2633933/kubeconfig
I1121 14:44:19.794554 2835167 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-2633933/kubeconfig: {Name:mkd905aaf74d26e32c0b3e46a7edfbf13f4b98ec Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:44:19.794781 2835167 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1121 14:44:19.794907 2835167 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1121 14:44:19.795160 2835167 config.go:182] Loaded profile config "old-k8s-version-092258": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1121 14:44:19.795206 2835167 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1121 14:44:19.795273 2835167 addons.go:70] Setting storage-provisioner=true in profile "old-k8s-version-092258"
I1121 14:44:19.795287 2835167 addons.go:239] Setting addon storage-provisioner=true in "old-k8s-version-092258"
I1121 14:44:19.795308 2835167 host.go:66] Checking if "old-k8s-version-092258" exists ...
I1121 14:44:19.795815 2835167 cli_runner.go:164] Run: docker container inspect old-k8s-version-092258 --format={{.State.Status}}
I1121 14:44:19.795979 2835167 addons.go:70] Setting default-storageclass=true in profile "old-k8s-version-092258"
I1121 14:44:19.795995 2835167 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "old-k8s-version-092258"
I1121 14:44:19.796255 2835167 cli_runner.go:164] Run: docker container inspect old-k8s-version-092258 --format={{.State.Status}}
I1121 14:44:19.798851 2835167 out.go:179] * Verifying Kubernetes components...
I1121 14:44:19.806315 2835167 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1121 14:44:19.841770 2835167 addons.go:239] Setting addon default-storageclass=true in "old-k8s-version-092258"
I1121 14:44:19.841810 2835167 host.go:66] Checking if "old-k8s-version-092258" exists ...
I1121 14:44:19.842215 2835167 cli_runner.go:164] Run: docker container inspect old-k8s-version-092258 --format={{.State.Status}}
I1121 14:44:19.843021 2835167 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1121 14:44:19.845981 2835167 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1121 14:44:19.846003 2835167 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1121 14:44:19.846070 2835167 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-092258
I1121 14:44:19.880288 2835167 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1121 14:44:19.880310 2835167 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1121 14:44:19.880371 2835167 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-092258
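Both cli_runner calls above resolve the host side of the container's 22/tcp port mapping with a Go template; the 127.0.0.1:36720 endpoints used by the ssh clients on the next lines come from exactly this lookup. Standalone, the same query is:

    # Host port bound to the container's SSH port (container name taken from this log):
    docker inspect -f '{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}' old-k8s-version-092258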
I1121 14:44:19.888389 2835167 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:36720 SSHKeyPath:/home/jenkins/minikube-integration/21847-2633933/.minikube/machines/old-k8s-version-092258/id_rsa Username:docker}
I1121 14:44:19.916676 2835167 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:36720 SSHKeyPath:/home/jenkins/minikube-integration/21847-2633933/.minikube/machines/old-k8s-version-092258/id_rsa Username:docker}
I1121 14:44:20.244082 2835167 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1121 14:44:20.285170 2835167 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.76.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
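The sed pipeline above rewrites the coredns ConfigMap in flight: it inserts a hosts block ahead of the "forward . /etc/resolv.conf" plugin and a log directive ahead of errors, then feeds the result to kubectl replace. Reconstructed from those sed expressions (not captured from the cluster), the injected Corefile fragment is:

    hosts {
       192.168.76.1 host.minikube.internal
       fallthrough
    }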
I1121 14:44:20.285362 2835167 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1121 14:44:20.332070 2835167 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1121 14:44:21.059715 2835167 node_ready.go:35] waiting up to 6m0s for node "old-k8s-version-092258" to be "Ready" ...
I1121 14:44:21.059828 2835167 start.go:977] {"host.minikube.internal": 192.168.76.1} host record injected into CoreDNS's ConfigMap
I1121 14:44:21.566332 2835167 kapi.go:214] "coredns" deployment in "kube-system" namespace and "old-k8s-version-092258" context rescaled to 1 replica
I1121 14:44:21.605475 2835167 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (1.273369049s)
I1121 14:44:21.608818 2835167 out.go:179] * Enabled addons: default-storageclass, storage-provisioner
I1121 14:44:21.611888 2835167 addons.go:530] duration metric: took 1.816656544s for enable addons: enabled=[default-storageclass storage-provisioner]
W1121 14:44:23.063813 2835167 node_ready.go:57] node "old-k8s-version-092258" has "Ready":"False" status (will retry)
W1121 14:44:25.563129 2835167 node_ready.go:57] node "old-k8s-version-092258" has "Ready":"False" status (will retry)
W1121 14:44:27.564011 2835167 node_ready.go:57] node "old-k8s-version-092258" has "Ready":"False" status (will retry)
W1121 14:44:30.063612 2835167 node_ready.go:57] node "old-k8s-version-092258" has "Ready":"False" status (will retry)
W1121 14:44:32.562923 2835167 node_ready.go:57] node "old-k8s-version-092258" has "Ready":"False" status (will retry)
I1121 14:44:33.563363 2835167 node_ready.go:49] node "old-k8s-version-092258" is "Ready"
I1121 14:44:33.563395 2835167 node_ready.go:38] duration metric: took 12.503648731s for node "old-k8s-version-092258" to be "Ready" ...
I1121 14:44:33.563409 2835167 api_server.go:52] waiting for apiserver process to appear ...
I1121 14:44:33.563474 2835167 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1121 14:44:33.580001 2835167 api_server.go:72] duration metric: took 13.7851816s to wait for apiserver process to appear ...
I1121 14:44:33.580026 2835167 api_server.go:88] waiting for apiserver healthz status ...
I1121 14:44:33.580045 2835167 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1121 14:44:33.589120 2835167 api_server.go:279] https://192.168.76.2:8443/healthz returned 200:
ok
I1121 14:44:33.590563 2835167 api_server.go:141] control plane version: v1.28.0
I1121 14:44:33.590586 2835167 api_server.go:131] duration metric: took 10.553339ms to wait for apiserver health ...
I1121 14:44:33.590594 2835167 system_pods.go:43] waiting for kube-system pods to appear ...
I1121 14:44:33.595235 2835167 system_pods.go:59] 8 kube-system pods found
I1121 14:44:33.595322 2835167 system_pods.go:61] "coredns-5dd5756b68-86stv" [6a48c3f2-f439-40e1-885b-5850f95d1ffc] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1121 14:44:33.595346 2835167 system_pods.go:61] "etcd-old-k8s-version-092258" [bbb172b1-cd74-44e9-ba24-92155ea08be4] Running
I1121 14:44:33.595390 2835167 system_pods.go:61] "kindnet-tfn5q" [6bec8380-6059-40d0-b0ed-6c3906f84591] Running
I1121 14:44:33.595417 2835167 system_pods.go:61] "kube-apiserver-old-k8s-version-092258" [adf091a2-7b6d-4ba0-a537-9c5f7f93c471] Running
I1121 14:44:33.595442 2835167 system_pods.go:61] "kube-controller-manager-old-k8s-version-092258" [01f77916-588d-469c-b175-3bbcdfe34ce8] Running
I1121 14:44:33.595479 2835167 system_pods.go:61] "kube-proxy-tdwt5" [94e025a3-f19d-40ce-b6a6-9e2eb3b8f998] Running
I1121 14:44:33.595506 2835167 system_pods.go:61] "kube-scheduler-old-k8s-version-092258" [7dfed185-93bd-4218-9c17-a6105d34022f] Running
I1121 14:44:33.595532 2835167 system_pods.go:61] "storage-provisioner" [a31c361f-8fb6-4726-a554-e70884e4d16e] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1121 14:44:33.595572 2835167 system_pods.go:74] duration metric: took 4.969827ms to wait for pod list to return data ...
I1121 14:44:33.595601 2835167 default_sa.go:34] waiting for default service account to be created ...
I1121 14:44:33.599253 2835167 default_sa.go:45] found service account: "default"
I1121 14:44:33.599325 2835167 default_sa.go:55] duration metric: took 3.703418ms for default service account to be created ...
I1121 14:44:33.599363 2835167 system_pods.go:116] waiting for k8s-apps to be running ...
I1121 14:44:33.603344 2835167 system_pods.go:86] 8 kube-system pods found
I1121 14:44:33.603423 2835167 system_pods.go:89] "coredns-5dd5756b68-86stv" [6a48c3f2-f439-40e1-885b-5850f95d1ffc] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1121 14:44:33.603457 2835167 system_pods.go:89] "etcd-old-k8s-version-092258" [bbb172b1-cd74-44e9-ba24-92155ea08be4] Running
I1121 14:44:33.603486 2835167 system_pods.go:89] "kindnet-tfn5q" [6bec8380-6059-40d0-b0ed-6c3906f84591] Running
I1121 14:44:33.603513 2835167 system_pods.go:89] "kube-apiserver-old-k8s-version-092258" [adf091a2-7b6d-4ba0-a537-9c5f7f93c471] Running
I1121 14:44:33.603549 2835167 system_pods.go:89] "kube-controller-manager-old-k8s-version-092258" [01f77916-588d-469c-b175-3bbcdfe34ce8] Running
I1121 14:44:33.603576 2835167 system_pods.go:89] "kube-proxy-tdwt5" [94e025a3-f19d-40ce-b6a6-9e2eb3b8f998] Running
I1121 14:44:33.603600 2835167 system_pods.go:89] "kube-scheduler-old-k8s-version-092258" [7dfed185-93bd-4218-9c17-a6105d34022f] Running
I1121 14:44:33.603640 2835167 system_pods.go:89] "storage-provisioner" [a31c361f-8fb6-4726-a554-e70884e4d16e] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1121 14:44:33.603680 2835167 retry.go:31] will retry after 248.130267ms: missing components: kube-dns
I1121 14:44:33.863548 2835167 system_pods.go:86] 8 kube-system pods found
I1121 14:44:33.863646 2835167 system_pods.go:89] "coredns-5dd5756b68-86stv" [6a48c3f2-f439-40e1-885b-5850f95d1ffc] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1121 14:44:33.863677 2835167 system_pods.go:89] "etcd-old-k8s-version-092258" [bbb172b1-cd74-44e9-ba24-92155ea08be4] Running
I1121 14:44:33.863699 2835167 system_pods.go:89] "kindnet-tfn5q" [6bec8380-6059-40d0-b0ed-6c3906f84591] Running
I1121 14:44:33.863735 2835167 system_pods.go:89] "kube-apiserver-old-k8s-version-092258" [adf091a2-7b6d-4ba0-a537-9c5f7f93c471] Running
I1121 14:44:33.863762 2835167 system_pods.go:89] "kube-controller-manager-old-k8s-version-092258" [01f77916-588d-469c-b175-3bbcdfe34ce8] Running
I1121 14:44:33.863787 2835167 system_pods.go:89] "kube-proxy-tdwt5" [94e025a3-f19d-40ce-b6a6-9e2eb3b8f998] Running
I1121 14:44:33.863827 2835167 system_pods.go:89] "kube-scheduler-old-k8s-version-092258" [7dfed185-93bd-4218-9c17-a6105d34022f] Running
I1121 14:44:33.863857 2835167 system_pods.go:89] "storage-provisioner" [a31c361f-8fb6-4726-a554-e70884e4d16e] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1121 14:44:33.863904 2835167 retry.go:31] will retry after 379.807267ms: missing components: kube-dns
I1121 14:44:34.248297 2835167 system_pods.go:86] 8 kube-system pods found
I1121 14:44:34.248331 2835167 system_pods.go:89] "coredns-5dd5756b68-86stv" [6a48c3f2-f439-40e1-885b-5850f95d1ffc] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1121 14:44:34.248338 2835167 system_pods.go:89] "etcd-old-k8s-version-092258" [bbb172b1-cd74-44e9-ba24-92155ea08be4] Running
I1121 14:44:34.248344 2835167 system_pods.go:89] "kindnet-tfn5q" [6bec8380-6059-40d0-b0ed-6c3906f84591] Running
I1121 14:44:34.248348 2835167 system_pods.go:89] "kube-apiserver-old-k8s-version-092258" [adf091a2-7b6d-4ba0-a537-9c5f7f93c471] Running
I1121 14:44:34.248352 2835167 system_pods.go:89] "kube-controller-manager-old-k8s-version-092258" [01f77916-588d-469c-b175-3bbcdfe34ce8] Running
I1121 14:44:34.248356 2835167 system_pods.go:89] "kube-proxy-tdwt5" [94e025a3-f19d-40ce-b6a6-9e2eb3b8f998] Running
I1121 14:44:34.248360 2835167 system_pods.go:89] "kube-scheduler-old-k8s-version-092258" [7dfed185-93bd-4218-9c17-a6105d34022f] Running
I1121 14:44:34.248365 2835167 system_pods.go:89] "storage-provisioner" [a31c361f-8fb6-4726-a554-e70884e4d16e] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1121 14:44:34.248380 2835167 retry.go:31] will retry after 418.10052ms: missing components: kube-dns
I1121 14:44:34.670581 2835167 system_pods.go:86] 8 kube-system pods found
I1121 14:44:34.670670 2835167 system_pods.go:89] "coredns-5dd5756b68-86stv" [6a48c3f2-f439-40e1-885b-5850f95d1ffc] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1121 14:44:34.670687 2835167 system_pods.go:89] "etcd-old-k8s-version-092258" [bbb172b1-cd74-44e9-ba24-92155ea08be4] Running
I1121 14:44:34.670694 2835167 system_pods.go:89] "kindnet-tfn5q" [6bec8380-6059-40d0-b0ed-6c3906f84591] Running
I1121 14:44:34.670698 2835167 system_pods.go:89] "kube-apiserver-old-k8s-version-092258" [adf091a2-7b6d-4ba0-a537-9c5f7f93c471] Running
I1121 14:44:34.670703 2835167 system_pods.go:89] "kube-controller-manager-old-k8s-version-092258" [01f77916-588d-469c-b175-3bbcdfe34ce8] Running
I1121 14:44:34.670707 2835167 system_pods.go:89] "kube-proxy-tdwt5" [94e025a3-f19d-40ce-b6a6-9e2eb3b8f998] Running
I1121 14:44:34.670711 2835167 system_pods.go:89] "kube-scheduler-old-k8s-version-092258" [7dfed185-93bd-4218-9c17-a6105d34022f] Running
I1121 14:44:34.670736 2835167 system_pods.go:89] "storage-provisioner" [a31c361f-8fb6-4726-a554-e70884e4d16e] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1121 14:44:34.670759 2835167 retry.go:31] will retry after 454.42102ms: missing components: kube-dns
I1121 14:44:35.130522 2835167 system_pods.go:86] 8 kube-system pods found
I1121 14:44:35.130555 2835167 system_pods.go:89] "coredns-5dd5756b68-86stv" [6a48c3f2-f439-40e1-885b-5850f95d1ffc] Running
I1121 14:44:35.130563 2835167 system_pods.go:89] "etcd-old-k8s-version-092258" [bbb172b1-cd74-44e9-ba24-92155ea08be4] Running
I1121 14:44:35.130568 2835167 system_pods.go:89] "kindnet-tfn5q" [6bec8380-6059-40d0-b0ed-6c3906f84591] Running
I1121 14:44:35.130573 2835167 system_pods.go:89] "kube-apiserver-old-k8s-version-092258" [adf091a2-7b6d-4ba0-a537-9c5f7f93c471] Running
I1121 14:44:35.130579 2835167 system_pods.go:89] "kube-controller-manager-old-k8s-version-092258" [01f77916-588d-469c-b175-3bbcdfe34ce8] Running
I1121 14:44:35.130582 2835167 system_pods.go:89] "kube-proxy-tdwt5" [94e025a3-f19d-40ce-b6a6-9e2eb3b8f998] Running
I1121 14:44:35.130586 2835167 system_pods.go:89] "kube-scheduler-old-k8s-version-092258" [7dfed185-93bd-4218-9c17-a6105d34022f] Running
I1121 14:44:35.130590 2835167 system_pods.go:89] "storage-provisioner" [a31c361f-8fb6-4726-a554-e70884e4d16e] Running
I1121 14:44:35.130598 2835167 system_pods.go:126] duration metric: took 1.531191935s to wait for k8s-apps to be running ...
I1121 14:44:35.130606 2835167 system_svc.go:44] waiting for kubelet service to be running ...
I1121 14:44:35.130663 2835167 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1121 14:44:35.145395 2835167 system_svc.go:56] duration metric: took 14.776546ms WaitForService to wait for kubelet
I1121 14:44:35.145455 2835167 kubeadm.go:587] duration metric: took 15.350619907s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1121 14:44:35.145475 2835167 node_conditions.go:102] verifying NodePressure condition ...
I1121 14:44:35.148334 2835167 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
I1121 14:44:35.148369 2835167 node_conditions.go:123] node cpu capacity is 2
I1121 14:44:35.148382 2835167 node_conditions.go:105] duration metric: took 2.896581ms to run NodePressure ...
I1121 14:44:35.148393 2835167 start.go:242] waiting for startup goroutines ...
I1121 14:44:35.148401 2835167 start.go:247] waiting for cluster config update ...
I1121 14:44:35.148412 2835167 start.go:256] writing updated cluster config ...
I1121 14:44:35.148743 2835167 ssh_runner.go:195] Run: rm -f paused
I1121 14:44:35.152681 2835167 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1121 14:44:35.157000 2835167 pod_ready.go:83] waiting for pod "coredns-5dd5756b68-86stv" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:44:35.162556 2835167 pod_ready.go:94] pod "coredns-5dd5756b68-86stv" is "Ready"
I1121 14:44:35.162601 2835167 pod_ready.go:86] duration metric: took 5.502719ms for pod "coredns-5dd5756b68-86stv" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:44:35.166472 2835167 pod_ready.go:83] waiting for pod "etcd-old-k8s-version-092258" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:44:35.171935 2835167 pod_ready.go:94] pod "etcd-old-k8s-version-092258" is "Ready"
I1121 14:44:35.171965 2835167 pod_ready.go:86] duration metric: took 5.463835ms for pod "etcd-old-k8s-version-092258" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:44:35.175582 2835167 pod_ready.go:83] waiting for pod "kube-apiserver-old-k8s-version-092258" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:44:35.181518 2835167 pod_ready.go:94] pod "kube-apiserver-old-k8s-version-092258" is "Ready"
I1121 14:44:35.181551 2835167 pod_ready.go:86] duration metric: took 5.941771ms for pod "kube-apiserver-old-k8s-version-092258" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:44:35.184926 2835167 pod_ready.go:83] waiting for pod "kube-controller-manager-old-k8s-version-092258" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:44:35.557460 2835167 pod_ready.go:94] pod "kube-controller-manager-old-k8s-version-092258" is "Ready"
I1121 14:44:35.557489 2835167 pod_ready.go:86] duration metric: took 372.537001ms for pod "kube-controller-manager-old-k8s-version-092258" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:44:35.757817 2835167 pod_ready.go:83] waiting for pod "kube-proxy-tdwt5" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:44:36.157592 2835167 pod_ready.go:94] pod "kube-proxy-tdwt5" is "Ready"
I1121 14:44:36.157618 2835167 pod_ready.go:86] duration metric: took 399.771111ms for pod "kube-proxy-tdwt5" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:44:36.357529 2835167 pod_ready.go:83] waiting for pod "kube-scheduler-old-k8s-version-092258" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:44:36.757566 2835167 pod_ready.go:94] pod "kube-scheduler-old-k8s-version-092258" is "Ready"
I1121 14:44:36.757596 2835167 pod_ready.go:86] duration metric: took 400.036784ms for pod "kube-scheduler-old-k8s-version-092258" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:44:36.757610 2835167 pod_ready.go:40] duration metric: took 1.604896006s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1121 14:44:36.818445 2835167 start.go:628] kubectl: 1.33.2, cluster: 1.28.0 (minor skew: 5)
I1121 14:44:36.821296 2835167 out.go:203]
W1121 14:44:36.824281 2835167 out.go:285] ! /usr/local/bin/kubectl is version 1.33.2, which may have incompatibilities with Kubernetes 1.28.0.
I1121 14:44:36.827383 2835167 out.go:179] - Want kubectl v1.28.0? Try 'minikube kubectl -- get pods -A'
I1121 14:44:36.830301 2835167 out.go:179] * Done! kubectl is now configured to use "old-k8s-version-092258" cluster and "default" namespace by default
==> container status <==
CONTAINER       IMAGE           CREATED            STATE     NAME                      ATTEMPT   POD ID          POD                                               NAMESPACE
ab7b2c1339a58   1611cd07b61d5   9 seconds ago      Running   busybox                   0         befa3559e32d9   busybox                                           default
4fa0544fe52cc   97e04611ad434   15 seconds ago     Running   coredns                   0         b58b59f73a24b   coredns-5dd5756b68-86stv                          kube-system
c6ace07879b84   ba04bb24b9575   15 seconds ago     Running   storage-provisioner       0         3680e435bb193   storage-provisioner                               kube-system
495595ef81ee7   b1a8c6f707935   26 seconds ago     Running   kindnet-cni               0         f4ddede8f051f   kindnet-tfn5q                                     kube-system
630ebb9fe56a1   940f54a5bcae9   28 seconds ago     Running   kube-proxy                0         1812faa70a69a   kube-proxy-tdwt5                                  kube-system
331a280f7d8fb   46cc66ccc7c19   48 seconds ago     Running   kube-controller-manager   0         9d7554dad7608   kube-controller-manager-old-k8s-version-092258    kube-system
46391c1bd1fc7   762dce4090c5f   48 seconds ago     Running   kube-scheduler            0         88bf0a72d6a98   kube-scheduler-old-k8s-version-092258             kube-system
32a76684e0ad4   9cdd6470f48c8   48 seconds ago     Running   etcd                      0         edaf6d16372ae   etcd-old-k8s-version-092258                       kube-system
2e1cd1261e99f   00543d2fe5d71   49 seconds ago     Running   kube-apiserver            0         58f4b63de6fd5   kube-apiserver-old-k8s-version-092258             kube-system
==> containerd <==
Nov 21 14:44:33 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:33.681687355Z" level=info msg="CreateContainer within sandbox \"3680e435bb193d749f6cac5ee0a23ca21a777ba606c46a9f454cb42ef4060e47\" for &ContainerMetadata{Name:storage-provisioner,Attempt:0,} returns container id \"c6ace07879b84e705bc8b532f8cd9162404b63746ad9faeae44e245e26539ead\""
Nov 21 14:44:33 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:33.685318192Z" level=info msg="StartContainer for \"c6ace07879b84e705bc8b532f8cd9162404b63746ad9faeae44e245e26539ead\""
Nov 21 14:44:33 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:33.689247614Z" level=info msg="connecting to shim c6ace07879b84e705bc8b532f8cd9162404b63746ad9faeae44e245e26539ead" address="unix:///run/containerd/s/6e69ecb1899b9e75727f8fe7f211e1f82d40f965205bb1565eeae343c2bafd56" protocol=ttrpc version=3
Nov 21 14:44:33 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:33.690732150Z" level=info msg="CreateContainer within sandbox \"b58b59f73a24bb52a5f6c210ec1d0dfbddbbc55dbc0fd609423879994aa0b8ea\" for &ContainerMetadata{Name:coredns,Attempt:0,} returns container id \"4fa0544fe52cc0b0b57fcb28182f1f20dc7c79b3ef53dcb6dc677efecd5a9cc9\""
Nov 21 14:44:33 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:33.693417924Z" level=info msg="StartContainer for \"4fa0544fe52cc0b0b57fcb28182f1f20dc7c79b3ef53dcb6dc677efecd5a9cc9\""
Nov 21 14:44:33 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:33.696432221Z" level=info msg="connecting to shim 4fa0544fe52cc0b0b57fcb28182f1f20dc7c79b3ef53dcb6dc677efecd5a9cc9" address="unix:///run/containerd/s/a5f36c12d3eba8a08addb4ff6f6c45f4b1f35adc7b831563646c8ea27992d003" protocol=ttrpc version=3
Nov 21 14:44:33 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:33.785690082Z" level=info msg="StartContainer for \"4fa0544fe52cc0b0b57fcb28182f1f20dc7c79b3ef53dcb6dc677efecd5a9cc9\" returns successfully"
Nov 21 14:44:33 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:33.815503407Z" level=info msg="StartContainer for \"c6ace07879b84e705bc8b532f8cd9162404b63746ad9faeae44e245e26539ead\" returns successfully"
Nov 21 14:44:37 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:37.378935963Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:4fd396a4-7f86-4bac-b99a-f7427bb5deb9,Namespace:default,Attempt:0,}"
Nov 21 14:44:37 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:37.481124358Z" level=info msg="connecting to shim befa3559e32d903c1abf0bc725ae5f12a26cdbb8b3fb4a57980282d9931d9d26" address="unix:///run/containerd/s/71dcf6bf5df9beb4a3d248e771df5a382c0db1f3a2b82a021424cdeb0bc07ccb" namespace=k8s.io protocol=ttrpc version=3
Nov 21 14:44:37 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:37.544176700Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:4fd396a4-7f86-4bac-b99a-f7427bb5deb9,Namespace:default,Attempt:0,} returns sandbox id \"befa3559e32d903c1abf0bc725ae5f12a26cdbb8b3fb4a57980282d9931d9d26\""
Nov 21 14:44:37 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:37.546307355Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\""
Nov 21 14:44:39 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:39.885153902Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 21 14:44:39 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:39.887206340Z" level=info msg="stop pulling image gcr.io/k8s-minikube/busybox:1.28.4-glibc: active requests=0, bytes read=1937188"
Nov 21 14:44:39 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:39.889567142Z" level=info msg="ImageCreate event name:\"sha256:1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 21 14:44:39 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:39.893770222Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 21 14:44:39 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:39.894536985Z" level=info msg="Pulled image \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" with image id \"sha256:1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c\", repo tag \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\", repo digest \"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\", size \"1935750\" in 2.348184988s"
Nov 21 14:44:39 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:39.894577862Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" returns image reference \"sha256:1611cd07b61d57dbbfebe6db242513fd51e1c02d20ba08af17a45837d86a8a8c\""
Nov 21 14:44:39 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:39.898592935Z" level=info msg="CreateContainer within sandbox \"befa3559e32d903c1abf0bc725ae5f12a26cdbb8b3fb4a57980282d9931d9d26\" for container &ContainerMetadata{Name:busybox,Attempt:0,}"
Nov 21 14:44:39 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:39.912721761Z" level=info msg="Container ab7b2c1339a58ca880ca0312fd5f7d62085c7261261bf3758b721a01af22d534: CDI devices from CRI Config.CDIDevices: []"
Nov 21 14:44:39 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:39.926024035Z" level=info msg="CreateContainer within sandbox \"befa3559e32d903c1abf0bc725ae5f12a26cdbb8b3fb4a57980282d9931d9d26\" for &ContainerMetadata{Name:busybox,Attempt:0,} returns container id \"ab7b2c1339a58ca880ca0312fd5f7d62085c7261261bf3758b721a01af22d534\""
Nov 21 14:44:39 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:39.927048695Z" level=info msg="StartContainer for \"ab7b2c1339a58ca880ca0312fd5f7d62085c7261261bf3758b721a01af22d534\""
Nov 21 14:44:39 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:39.928757405Z" level=info msg="connecting to shim ab7b2c1339a58ca880ca0312fd5f7d62085c7261261bf3758b721a01af22d534" address="unix:///run/containerd/s/71dcf6bf5df9beb4a3d248e771df5a382c0db1f3a2b82a021424cdeb0bc07ccb" protocol=ttrpc version=3
Nov 21 14:44:39 old-k8s-version-092258 containerd[760]: time="2025-11-21T14:44:39.996819622Z" level=info msg="StartContainer for \"ab7b2c1339a58ca880ca0312fd5f7d62085c7261261bf3758b721a01af22d534\" returns successfully"
Nov 21 14:44:46 old-k8s-version-092258 containerd[760]: E1121 14:44:46.197863 760 websocket.go:100] "Unhandled Error" err="unable to upgrade websocket connection: websocket server finished before becoming ready" logger="UnhandledError"
==> coredns [4fa0544fe52cc0b0b57fcb28182f1f20dc7c79b3ef53dcb6dc677efecd5a9cc9] <==
.:53
[INFO] plugin/reload: Running configuration SHA512 = b7aacdf6a6aa730aafe4d018cac9b7b5ecfb346cba84a99f64521f87aef8b4958639c1cf97967716465791d05bd38f372615327b7cb1d93c850bae532744d54d
CoreDNS-1.10.1
linux/arm64, go1.20, 055b2c3
[INFO] 127.0.0.1:36979 - 27014 "HINFO IN 2294269810657567619.5005884824654199478. udp 57 false 512" NXDOMAIN qr,rd,ra 57 0.03500164s
==> describe nodes <==
Name:               old-k8s-version-092258
Roles:              control-plane
Labels:             beta.kubernetes.io/arch=arm64
                    beta.kubernetes.io/os=linux
                    kubernetes.io/arch=arm64
                    kubernetes.io/hostname=old-k8s-version-092258
                    kubernetes.io/os=linux
                    minikube.k8s.io/commit=29e0798733fefbdc471fd2bbb38f6a7ae2a26162
                    minikube.k8s.io/name=old-k8s-version-092258
                    minikube.k8s.io/primary=true
                    minikube.k8s.io/updated_at=2025_11_21T14_44_08_0700
                    minikube.k8s.io/version=v1.37.0
                    node-role.kubernetes.io/control-plane=
                    node.kubernetes.io/exclude-from-external-load-balancers=
Annotations:        kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock
                    node.alpha.kubernetes.io/ttl: 0
                    volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp:  Fri, 21 Nov 2025 14:44:04 +0000
Taints:             <none>
Unschedulable:      false
Lease:
  HolderIdentity:  old-k8s-version-092258
  AcquireTime:     <unset>
  RenewTime:       Fri, 21 Nov 2025 14:44:48 +0000
Conditions:
  Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                        Message
  ----             ------  -----------------                 ------------------                ------                        -------
  MemoryPressure   False   Fri, 21 Nov 2025 14:44:37 +0000   Fri, 21 Nov 2025 14:44:00 +0000   KubeletHasSufficientMemory    kubelet has sufficient memory available
  DiskPressure     False   Fri, 21 Nov 2025 14:44:37 +0000   Fri, 21 Nov 2025 14:44:00 +0000   KubeletHasNoDiskPressure      kubelet has no disk pressure
  PIDPressure      False   Fri, 21 Nov 2025 14:44:37 +0000   Fri, 21 Nov 2025 14:44:00 +0000   KubeletHasSufficientPID       kubelet has sufficient PID available
  Ready            True    Fri, 21 Nov 2025 14:44:37 +0000   Fri, 21 Nov 2025 14:44:33 +0000   KubeletReady                  kubelet is posting ready status
Addresses:
  InternalIP:  192.168.76.2
  Hostname:    old-k8s-version-092258
Capacity:
  cpu:                2
  ephemeral-storage:  203034800Ki
  hugepages-1Gi:      0
  hugepages-2Mi:      0
  hugepages-32Mi:     0
  hugepages-64Ki:     0
  memory:             8022296Ki
  pods:               110
Allocatable:
  cpu:                2
  ephemeral-storage:  203034800Ki
  hugepages-1Gi:      0
  hugepages-2Mi:      0
  hugepages-32Mi:     0
  hugepages-64Ki:     0
  memory:             8022296Ki
  pods:               110
System Info:
  Machine ID:                 952c288fdad5a6f53a4deda5691cff59
  System UUID:                9e4fe947-6f95-4914-9cd3-ccd713480a21
  Boot ID:                    41b0e09d-5a9a-49c9-8980-dca608ba3fce
  Kernel Version:             5.15.0-1084-aws
  OS Image:                   Debian GNU/Linux 12 (bookworm)
  Operating System:           linux
  Architecture:               arm64
  Container Runtime Version:  containerd://2.1.5
  Kubelet Version:            v1.28.0
  Kube-Proxy Version:         v1.28.0
PodCIDR:                      10.244.0.0/24
PodCIDRs:                     10.244.0.0/24
Non-terminated Pods:          (9 in total)
  Namespace    Name                                              CPU Requests  CPU Limits  Memory Requests  Memory Limits  Age
  ---------    ----                                              ------------  ----------  ---------------  -------------  ---
  default      busybox                                           0 (0%)        0 (0%)      0 (0%)           0 (0%)         12s
  kube-system  coredns-5dd5756b68-86stv                          100m (5%)     0 (0%)      70Mi (0%)        170Mi (2%)     29s
  kube-system  etcd-old-k8s-version-092258                       100m (5%)     0 (0%)      100Mi (1%)       0 (0%)         42s
  kube-system  kindnet-tfn5q                                     100m (5%)     100m (5%)   50Mi (0%)        50Mi (0%)      30s
  kube-system  kube-apiserver-old-k8s-version-092258             250m (12%)    0 (0%)      0 (0%)           0 (0%)         42s
  kube-system  kube-controller-manager-old-k8s-version-092258    200m (10%)    0 (0%)      0 (0%)           0 (0%)         42s
  kube-system  kube-proxy-tdwt5                                  0 (0%)        0 (0%)      0 (0%)           0 (0%)         30s
  kube-system  kube-scheduler-old-k8s-version-092258             100m (5%)     0 (0%)      0 (0%)           0 (0%)         42s
  kube-system  storage-provisioner                               0 (0%)        0 (0%)      0 (0%)           0 (0%)         28s
Allocated resources:
  (Total limits may be over 100 percent, i.e., overcommitted.)
  Resource           Requests    Limits
  --------           --------    ------
  cpu                850m (42%)  100m (5%)
  memory             220Mi (2%)  220Mi (2%)
  ephemeral-storage  0 (0%)      0 (0%)
  hugepages-1Gi      0 (0%)      0 (0%)
  hugepages-2Mi      0 (0%)      0 (0%)
  hugepages-32Mi     0 (0%)      0 (0%)
  hugepages-64Ki     0 (0%)      0 (0%)
Events:
  Type    Reason                   Age   From             Message
  ----    ------                   ----  ----             -------
  Normal  Starting                 28s   kube-proxy
  Normal  Starting                 42s   kubelet          Starting kubelet.
  Normal  NodeHasSufficientMemory  42s   kubelet          Node old-k8s-version-092258 status is now: NodeHasSufficientMemory
  Normal  NodeHasNoDiskPressure    42s   kubelet          Node old-k8s-version-092258 status is now: NodeHasNoDiskPressure
  Normal  NodeHasSufficientPID     42s   kubelet          Node old-k8s-version-092258 status is now: NodeHasSufficientPID
  Normal  NodeAllocatableEnforced  42s   kubelet          Updated Node Allocatable limit across pods
  Normal  RegisteredNode           30s   node-controller  Node old-k8s-version-092258 event: Registered Node old-k8s-version-092258 in Controller
  Normal  NodeReady                16s   kubelet          Node old-k8s-version-092258 status is now: NodeReady
==> dmesg <==
[Nov21 13:02] overlayfs: idmapped layers are currently not supported
[Nov21 13:03] overlayfs: idmapped layers are currently not supported
[Nov21 13:06] overlayfs: idmapped layers are currently not supported
[Nov21 13:08] overlayfs: idmapped layers are currently not supported
[Nov21 13:09] overlayfs: idmapped layers are currently not supported
[Nov21 13:10] overlayfs: idmapped layers are currently not supported
[ +19.808801] overlayfs: idmapped layers are currently not supported
[Nov21 13:11] overlayfs: idmapped layers are currently not supported
[Nov21 13:12] overlayfs: idmapped layers are currently not supported
[Nov21 13:13] overlayfs: idmapped layers are currently not supported
[Nov21 13:14] overlayfs: idmapped layers are currently not supported
[Nov21 13:15] overlayfs: idmapped layers are currently not supported
[ +16.772572] overlayfs: idmapped layers are currently not supported
[Nov21 13:16] overlayfs: idmapped layers are currently not supported
[Nov21 13:17] overlayfs: idmapped layers are currently not supported
[ +27.396777] overlayfs: idmapped layers are currently not supported
[Nov21 13:18] overlayfs: idmapped layers are currently not supported
[ +25.430119] overlayfs: idmapped layers are currently not supported
[Nov21 13:19] overlayfs: idmapped layers are currently not supported
[Nov21 13:20] overlayfs: idmapped layers are currently not supported
[Nov21 13:21] overlayfs: idmapped layers are currently not supported
[Nov21 13:22] overlayfs: idmapped layers are currently not supported
[Nov21 13:23] overlayfs: idmapped layers are currently not supported
[Nov21 13:24] overlayfs: idmapped layers are currently not supported
[Nov21 13:55] kauditd_printk_skb: 8 callbacks suppressed
==> etcd [32a76684e0ad48afa24dffa56bbd612225875cea5526f2fe91da5620cdd3737e] <==
{"level":"info","ts":"2025-11-21T14:44:00.857351Z","caller":"fileutil/purge.go:44","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/snap","suffix":"snap.db","max":5,"interval":"30s"}
{"level":"info","ts":"2025-11-21T14:44:00.860773Z","caller":"fileutil/purge.go:44","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/snap","suffix":"snap","max":5,"interval":"30s"}
{"level":"info","ts":"2025-11-21T14:44:00.860806Z","caller":"fileutil/purge.go:44","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/wal","suffix":"wal","max":5,"interval":"30s"}
{"level":"info","ts":"2025-11-21T14:44:00.857529Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 switched to configuration voters=(16896983918768216326)"}
{"level":"info","ts":"2025-11-21T14:44:00.861198Z","caller":"membership/cluster.go:421","msg":"added member","cluster-id":"6f20f2c4b2fb5f8a","local-member-id":"ea7e25599daad906","added-peer-id":"ea7e25599daad906","added-peer-peer-urls":["https://192.168.76.2:2380"]}
{"level":"info","ts":"2025-11-21T14:44:00.857558Z","caller":"embed/etcd.go:278","msg":"now serving peer/client/metrics","local-member-id":"ea7e25599daad906","initial-advertise-peer-urls":["https://192.168.76.2:2380"],"listen-peer-urls":["https://192.168.76.2:2380"],"advertise-client-urls":["https://192.168.76.2:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.76.2:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
{"level":"info","ts":"2025-11-21T14:44:00.857577Z","caller":"embed/etcd.go:855","msg":"serving metrics","address":"http://127.0.0.1:2381"}
{"level":"info","ts":"2025-11-21T14:44:01.029245Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 is starting a new election at term 1"}
{"level":"info","ts":"2025-11-21T14:44:01.029466Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 became pre-candidate at term 1"}
{"level":"info","ts":"2025-11-21T14:44:01.02957Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 received MsgPreVoteResp from ea7e25599daad906 at term 1"}
{"level":"info","ts":"2025-11-21T14:44:01.029683Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 became candidate at term 2"}
{"level":"info","ts":"2025-11-21T14:44:01.029774Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 received MsgVoteResp from ea7e25599daad906 at term 2"}
{"level":"info","ts":"2025-11-21T14:44:01.029858Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"ea7e25599daad906 became leader at term 2"}
{"level":"info","ts":"2025-11-21T14:44:01.029946Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: ea7e25599daad906 elected leader ea7e25599daad906 at term 2"}
{"level":"info","ts":"2025-11-21T14:44:01.033213Z","caller":"etcdserver/server.go:2062","msg":"published local member to cluster through raft","local-member-id":"ea7e25599daad906","local-member-attributes":"{Name:old-k8s-version-092258 ClientURLs:[https://192.168.76.2:2379]}","request-path":"/0/members/ea7e25599daad906/attributes","cluster-id":"6f20f2c4b2fb5f8a","publish-timeout":"7s"}
{"level":"info","ts":"2025-11-21T14:44:01.033415Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-21T14:44:01.034523Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2025-11-21T14:44:01.034765Z","caller":"etcdserver/server.go:2571","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-21T14:44:01.035192Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-21T14:44:01.036885Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"6f20f2c4b2fb5f8a","local-member-id":"ea7e25599daad906","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-21T14:44:01.04112Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-21T14:44:01.041303Z","caller":"etcdserver/server.go:2595","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-21T14:44:01.055053Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.76.2:2379"}
{"level":"info","ts":"2025-11-21T14:44:01.060057Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2025-11-21T14:44:01.060254Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
==> kernel <==
14:44:49 up 19:27, 0 user, load average: 2.25, 3.09, 2.75
Linux old-k8s-version-092258 5.15.0-1084-aws #91~20.04.1-Ubuntu SMP Fri May 2 07:00:04 UTC 2025 aarch64 GNU/Linux
PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"
==> kindnet [495595ef81ee7d983a4b62890080114a468713ef14bf361720fb1ef51e30f35d] <==
I1121 14:44:22.827794 1 main.go:109] connected to apiserver: https://10.96.0.1:443
I1121 14:44:22.828022 1 main.go:139] hostIP = 192.168.76.2
podIP = 192.168.76.2
I1121 14:44:22.828147 1 main.go:148] setting mtu 1500 for CNI
I1121 14:44:22.828164 1 main.go:178] kindnetd IP family: "ipv4"
I1121 14:44:22.828175 1 main.go:182] noMask IPv4 subnets: [10.244.0.0/16]
time="2025-11-21T14:44:23Z" level=info msg="Created plugin 10-kube-network-policies (kindnetd, handles RunPodSandbox,RemovePodSandbox)"
I1121 14:44:23.024438 1 controller.go:377] "Starting controller" name="kube-network-policies"
I1121 14:44:23.024516 1 controller.go:381] "Waiting for informer caches to sync"
I1121 14:44:23.024545 1 shared_informer.go:350] "Waiting for caches to sync" controller="kube-network-policies"
I1121 14:44:23.025736 1 controller.go:390] nri plugin exited: failed to connect to NRI service: dial unix /var/run/nri/nri.sock: connect: no such file or directory
I1121 14:44:23.224664 1 shared_informer.go:357] "Caches are synced" controller="kube-network-policies"
I1121 14:44:23.224747 1 metrics.go:72] Registering metrics
I1121 14:44:23.224843 1 controller.go:711] "Syncing nftables rules"
I1121 14:44:33.032002 1 main.go:297] Handling node with IPs: map[192.168.76.2:{}]
I1121 14:44:33.032055 1 main.go:301] handling current node
I1121 14:44:43.024839 1 main.go:297] Handling node with IPs: map[192.168.76.2:{}]
I1121 14:44:43.024870 1 main.go:301] handling current node
==> kube-apiserver [2e1cd1261e99f5cf421f076a966eedd90258d75cd1735ec5e4bc9ae1d5576945] <==
I1121 14:44:04.361764 1 shared_informer.go:318] Caches are synced for cluster_authentication_trust_controller
I1121 14:44:04.361814 1 cache.go:39] Caches are synced for APIServiceRegistrationController controller
I1121 14:44:04.367613 1 shared_informer.go:318] Caches are synced for crd-autoregister
I1121 14:44:04.367684 1 aggregator.go:166] initial CRD sync complete...
I1121 14:44:04.367697 1 autoregister_controller.go:141] Starting autoregister controller
I1121 14:44:04.367705 1 cache.go:32] Waiting for caches to sync for autoregister controller
I1121 14:44:04.367714 1 cache.go:39] Caches are synced for autoregister controller
I1121 14:44:04.374026 1 controller.go:624] quota admission added evaluator for: namespaces
I1121 14:44:04.403168 1 controller.go:624] quota admission added evaluator for: leases.coordination.k8s.io
I1121 14:44:04.422675 1 shared_informer.go:318] Caches are synced for node_authorizer
I1121 14:44:04.968994 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I1121 14:44:04.974442 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I1121 14:44:04.974470 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I1121 14:44:05.722694 1 controller.go:624] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1121 14:44:05.789801 1 controller.go:624] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1121 14:44:05.889316 1 alloc.go:330] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W1121 14:44:05.896471 1 lease.go:263] Resetting endpoints for master service "kubernetes" to [192.168.76.2]
I1121 14:44:05.897760 1 controller.go:624] quota admission added evaluator for: endpoints
I1121 14:44:05.902883 1 controller.go:624] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1121 14:44:06.203440 1 controller.go:624] quota admission added evaluator for: serviceaccounts
I1121 14:44:07.370770 1 controller.go:624] quota admission added evaluator for: deployments.apps
I1121 14:44:07.383830 1 alloc.go:330] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I1121 14:44:07.398248 1 controller.go:624] quota admission added evaluator for: daemonsets.apps
I1121 14:44:19.834381 1 controller.go:624] quota admission added evaluator for: controllerrevisions.apps
I1121 14:44:20.024025 1 controller.go:624] quota admission added evaluator for: replicasets.apps
==> kube-controller-manager [331a280f7d8fb0893c46a22085825d84571038b23952dd64524b062bc7f08b74] <==
I1121 14:44:19.212645 1 shared_informer.go:318] Caches are synced for endpoint
I1121 14:44:19.212738 1 shared_informer.go:318] Caches are synced for HPA
I1121 14:44:19.212773 1 shared_informer.go:318] Caches are synced for disruption
I1121 14:44:19.212803 1 shared_informer.go:318] Caches are synced for attach detach
I1121 14:44:19.627294 1 shared_informer.go:318] Caches are synced for garbage collector
I1121 14:44:19.659367 1 shared_informer.go:318] Caches are synced for garbage collector
I1121 14:44:19.659575 1 garbagecollector.go:166] "All resource monitors have synced. Proceeding to collect garbage"
I1121 14:44:19.901701 1 event.go:307] "Event occurred" object="kube-system/kube-proxy" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-tdwt5"
I1121 14:44:19.929907 1 event.go:307] "Event occurred" object="kube-system/kindnet" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kindnet-tfn5q"
I1121 14:44:20.044696 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set coredns-5dd5756b68 to 2"
I1121 14:44:20.152846 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-v7mnp"
I1121 14:44:20.183528 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-86stv"
I1121 14:44:20.224114 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="183.103851ms"
I1121 14:44:20.243991 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="19.819807ms"
I1121 14:44:20.244109 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="83.6µs"
I1121 14:44:21.107620 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set coredns-5dd5756b68 to 1 from 2"
I1121 14:44:21.134973 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: coredns-5dd5756b68-v7mnp"
I1121 14:44:21.155140 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="49.168473ms"
I1121 14:44:21.171976 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="16.779877ms"
I1121 14:44:21.172179 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="46.497µs"
I1121 14:44:33.168291 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="88.391µs"
I1121 14:44:33.193460 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="100.346µs"
I1121 14:44:34.128063 1 node_lifecycle_controller.go:1048] "Controller detected that some Nodes are Ready. Exiting master disruption mode"
I1121 14:44:34.848411 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="19.00588ms"
I1121 14:44:34.848685 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="129.432µs"
==> kube-proxy [630ebb9fe56a1bea1ef2dfe24de2086594eb0afbdaf547e41ce7c777d9eb7705] <==
I1121 14:44:20.860188 1 server_others.go:69] "Using iptables proxy"
I1121 14:44:20.878393 1 node.go:141] Successfully retrieved node IP: 192.168.76.2
I1121 14:44:20.931156 1 server.go:632] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I1121 14:44:20.936886 1 server_others.go:152] "Using iptables Proxier"
I1121 14:44:20.936939 1 server_others.go:421] "Detect-local-mode set to ClusterCIDR, but no cluster CIDR for family" ipFamily="IPv6"
I1121 14:44:20.936948 1 server_others.go:438] "Defaulting to no-op detect-local"
I1121 14:44:20.936971 1 proxier.go:251] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
I1121 14:44:20.937761 1 server.go:846] "Version info" version="v1.28.0"
I1121 14:44:20.937784 1 server.go:848] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1121 14:44:20.938544 1 config.go:188] "Starting service config controller"
I1121 14:44:20.938593 1 shared_informer.go:311] Waiting for caches to sync for service config
I1121 14:44:20.938625 1 config.go:97] "Starting endpoint slice config controller"
I1121 14:44:20.938635 1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
I1121 14:44:20.940292 1 config.go:315] "Starting node config controller"
I1121 14:44:20.940306 1 shared_informer.go:311] Waiting for caches to sync for node config
I1121 14:44:21.040184 1 shared_informer.go:318] Caches are synced for endpoint slice config
I1121 14:44:21.040242 1 shared_informer.go:318] Caches are synced for service config
I1121 14:44:21.040487 1 shared_informer.go:318] Caches are synced for node config
==> kube-scheduler [46391c1bd1fc737d22bd847c1d63f9bd14e4d892ef33d465e9204dc377dd6002] <==
W1121 14:44:04.821909 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E1121 14:44:04.822028 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
W1121 14:44:04.822197 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E1121 14:44:04.822220 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
W1121 14:44:04.824601 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E1121 14:44:04.825484 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
W1121 14:44:04.824774 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
W1121 14:44:04.825902 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E1121 14:44:04.826065 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E1121 14:44:04.826044 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
W1121 14:44:04.824991 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E1121 14:44:04.826420 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
W1121 14:44:04.825212 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
W1121 14:44:04.825284 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
W1121 14:44:04.825347 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
W1121 14:44:04.825382 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
W1121 14:44:04.825431 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
W1121 14:44:04.824928 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E1121 14:44:04.826802 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E1121 14:44:04.826945 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E1121 14:44:04.827063 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E1121 14:44:04.827207 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E1121 14:44:04.827355 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E1121 14:44:04.827495 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
I1121 14:44:06.304765 1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Nov 21 14:44:19 old-k8s-version-092258 kubelet[1526]: I1121 14:44:19.149863 1526 kuberuntime_manager.go:1463] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/24"
Nov 21 14:44:19 old-k8s-version-092258 kubelet[1526]: I1121 14:44:19.150498 1526 kubelet_network.go:61] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24"
Nov 21 14:44:19 old-k8s-version-092258 kubelet[1526]: I1121 14:44:19.918591 1526 topology_manager.go:215] "Topology Admit Handler" podUID="94e025a3-f19d-40ce-b6a6-9e2eb3b8f998" podNamespace="kube-system" podName="kube-proxy-tdwt5"
Nov 21 14:44:19 old-k8s-version-092258 kubelet[1526]: I1121 14:44:19.954210 1526 topology_manager.go:215] "Topology Admit Handler" podUID="6bec8380-6059-40d0-b0ed-6c3906f84591" podNamespace="kube-system" podName="kindnet-tfn5q"
Nov 21 14:44:19 old-k8s-version-092258 kubelet[1526]: I1121 14:44:19.980360 1526 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/94e025a3-f19d-40ce-b6a6-9e2eb3b8f998-kube-proxy\") pod \"kube-proxy-tdwt5\" (UID: \"94e025a3-f19d-40ce-b6a6-9e2eb3b8f998\") " pod="kube-system/kube-proxy-tdwt5"
Nov 21 14:44:19 old-k8s-version-092258 kubelet[1526]: I1121 14:44:19.980619 1526 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/94e025a3-f19d-40ce-b6a6-9e2eb3b8f998-xtables-lock\") pod \"kube-proxy-tdwt5\" (UID: \"94e025a3-f19d-40ce-b6a6-9e2eb3b8f998\") " pod="kube-system/kube-proxy-tdwt5"
Nov 21 14:44:19 old-k8s-version-092258 kubelet[1526]: I1121 14:44:19.980760 1526 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/94e025a3-f19d-40ce-b6a6-9e2eb3b8f998-lib-modules\") pod \"kube-proxy-tdwt5\" (UID: \"94e025a3-f19d-40ce-b6a6-9e2eb3b8f998\") " pod="kube-system/kube-proxy-tdwt5"
Nov 21 14:44:19 old-k8s-version-092258 kubelet[1526]: I1121 14:44:19.980886 1526 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-cfg\" (UniqueName: \"kubernetes.io/host-path/6bec8380-6059-40d0-b0ed-6c3906f84591-cni-cfg\") pod \"kindnet-tfn5q\" (UID: \"6bec8380-6059-40d0-b0ed-6c3906f84591\") " pod="kube-system/kindnet-tfn5q"
Nov 21 14:44:19 old-k8s-version-092258 kubelet[1526]: I1121 14:44:19.981004 1526 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/6bec8380-6059-40d0-b0ed-6c3906f84591-xtables-lock\") pod \"kindnet-tfn5q\" (UID: \"6bec8380-6059-40d0-b0ed-6c3906f84591\") " pod="kube-system/kindnet-tfn5q"
Nov 21 14:44:19 old-k8s-version-092258 kubelet[1526]: I1121 14:44:19.981145 1526 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/6bec8380-6059-40d0-b0ed-6c3906f84591-lib-modules\") pod \"kindnet-tfn5q\" (UID: \"6bec8380-6059-40d0-b0ed-6c3906f84591\") " pod="kube-system/kindnet-tfn5q"
Nov 21 14:44:19 old-k8s-version-092258 kubelet[1526]: I1121 14:44:19.981319 1526 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m5lf7\" (UniqueName: \"kubernetes.io/projected/6bec8380-6059-40d0-b0ed-6c3906f84591-kube-api-access-m5lf7\") pod \"kindnet-tfn5q\" (UID: \"6bec8380-6059-40d0-b0ed-6c3906f84591\") " pod="kube-system/kindnet-tfn5q"
Nov 21 14:44:19 old-k8s-version-092258 kubelet[1526]: I1121 14:44:19.981442 1526 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g2rxs\" (UniqueName: \"kubernetes.io/projected/94e025a3-f19d-40ce-b6a6-9e2eb3b8f998-kube-api-access-g2rxs\") pod \"kube-proxy-tdwt5\" (UID: \"94e025a3-f19d-40ce-b6a6-9e2eb3b8f998\") " pod="kube-system/kube-proxy-tdwt5"
Nov 21 14:44:22 old-k8s-version-092258 kubelet[1526]: I1121 14:44:22.794618 1526 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kube-proxy-tdwt5" podStartSLOduration=3.794572825 podCreationTimestamp="2025-11-21 14:44:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:44:21.775006119 +0000 UTC m=+14.440254134" watchObservedRunningTime="2025-11-21 14:44:22.794572825 +0000 UTC m=+15.459820816"
Nov 21 14:44:33 old-k8s-version-092258 kubelet[1526]: I1121 14:44:33.136665 1526 kubelet_node_status.go:493] "Fast updating node status as it just became ready"
Nov 21 14:44:33 old-k8s-version-092258 kubelet[1526]: I1121 14:44:33.168328 1526 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kindnet-tfn5q" podStartSLOduration=12.176504309 podCreationTimestamp="2025-11-21 14:44:19 +0000 UTC" firstStartedPulling="2025-11-21 14:44:20.481898213 +0000 UTC m=+13.147146196" lastFinishedPulling="2025-11-21 14:44:22.473675721 +0000 UTC m=+15.138923704" observedRunningTime="2025-11-21 14:44:22.795889554 +0000 UTC m=+15.461137546" watchObservedRunningTime="2025-11-21 14:44:33.168281817 +0000 UTC m=+25.833529808"
Nov 21 14:44:33 old-k8s-version-092258 kubelet[1526]: I1121 14:44:33.168810 1526 topology_manager.go:215] "Topology Admit Handler" podUID="6a48c3f2-f439-40e1-885b-5850f95d1ffc" podNamespace="kube-system" podName="coredns-5dd5756b68-86stv"
Nov 21 14:44:33 old-k8s-version-092258 kubelet[1526]: I1121 14:44:33.174944 1526 topology_manager.go:215] "Topology Admit Handler" podUID="a31c361f-8fb6-4726-a554-e70884e4d16e" podNamespace="kube-system" podName="storage-provisioner"
Nov 21 14:44:33 old-k8s-version-092258 kubelet[1526]: I1121 14:44:33.200360 1526 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ktnhs\" (UniqueName: \"kubernetes.io/projected/6a48c3f2-f439-40e1-885b-5850f95d1ffc-kube-api-access-ktnhs\") pod \"coredns-5dd5756b68-86stv\" (UID: \"6a48c3f2-f439-40e1-885b-5850f95d1ffc\") " pod="kube-system/coredns-5dd5756b68-86stv"
Nov 21 14:44:33 old-k8s-version-092258 kubelet[1526]: I1121 14:44:33.200594 1526 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/a31c361f-8fb6-4726-a554-e70884e4d16e-tmp\") pod \"storage-provisioner\" (UID: \"a31c361f-8fb6-4726-a554-e70884e4d16e\") " pod="kube-system/storage-provisioner"
Nov 21 14:44:33 old-k8s-version-092258 kubelet[1526]: I1121 14:44:33.200711 1526 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xxf7g\" (UniqueName: \"kubernetes.io/projected/a31c361f-8fb6-4726-a554-e70884e4d16e-kube-api-access-xxf7g\") pod \"storage-provisioner\" (UID: \"a31c361f-8fb6-4726-a554-e70884e4d16e\") " pod="kube-system/storage-provisioner"
Nov 21 14:44:33 old-k8s-version-092258 kubelet[1526]: I1121 14:44:33.200832 1526 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6a48c3f2-f439-40e1-885b-5850f95d1ffc-config-volume\") pod \"coredns-5dd5756b68-86stv\" (UID: \"6a48c3f2-f439-40e1-885b-5850f95d1ffc\") " pod="kube-system/coredns-5dd5756b68-86stv"
Nov 21 14:44:34 old-k8s-version-092258 kubelet[1526]: I1121 14:44:34.812385 1526 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=13.812339422 podCreationTimestamp="2025-11-21 14:44:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:44:34.811999567 +0000 UTC m=+27.477247550" watchObservedRunningTime="2025-11-21 14:44:34.812339422 +0000 UTC m=+27.477587405"
Nov 21 14:44:34 old-k8s-version-092258 kubelet[1526]: I1121 14:44:34.830835 1526 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/coredns-5dd5756b68-86stv" podStartSLOduration=14.83078559 podCreationTimestamp="2025-11-21 14:44:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:44:34.830509339 +0000 UTC m=+27.495757330" watchObservedRunningTime="2025-11-21 14:44:34.83078559 +0000 UTC m=+27.496033581"
Nov 21 14:44:37 old-k8s-version-092258 kubelet[1526]: I1121 14:44:37.064261 1526 topology_manager.go:215] "Topology Admit Handler" podUID="4fd396a4-7f86-4bac-b99a-f7427bb5deb9" podNamespace="default" podName="busybox"
Nov 21 14:44:37 old-k8s-version-092258 kubelet[1526]: I1121 14:44:37.128201 1526 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tmbgq\" (UniqueName: \"kubernetes.io/projected/4fd396a4-7f86-4bac-b99a-f7427bb5deb9-kube-api-access-tmbgq\") pod \"busybox\" (UID: \"4fd396a4-7f86-4bac-b99a-f7427bb5deb9\") " pod="default/busybox"
==> storage-provisioner [c6ace07879b84e705bc8b532f8cd9162404b63746ad9faeae44e245e26539ead] <==
I1121 14:44:33.821827 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1121 14:44:33.835269 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1121 14:44:33.835522 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I1121 14:44:33.844745 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I1121 14:44:33.845108 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_old-k8s-version-092258_43824c0e-5444-4d63-9465-8f0bcb9e3d2b!
I1121 14:44:33.845246 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"d6d6cfaa-85d7-41d0-9ba2-d501adb4d7fd", APIVersion:"v1", ResourceVersion:"394", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' old-k8s-version-092258_43824c0e-5444-4d63-9465-8f0bcb9e3d2b became leader
I1121 14:44:33.946309 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_old-k8s-version-092258_43824c0e-5444-4d63-9465-8f0bcb9e3d2b!
-- /stdout --
helpers_test.go:262: (dbg) Run: out/minikube-linux-arm64 status --format={{.APIServer}} -p old-k8s-version-092258 -n old-k8s-version-092258
helpers_test.go:269: (dbg) Run: kubectl --context old-k8s-version-092258 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:293: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: end of post-mortem logs <<<
helpers_test.go:294: ---------------------/post-mortem---------------------------------
--- FAIL: TestStartStop/group/old-k8s-version/serial/DeployApp (13.60s)