=== RUN TestStartStop/group/old-k8s-version/serial/DeployApp
start_stop_delete_test.go:194: (dbg) Run: kubectl --context old-k8s-version-128377 create -f testdata/busybox.yaml
start_stop_delete_test.go:194: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:352: "busybox" [bfaec734-d874-4dcb-b31f-feb87adccfca] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
helpers_test.go:352: "busybox" [bfaec734-d874-4dcb-b31f-feb87adccfca] Running
start_stop_delete_test.go:194: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: integration-test=busybox healthy within 9.003838321s
start_stop_delete_test.go:194: (dbg) Run: kubectl --context old-k8s-version-128377 exec busybox -- /bin/sh -c "ulimit -n"
start_stop_delete_test.go:194: 'ulimit -n' returned 1024, expected 1048576
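Read as a sequence, the failing step above is: create the busybox pod from testdata/busybox.yaml, wait up to 8m0s for the integration-test=busybox selector to report Running, then run `ulimit -n` inside the pod and compare it against the expected soft limit. A minimal Go sketch of that final check, assuming only the kubectl invocation shown in the log (ulimitInPod and the hard-coded expectation are illustrative, not minikube's actual test code):

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// ulimitInPod execs `ulimit -n` in a pod, mirroring the kubectl command
// logged above. Context and pod names come straight from the log; the
// helper itself is a hypothetical stand-in for the test's assertion.
func ulimitInPod(kubectx, pod string) (string, error) {
	out, err := exec.Command("kubectl", "--context", kubectx,
		"exec", pod, "--", "/bin/sh", "-c", "ulimit -n").Output()
	if err != nil {
		return "", err
	}
	return strings.TrimSpace(string(out)), nil
}

func main() {
	got, err := ulimitInPod("old-k8s-version-128377", "busybox")
	if err != nil {
		fmt.Println("exec failed:", err)
		return
	}
	if got != "1048576" {
		// Reproduces the failure reported above: the container's open-file
		// soft limit is 1024 instead of the expected 1048576.
		fmt.Printf("'ulimit -n' returned %s, expected 1048576\n", got)
	}
}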
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: docker inspect <======
helpers_test.go:239: (dbg) Run: docker inspect old-k8s-version-128377
helpers_test.go:243: (dbg) docker inspect old-k8s-version-128377:
-- stdout --
[
{
"Id": "2f10becef58704f5e7bd5cb0836d9f1660358d1387d26e05576d2fc9439102c7",
"Created": "2025-11-24T09:04:51.081869704Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 696955,
"ExitCode": 0,
"Error": "",
"StartedAt": "2025-11-24T09:04:51.124349133Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:133ca4ac39008d0056ad45d8cb70521d6b70d6e1b8bbff4678fd4b354efbdf70",
"ResolvConfPath": "/var/lib/docker/containers/2f10becef58704f5e7bd5cb0836d9f1660358d1387d26e05576d2fc9439102c7/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/2f10becef58704f5e7bd5cb0836d9f1660358d1387d26e05576d2fc9439102c7/hostname",
"HostsPath": "/var/lib/docker/containers/2f10becef58704f5e7bd5cb0836d9f1660358d1387d26e05576d2fc9439102c7/hosts",
"LogPath": "/var/lib/docker/containers/2f10becef58704f5e7bd5cb0836d9f1660358d1387d26e05576d2fc9439102c7/2f10becef58704f5e7bd5cb0836d9f1660358d1387d26e05576d2fc9439102c7-json.log",
"Name": "/old-k8s-version-128377",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"old-k8s-version-128377:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {
"max-size": "100m"
}
},
"NetworkMode": "old-k8s-version-128377",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "private",
"Dns": null,
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 3221225472,
"NanoCpus": 0,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 6442450944,
"MemorySwappiness": null,
"OomKillDisable": null,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"ID": "2f10becef58704f5e7bd5cb0836d9f1660358d1387d26e05576d2fc9439102c7",
"LowerDir": "/var/lib/docker/overlay2/1b1691990697dca2c1039c44453446d25814644b5c2e14c7ed7f94a719a51d83-init/diff:/var/lib/docker/overlay2/a062700147ad5d1f8f2a68f70ba6ad34ea6495dd365bcb265ab17ea27961837b/diff",
"MergedDir": "/var/lib/docker/overlay2/1b1691990697dca2c1039c44453446d25814644b5c2e14c7ed7f94a719a51d83/merged",
"UpperDir": "/var/lib/docker/overlay2/1b1691990697dca2c1039c44453446d25814644b5c2e14c7ed7f94a719a51d83/diff",
"WorkDir": "/var/lib/docker/overlay2/1b1691990697dca2c1039c44453446d25814644b5c2e14c7ed7f94a719a51d83/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "old-k8s-version-128377",
"Source": "/var/lib/docker/volumes/old-k8s-version-128377/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "old-k8s-version-128377",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "old-k8s-version-128377",
"name.minikube.sigs.k8s.io": "old-k8s-version-128377",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"SandboxID": "1b825735b854737d663311b12a71789ec27a2117f701b1d752b938a4e9f325be",
"SandboxKey": "/var/run/docker/netns/1b825735b854",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33068"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33069"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33072"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33070"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33071"
}
]
},
"Networks": {
"old-k8s-version-128377": {
"IPAMConfig": {
"IPv4Address": "192.168.103.2",
"IPv6Address": ""
},
"Links": null,
"Aliases": null,
"DriverOpts": null,
"GwPriority": 0,
"NetworkID": "5e2ac3220d9f4f0222496592b8e5141116283ec11109477dec7a51401ec91c02",
"EndpointID": "4ad14cff7e04c8fe264f407478b59f88dc3ab8d1c7ab17924a24adb832eca462",
"Gateway": "192.168.103.1",
"IPAddress": "192.168.103.2",
"MacAddress": "be:3f:51:5a:9c:89",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"old-k8s-version-128377",
"2f10becef587"
]
}
}
}
}
]
-- /stdout --
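The inspect dump above is the raw data the harness queries later with Go templates (see the `docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'"` runs further down). A hedged sketch of the same lookup, assuming only standard docker CLI behavior; hostPortFor is an illustrative helper, not part of the test suite:

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// hostPortFor asks the docker CLI which host port is published for a given
// container port, using the same Go template seen in the cli_runner.go lines
// later in this log.
func hostPortFor(container, containerPort string) (string, error) {
	format := fmt.Sprintf(`{{(index (index .NetworkSettings.Ports %q) 0).HostPort}}`, containerPort)
	out, err := exec.Command("docker", "container", "inspect", "-f", format, container).Output()
	if err != nil {
		return "", err
	}
	return strings.TrimSpace(string(out)), nil
}

func main() {
	port, err := hostPortFor("old-k8s-version-128377", "22/tcp")
	if err != nil {
		fmt.Println("inspect failed:", err)
		return
	}
	// Per the NetworkSettings.Ports block above, this would print 33068.
	fmt.Println("ssh published on 127.0.0.1:" + port)
}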
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p old-k8s-version-128377 -n old-k8s-version-128377
helpers_test.go:252: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-amd64 -p old-k8s-version-128377 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-amd64 -p old-k8s-version-128377 logs -n 25: (1.058474478s)
helpers_test.go:260: TestStartStop/group/old-k8s-version/serial/DeployApp logs:
-- stdout --
==> Audit <==
┌─────────┬─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬────────────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼────────────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ ssh │ -p cilium-203355 sudo journalctl -xeu kubelet --all --full --no-pager │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ ssh │ -p cilium-203355 sudo cat /etc/kubernetes/kubelet.conf │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ ssh │ -p cilium-203355 sudo cat /var/lib/kubelet/config.yaml │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ ssh │ -p cilium-203355 sudo systemctl status docker --all --full --no-pager │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ delete │ -p missing-upgrade-058813 │ missing-upgrade-058813 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ 24 Nov 25 09:04 UTC │
│ ssh │ -p cilium-203355 sudo systemctl cat docker --no-pager │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ ssh │ -p cilium-203355 sudo cat /etc/docker/daemon.json │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ ssh │ -p cilium-203355 sudo docker system info │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ ssh │ -p cilium-203355 sudo systemctl status cri-docker --all --full --no-pager │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ ssh │ -p cilium-203355 sudo systemctl cat cri-docker --no-pager │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ ssh │ -p cilium-203355 sudo cat /etc/systemd/system/cri-docker.service.d/10-cni.conf │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ ssh │ -p cilium-203355 sudo cat /usr/lib/systemd/system/cri-docker.service │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ ssh │ -p cilium-203355 sudo cri-dockerd --version │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ ssh │ -p cilium-203355 sudo systemctl status containerd --all --full --no-pager │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ ssh │ -p cilium-203355 sudo systemctl cat containerd --no-pager │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ ssh │ -p cilium-203355 sudo cat /lib/systemd/system/containerd.service │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ ssh │ -p cilium-203355 sudo cat /etc/containerd/config.toml │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ ssh │ -p cilium-203355 sudo containerd config dump │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ ssh │ -p cilium-203355 sudo systemctl status crio --all --full --no-pager │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ ssh │ -p cilium-203355 sudo systemctl cat crio --no-pager │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ ssh │ -p cilium-203355 sudo find /etc/crio -type f -exec sh -c 'echo {}; cat {}' \; │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ ssh │ -p cilium-203355 sudo crio config │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ delete │ -p cilium-203355 │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ 24 Nov 25 09:04 UTC │
│ start │ -p old-k8s-version-128377 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.28.0 │ old-k8s-version-128377 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ 24 Nov 25 09:05 UTC │
│ start │ -p no-preload-820576 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.35.0-beta.0 │ no-preload-820576 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ 24 Nov 25 09:05 UTC │
└─────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴────────────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/11/24 09:04:47
Running on machine: ubuntu-20-agent-10
Binary: Built with gc go1.25.3 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1124 09:04:47.686335 696018 out.go:360] Setting OutFile to fd 1 ...
I1124 09:04:47.686445 696018 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1124 09:04:47.686456 696018 out.go:374] Setting ErrFile to fd 2...
I1124 09:04:47.686474 696018 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1124 09:04:47.686683 696018 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21978-435860/.minikube/bin
I1124 09:04:47.687133 696018 out.go:368] Setting JSON to false
I1124 09:04:47.688408 696018 start.go:133] hostinfo: {"hostname":"ubuntu-20-agent-10","uptime":13624,"bootTime":1763961464,"procs":294,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1044-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I1124 09:04:47.688532 696018 start.go:143] virtualization: kvm guest
I1124 09:04:47.690354 696018 out.go:179] * [no-preload-820576] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
I1124 09:04:47.691472 696018 out.go:179] - MINIKUBE_LOCATION=21978
I1124 09:04:47.691501 696018 notify.go:221] Checking for updates...
I1124 09:04:47.693590 696018 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1124 09:04:47.694681 696018 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21978-435860/kubeconfig
I1124 09:04:47.695683 696018 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21978-435860/.minikube
I1124 09:04:47.697109 696018 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64
I1124 09:04:47.698248 696018 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1124 09:04:47.699807 696018 config.go:182] Loaded profile config "cert-expiration-869306": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.2
I1124 09:04:47.699947 696018 config.go:182] Loaded profile config "kubernetes-upgrade-521313": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.35.0-beta.0
I1124 09:04:47.700091 696018 config.go:182] Loaded profile config "old-k8s-version-128377": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1124 09:04:47.700236 696018 driver.go:422] Setting default libvirt URI to qemu:///system
I1124 09:04:47.724639 696018 docker.go:124] docker version: linux-29.0.2:Docker Engine - Community
I1124 09:04:47.724770 696018 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1124 09:04:47.791833 696018 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:2 ContainersRunning:2 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:52 OomKillDisable:false NGoroutines:66 SystemTime:2025-11-24 09:04:47.780432821 +0000 UTC LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-1044-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652076544 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-10 Labels:[] ExperimentalBuild:false ServerVersion:29.0.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:fcd43222d6b07379a4be9786bda52438f0dd16a1 Expected:} RuncCommit:{ID:v1.3.3-0-gd842d771 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.30.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.40.3] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v1.0.1] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1124 09:04:47.791998 696018 docker.go:319] overlay module found
I1124 09:04:47.794089 696018 out.go:179] * Using the docker driver based on user configuration
I1124 09:04:47.795621 696018 start.go:309] selected driver: docker
I1124 09:04:47.795639 696018 start.go:927] validating driver "docker" against <nil>
I1124 09:04:47.795651 696018 start.go:938] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1124 09:04:47.796325 696018 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1124 09:04:47.859511 696018 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:3 ContainersRunning:2 ContainersPaused:0 ContainersStopped:1 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:61 OomKillDisable:false NGoroutines:86 SystemTime:2025-11-24 09:04:47.848833175 +0000 UTC LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-1044-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652076544 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-10 Labels:[] ExperimentalBuild:false ServerVersion:29.0.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:fcd43222d6b07379a4be9786bda52438f0dd16a1 Expected:} RuncCommit:{ID:v1.3.3-0-gd842d771 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.30.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.40.3] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v1.0.1] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1124 09:04:47.859748 696018 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I1124 09:04:47.859957 696018 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1124 09:04:47.861778 696018 out.go:179] * Using Docker driver with root privileges
I1124 09:04:47.862632 696018 cni.go:84] Creating CNI manager for ""
I1124 09:04:47.862696 696018 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1124 09:04:47.862708 696018 start_flags.go:336] Found "CNI" CNI - setting NetworkPlugin=cni
I1124 09:04:47.862775 696018 start.go:353] cluster config:
{Name:no-preload-820576 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.35.0-beta.0 ClusterName:no-preload-820576 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.35.0-beta.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1124 09:04:47.863875 696018 out.go:179] * Starting "no-preload-820576" primary control-plane node in "no-preload-820576" cluster
I1124 09:04:47.864812 696018 cache.go:134] Beginning downloading kic base image for docker with containerd
I1124 09:04:47.865865 696018 out.go:179] * Pulling base image v0.0.48-1763789673-21948 ...
I1124 09:04:47.866835 696018 preload.go:188] Checking if preload exists for k8s version v1.35.0-beta.0 and runtime containerd
I1124 09:04:47.866921 696018 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f in local docker daemon
I1124 09:04:47.866958 696018 profile.go:143] Saving config to /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/config.json ...
I1124 09:04:47.867001 696018 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/config.json: {Name:mk04f43d651118a00ac1be32029cffb149669d46 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 09:04:47.867208 696018 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubeadm?checksum=file:https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubeadm.sha256
I1124 09:04:47.890231 696018 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f in local docker daemon, skipping pull
I1124 09:04:47.890260 696018 cache.go:158] gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f exists in daemon, skipping load
I1124 09:04:47.890281 696018 cache.go:243] Successfully downloaded all kic artifacts
I1124 09:04:47.890321 696018 start.go:360] acquireMachinesLock for no-preload-820576: {Name:mk6b6fb581999217c645edacaa9c18971e97964f Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 09:04:47.890432 696018 start.go:364] duration metric: took 88.402µs to acquireMachinesLock for "no-preload-820576"
I1124 09:04:47.890474 696018 start.go:93] Provisioning new machine with config: &{Name:no-preload-820576 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.35.0-beta.0 ClusterName:no-preload-820576 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.35.0-beta.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.35.0-beta.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1124 09:04:47.890567 696018 start.go:125] createHost starting for "" (driver="docker")
I1124 09:04:48.739369 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": read tcp 192.168.76.1:40906->192.168.76.2:8443: read: connection reset by peer
I1124 09:04:48.739430 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:04:48.740184 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:04:48.920539 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:04:48.921019 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:04:49.420530 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:04:49.420996 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:04:46.813535 695520 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I1124 09:04:46.813778 695520 start.go:159] libmachine.API.Create for "old-k8s-version-128377" (driver="docker")
I1124 09:04:46.813816 695520 client.go:173] LocalClient.Create starting
I1124 09:04:46.813892 695520 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21978-435860/.minikube/certs/ca.pem
I1124 09:04:46.813936 695520 main.go:143] libmachine: Decoding PEM data...
I1124 09:04:46.813967 695520 main.go:143] libmachine: Parsing certificate...
I1124 09:04:46.814043 695520 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21978-435860/.minikube/certs/cert.pem
I1124 09:04:46.814076 695520 main.go:143] libmachine: Decoding PEM data...
I1124 09:04:46.814095 695520 main.go:143] libmachine: Parsing certificate...
I1124 09:04:46.814441 695520 cli_runner.go:164] Run: docker network inspect old-k8s-version-128377 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1124 09:04:46.831913 695520 cli_runner.go:211] docker network inspect old-k8s-version-128377 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1124 09:04:46.831996 695520 network_create.go:284] running [docker network inspect old-k8s-version-128377] to gather additional debugging logs...
I1124 09:04:46.832018 695520 cli_runner.go:164] Run: docker network inspect old-k8s-version-128377
W1124 09:04:46.848875 695520 cli_runner.go:211] docker network inspect old-k8s-version-128377 returned with exit code 1
I1124 09:04:46.848912 695520 network_create.go:287] error running [docker network inspect old-k8s-version-128377]: docker network inspect old-k8s-version-128377: exit status 1
stdout:
[]
stderr:
Error response from daemon: network old-k8s-version-128377 not found
I1124 09:04:46.848928 695520 network_create.go:289] output of [docker network inspect old-k8s-version-128377]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network old-k8s-version-128377 not found
** /stderr **
I1124 09:04:46.849044 695520 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1124 09:04:46.866840 695520 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-c654f70fdf0e IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:12:f7:ca:91:9d:ad} reservation:<nil>}
I1124 09:04:46.867443 695520 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-f1081c4000c5 IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:ea:b1:6d:32:2c:78} reservation:<nil>}
I1124 09:04:46.868124 695520 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-30fdd1988974 IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:f2:59:2f:0a:61:81} reservation:<nil>}
I1124 09:04:46.868877 695520 network.go:211] skipping subnet 192.168.76.0/24 that is taken: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName:br-6cd297979890 IfaceIPv4:192.168.76.1 IfaceMTU:1500 IfaceMAC:62:91:f3:e4:95:17} reservation:<nil>}
I1124 09:04:46.869272 695520 network.go:211] skipping subnet 192.168.85.0/24 that is taken: &{IP:192.168.85.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.85.0/24 Gateway:192.168.85.1 ClientMin:192.168.85.2 ClientMax:192.168.85.254 Broadcast:192.168.85.255 IsPrivate:true Interface:{IfaceName:br-9bf62793deff IfaceIPv4:192.168.85.1 IfaceMTU:1500 IfaceMAC:0a:d1:a9:3b:89:29} reservation:<nil>}
I1124 09:04:46.869983 695520 network.go:211] skipping subnet 192.168.94.0/24 that is taken: &{IP:192.168.94.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.94.0/24 Gateway:192.168.94.1 ClientMin:192.168.94.2 ClientMax:192.168.94.254 Broadcast:192.168.94.255 IsPrivate:true Interface:{IfaceName:br-5fa0f78c53ad IfaceIPv4:192.168.94.1 IfaceMTU:1500 IfaceMAC:9e:96:d6:0a:fe:a6} reservation:<nil>}
I1124 09:04:46.870809 695520 network.go:206] using free private subnet 192.168.103.0/24: &{IP:192.168.103.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.103.0/24 Gateway:192.168.103.1 ClientMin:192.168.103.2 ClientMax:192.168.103.254 Broadcast:192.168.103.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001e158e0}
I1124 09:04:46.870832 695520 network_create.go:124] attempt to create docker network old-k8s-version-128377 192.168.103.0/24 with gateway 192.168.103.1 and MTU of 1500 ...
I1124 09:04:46.870880 695520 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.103.0/24 --gateway=192.168.103.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=old-k8s-version-128377 old-k8s-version-128377
I1124 09:04:46.993201 695520 network_create.go:108] docker network old-k8s-version-128377 192.168.103.0/24 created
I1124 09:04:46.993243 695520 kic.go:121] calculated static IP "192.168.103.2" for the "old-k8s-version-128377" container
I1124 09:04:46.993321 695520 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1124 09:04:47.015308 695520 cli_runner.go:164] Run: docker volume create old-k8s-version-128377 --label name.minikube.sigs.k8s.io=old-k8s-version-128377 --label created_by.minikube.sigs.k8s.io=true
I1124 09:04:47.034791 695520 oci.go:103] Successfully created a docker volume old-k8s-version-128377
I1124 09:04:47.034869 695520 cli_runner.go:164] Run: docker run --rm --name old-k8s-version-128377-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=old-k8s-version-128377 --entrypoint /usr/bin/test -v old-k8s-version-128377:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -d /var/lib
I1124 09:04:47.772927 695520 oci.go:107] Successfully prepared a docker volume old-k8s-version-128377
I1124 09:04:47.773023 695520 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1124 09:04:47.773041 695520 kic.go:194] Starting extracting preloaded images to volume ...
I1124 09:04:47.773133 695520 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21978-435860/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v old-k8s-version-128377:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -I lz4 -xf /preloaded.tar -C /extractDir
I1124 09:04:50.987600 695520 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21978-435860/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v old-k8s-version-128377:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -I lz4 -xf /preloaded.tar -C /extractDir: (3.214396647s)
I1124 09:04:50.987639 695520 kic.go:203] duration metric: took 3.214593361s to extract preloaded images to volume ...
W1124 09:04:50.987789 695520 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
W1124 09:04:50.987849 695520 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.
I1124 09:04:50.987920 695520 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1124 09:04:51.061728 695520 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname old-k8s-version-128377 --name old-k8s-version-128377 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=old-k8s-version-128377 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=old-k8s-version-128377 --network old-k8s-version-128377 --ip 192.168.103.2 --volume old-k8s-version-128377:/var --security-opt apparmor=unconfined --memory=3072mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f
I1124 09:04:51.401514 695520 cli_runner.go:164] Run: docker container inspect old-k8s-version-128377 --format={{.State.Running}}
I1124 09:04:51.426748 695520 cli_runner.go:164] Run: docker container inspect old-k8s-version-128377 --format={{.State.Status}}
I1124 09:04:51.456228 695520 cli_runner.go:164] Run: docker exec old-k8s-version-128377 stat /var/lib/dpkg/alternatives/iptables
I1124 09:04:51.515517 695520 oci.go:144] the created container "old-k8s-version-128377" has a running status.
I1124 09:04:51.515571 695520 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21978-435860/.minikube/machines/old-k8s-version-128377/id_rsa...
I1124 09:04:47.893309 696018 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I1124 09:04:47.893645 696018 start.go:159] libmachine.API.Create for "no-preload-820576" (driver="docker")
I1124 09:04:47.893687 696018 client.go:173] LocalClient.Create starting
I1124 09:04:47.893789 696018 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21978-435860/.minikube/certs/ca.pem
I1124 09:04:47.893833 696018 main.go:143] libmachine: Decoding PEM data...
I1124 09:04:47.893861 696018 main.go:143] libmachine: Parsing certificate...
I1124 09:04:47.893953 696018 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21978-435860/.minikube/certs/cert.pem
I1124 09:04:47.893982 696018 main.go:143] libmachine: Decoding PEM data...
I1124 09:04:47.893999 696018 main.go:143] libmachine: Parsing certificate...
I1124 09:04:47.894436 696018 cli_runner.go:164] Run: docker network inspect no-preload-820576 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1124 09:04:47.915789 696018 cli_runner.go:211] docker network inspect no-preload-820576 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1124 09:04:47.915886 696018 network_create.go:284] running [docker network inspect no-preload-820576] to gather additional debugging logs...
I1124 09:04:47.915925 696018 cli_runner.go:164] Run: docker network inspect no-preload-820576
W1124 09:04:47.939725 696018 cli_runner.go:211] docker network inspect no-preload-820576 returned with exit code 1
I1124 09:04:47.939760 696018 network_create.go:287] error running [docker network inspect no-preload-820576]: docker network inspect no-preload-820576: exit status 1
stdout:
[]
stderr:
Error response from daemon: network no-preload-820576 not found
I1124 09:04:47.939788 696018 network_create.go:289] output of [docker network inspect no-preload-820576]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network no-preload-820576 not found
** /stderr **
I1124 09:04:47.939956 696018 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1124 09:04:47.960368 696018 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-c654f70fdf0e IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:12:f7:ca:91:9d:ad} reservation:<nil>}
I1124 09:04:47.961456 696018 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-f1081c4000c5 IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:ea:b1:6d:32:2c:78} reservation:<nil>}
I1124 09:04:47.962397 696018 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-30fdd1988974 IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:f2:59:2f:0a:61:81} reservation:<nil>}
I1124 09:04:47.963597 696018 network.go:211] skipping subnet 192.168.76.0/24 that is taken: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName:br-6cd297979890 IfaceIPv4:192.168.76.1 IfaceMTU:1500 IfaceMAC:62:91:f3:e4:95:17} reservation:<nil>}
I1124 09:04:47.964832 696018 network.go:206] using free private subnet 192.168.85.0/24: &{IP:192.168.85.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.85.0/24 Gateway:192.168.85.1 ClientMin:192.168.85.2 ClientMax:192.168.85.254 Broadcast:192.168.85.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001e9cf50}
I1124 09:04:47.964868 696018 network_create.go:124] attempt to create docker network no-preload-820576 192.168.85.0/24 with gateway 192.168.85.1 and MTU of 1500 ...
I1124 09:04:47.964929 696018 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.85.0/24 --gateway=192.168.85.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=no-preload-820576 no-preload-820576
I1124 09:04:48.017684 696018 network_create.go:108] docker network no-preload-820576 192.168.85.0/24 created
I1124 09:04:48.017725 696018 kic.go:121] calculated static IP "192.168.85.2" for the "no-preload-820576" container
I1124 09:04:48.017804 696018 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1124 09:04:48.037793 696018 cli_runner.go:164] Run: docker volume create no-preload-820576 --label name.minikube.sigs.k8s.io=no-preload-820576 --label created_by.minikube.sigs.k8s.io=true
I1124 09:04:48.057638 696018 oci.go:103] Successfully created a docker volume no-preload-820576
I1124 09:04:48.057738 696018 cli_runner.go:164] Run: docker run --rm --name no-preload-820576-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=no-preload-820576 --entrypoint /usr/bin/test -v no-preload-820576:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -d /var/lib
I1124 09:04:48.192090 696018 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubeadm?checksum=file:https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubeadm.sha256
I1124 09:04:48.509962 696018 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubeadm?checksum=file:https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubeadm.sha256
I1124 09:04:48.827547 696018 cache.go:107] acquiring lock: {Name:mkbcabeb5a23ff077ffdad64c71e9fe699d94040 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 09:04:48.827544 696018 cache.go:107] acquiring lock: {Name:mk92c82896924ab47423467b25ccd98ee4128baa Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 09:04:48.827656 696018 cache.go:115] /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 exists
I1124 09:04:48.827672 696018 cache.go:115] /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.35.0-beta.0 exists
I1124 09:04:48.827672 696018 cache.go:96] cache image "gcr.io/k8s-minikube/storage-provisioner:v5" -> "/home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5" took 138.757µs
I1124 09:04:48.827689 696018 cache.go:80] save to tar file gcr.io/k8s-minikube/storage-provisioner:v5 -> /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 succeeded
I1124 09:04:48.827683 696018 cache.go:96] cache image "registry.k8s.io/kube-apiserver:v1.35.0-beta.0" -> "/home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.35.0-beta.0" took 176.678µs
I1124 09:04:48.827708 696018 cache.go:80] save to tar file registry.k8s.io/kube-apiserver:v1.35.0-beta.0 -> /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.35.0-beta.0 succeeded
I1124 09:04:48.827708 696018 cache.go:107] acquiring lock: {Name:mkf3a006b133f81ed32779d427a8d0a9b25f9000 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 09:04:48.827735 696018 cache.go:107] acquiring lock: {Name:mkd74819cb24442927f7fb2cffd47478de40e14c Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 09:04:48.827766 696018 cache.go:115] /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 exists
I1124 09:04:48.827773 696018 cache.go:96] cache image "registry.k8s.io/pause:3.10.1" -> "/home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1" took 69.196µs
I1124 09:04:48.827780 696018 cache.go:80] save to tar file registry.k8s.io/pause:3.10.1 -> /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 succeeded
I1124 09:04:48.827788 696018 cache.go:115] /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.5.24-0 exists
I1124 09:04:48.827796 696018 cache.go:96] cache image "registry.k8s.io/etcd:3.5.24-0" -> "/home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.5.24-0" took 65.204µs
I1124 09:04:48.827804 696018 cache.go:80] save to tar file registry.k8s.io/etcd:3.5.24-0 -> /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.5.24-0 succeeded
I1124 09:04:48.827791 696018 cache.go:107] acquiring lock: {Name:mk6b573bbd33cfc3c3f77668030fb064598572fd Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 09:04:48.827820 696018 cache.go:107] acquiring lock: {Name:mk7f052905284f586f4f1cf24b8c34cc48e0b85b Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 09:04:48.827866 696018 cache.go:115] /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.35.0-beta.0 exists
I1124 09:04:48.827873 696018 cache.go:96] cache image "registry.k8s.io/kube-scheduler:v1.35.0-beta.0" -> "/home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.35.0-beta.0" took 57.027µs
I1124 09:04:48.827882 696018 cache.go:80] save to tar file registry.k8s.io/kube-scheduler:v1.35.0-beta.0 -> /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.35.0-beta.0 succeeded
I1124 09:04:48.827796 696018 cache.go:107] acquiring lock: {Name:mk1d635b72f6d026600360916178f900a450350e Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 09:04:48.827887 696018 cache.go:115] /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.13.1 exists
I1124 09:04:48.827900 696018 cache.go:96] cache image "registry.k8s.io/coredns/coredns:v1.13.1" -> "/home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.13.1" took 115.907µs
I1124 09:04:48.827910 696018 cache.go:80] save to tar file registry.k8s.io/coredns/coredns:v1.13.1 -> /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.13.1 succeeded
I1124 09:04:48.827914 696018 cache.go:115] /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.35.0-beta.0 exists
I1124 09:04:48.827921 696018 cache.go:96] cache image "registry.k8s.io/kube-controller-manager:v1.35.0-beta.0" -> "/home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.35.0-beta.0" took 128.45µs
I1124 09:04:48.827937 696018 cache.go:80] save to tar file registry.k8s.io/kube-controller-manager:v1.35.0-beta.0 -> /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.35.0-beta.0 succeeded
I1124 09:04:48.827719 696018 cache.go:107] acquiring lock: {Name:mk8023690ce5b18d9a1789b2f878bf92c1381799 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 09:04:48.828021 696018 cache.go:115] /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.35.0-beta.0 exists
I1124 09:04:48.828033 696018 cache.go:96] cache image "registry.k8s.io/kube-proxy:v1.35.0-beta.0" -> "/home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.35.0-beta.0" took 327.502µs
I1124 09:04:48.828051 696018 cache.go:80] save to tar file registry.k8s.io/kube-proxy:v1.35.0-beta.0 -> /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.35.0-beta.0 succeeded
I1124 09:04:48.828067 696018 cache.go:87] Successfully saved all images to host disk.
I1124 09:04:50.353018 696018 cli_runner.go:217] Completed: docker run --rm --name no-preload-820576-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=no-preload-820576 --entrypoint /usr/bin/test -v no-preload-820576:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -d /var/lib: (2.295229864s)
I1124 09:04:50.353061 696018 oci.go:107] Successfully prepared a docker volume no-preload-820576
I1124 09:04:50.353130 696018 preload.go:188] Checking if preload exists for k8s version v1.35.0-beta.0 and runtime containerd
W1124 09:04:50.353205 696018 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
W1124 09:04:50.353233 696018 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.
I1124 09:04:50.353275 696018 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1124 09:04:50.412447 696018 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname no-preload-820576 --name no-preload-820576 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=no-preload-820576 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=no-preload-820576 --network no-preload-820576 --ip 192.168.85.2 --volume no-preload-820576:/var --security-opt apparmor=unconfined --memory=3072mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f
I1124 09:04:51.174340 696018 cli_runner.go:164] Run: docker container inspect no-preload-820576 --format={{.State.Running}}
I1124 09:04:51.195074 696018 cli_runner.go:164] Run: docker container inspect no-preload-820576 --format={{.State.Status}}
I1124 09:04:51.216706 696018 cli_runner.go:164] Run: docker exec no-preload-820576 stat /var/lib/dpkg/alternatives/iptables
I1124 09:04:51.270513 696018 oci.go:144] the created container "no-preload-820576" has a running status.
I1124 09:04:51.270555 696018 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21978-435860/.minikube/machines/no-preload-820576/id_rsa...
I1124 09:04:51.639069 696018 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21978-435860/.minikube/machines/no-preload-820576/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1124 09:04:51.669871 696018 cli_runner.go:164] Run: docker container inspect no-preload-820576 --format={{.State.Status}}
I1124 09:04:51.693409 696018 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1124 09:04:51.693441 696018 kic_runner.go:114] Args: [docker exec --privileged no-preload-820576 chown docker:docker /home/docker/.ssh/authorized_keys]
I1124 09:04:51.754414 696018 cli_runner.go:164] Run: docker container inspect no-preload-820576 --format={{.State.Status}}
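
The kic.go:225 / kic_runner.go:191 pair above generates a machine keypair on the host, then copies the public half into the container as /home/docker/.ssh/authorized_keys (the 381-byte file). A self-contained sketch of that key material, assuming golang.org/x/crypto/ssh is available; output file names are illustrative:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"os"

	"golang.org/x/crypto/ssh"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	// PEM-encoded private key, the shape of machines/<name>/id_rsa.
	privPEM := pem.EncodeToMemory(&pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(key),
	})
	pub, err := ssh.NewPublicKey(&key.PublicKey)
	if err != nil {
		panic(err)
	}
	// authorized_keys format: "ssh-rsa AAAA...\n".
	authorized := ssh.MarshalAuthorizedKey(pub)

	if err := os.WriteFile("id_rsa", privPEM, 0o600); err != nil {
		panic(err)
	}
	if err := os.WriteFile("authorized_keys", authorized, 0o644); err != nil {
		panic(err)
	}
}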
I1124 09:04:51.781590 696018 machine.go:94] provisionDockerMachine start ...
I1124 09:04:51.781685 696018 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-820576
I1124 09:04:51.808597 696018 main.go:143] libmachine: Using SSH client type: native
I1124 09:04:51.809054 696018 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33063 <nil> <nil>}
I1124 09:04:51.809092 696018 main.go:143] libmachine: About to run SSH command:
hostname
I1124 09:04:51.963230 696018 main.go:143] libmachine: SSH cmd err, output: <nil>: no-preload-820576
I1124 09:04:51.963276 696018 ubuntu.go:182] provisioning hostname "no-preload-820576"
I1124 09:04:51.963339 696018 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-820576
I1124 09:04:51.984069 696018 main.go:143] libmachine: Using SSH client type: native
I1124 09:04:51.984406 696018 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33063 <nil> <nil>}
I1124 09:04:51.984432 696018 main.go:143] libmachine: About to run SSH command:
sudo hostname no-preload-820576 && echo "no-preload-820576" | sudo tee /etc/hostname
I1124 09:04:52.142431 696018 main.go:143] libmachine: SSH cmd err, output: <nil>: no-preload-820576
I1124 09:04:52.142545 696018 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-820576
I1124 09:04:52.163141 696018 main.go:143] libmachine: Using SSH client type: native
I1124 09:04:52.163483 696018 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33063 <nil> <nil>}
I1124 09:04:52.163520 696018 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sno-preload-820576' /etc/hosts; then
  if grep -xq '127.0.1.1\s.*' /etc/hosts; then
    sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 no-preload-820576/g' /etc/hosts;
  else
    echo '127.0.1.1 no-preload-820576' | sudo tee -a /etc/hosts;
  fi
fi
I1124 09:04:52.313074 696018 main.go:143] libmachine: SSH cmd err, output: <nil>:
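
Each "About to run SSH command" / "SSH cmd err, output" pair above is one session over the loopback port that docker published for 22/tcp (33063 here, taken from the inspect output). A minimal sketch with golang.org/x/crypto/ssh; the user and port come from the log, and host-key verification is skipped only because the endpoint is a local port-forward:

package main

import (
	"fmt"
	"os"

	"golang.org/x/crypto/ssh"
)

func main() {
	keyBytes, err := os.ReadFile("id_rsa") // the machine key created above
	if err != nil {
		panic(err)
	}
	signer, err := ssh.ParsePrivateKey(keyBytes)
	if err != nil {
		panic(err)
	}
	cfg := &ssh.ClientConfig{
		User: "docker",
		Auth: []ssh.AuthMethod{ssh.PublicKeys(signer)},
		// Loopback forward published by docker; no host key pinning here.
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
	}
	client, err := ssh.Dial("tcp", "127.0.0.1:33063", cfg)
	if err != nil {
		panic(err)
	}
	defer client.Close()

	sess, err := client.NewSession()
	if err != nil {
		panic(err)
	}
	defer sess.Close()
	out, err := sess.CombinedOutput("hostname")
	fmt.Printf("SSH cmd err, output: %v: %s", err, out)
}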
I1124 09:04:52.313103 696018 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21978-435860/.minikube CaCertPath:/home/jenkins/minikube-integration/21978-435860/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21978-435860/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21978-435860/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21978-435860/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21978-435860/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21978-435860/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21978-435860/.minikube}
I1124 09:04:52.313151 696018 ubuntu.go:190] setting up certificates
I1124 09:04:52.313174 696018 provision.go:84] configureAuth start
I1124 09:04:52.313241 696018 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" no-preload-820576
I1124 09:04:52.333178 696018 provision.go:143] copyHostCerts
I1124 09:04:52.333250 696018 exec_runner.go:144] found /home/jenkins/minikube-integration/21978-435860/.minikube/ca.pem, removing ...
I1124 09:04:52.333267 696018 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21978-435860/.minikube/ca.pem
I1124 09:04:52.333340 696018 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21978-435860/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21978-435860/.minikube/ca.pem (1082 bytes)
I1124 09:04:52.333454 696018 exec_runner.go:144] found /home/jenkins/minikube-integration/21978-435860/.minikube/cert.pem, removing ...
I1124 09:04:52.333479 696018 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21978-435860/.minikube/cert.pem
I1124 09:04:52.333527 696018 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21978-435860/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21978-435860/.minikube/cert.pem (1123 bytes)
I1124 09:04:52.333610 696018 exec_runner.go:144] found /home/jenkins/minikube-integration/21978-435860/.minikube/key.pem, removing ...
I1124 09:04:52.333631 696018 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21978-435860/.minikube/key.pem
I1124 09:04:52.333670 696018 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21978-435860/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21978-435860/.minikube/key.pem (1675 bytes)
I1124 09:04:52.333745 696018 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21978-435860/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21978-435860/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21978-435860/.minikube/certs/ca-key.pem org=jenkins.no-preload-820576 san=[127.0.0.1 192.168.85.2 localhost minikube no-preload-820576]
I1124 09:04:52.372869 696018 provision.go:177] copyRemoteCerts
I1124 09:04:52.372936 696018 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1124 09:04:52.372984 696018 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-820576
I1124 09:04:52.391516 696018 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21978-435860/.minikube/machines/no-preload-820576/id_rsa Username:docker}
I1124 09:04:52.495715 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1124 09:04:52.515508 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/machines/server.pem --> /etc/docker/server.pem (1233 bytes)
I1124 09:04:52.533110 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1124 09:04:52.549620 696018 provision.go:87] duration metric: took 236.431147ms to configureAuth
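
configureAuth above signs a fresh server certificate against the shared minikube CA, embedding the IP and DNS SANs listed in the "generating server cert" line, then scp's it into /etc/docker. A hedged crypto/x509 sketch of that signing step, assuming the CA key is PKCS#1 RSA; file names mirror the log, validity and key-usage choices are illustrative:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"math/big"
	"net"
	"os"
	"time"
)

func main() {
	caCertPEM, _ := os.ReadFile("ca.pem")
	caKeyPEM, _ := os.ReadFile("ca-key.pem")
	caBlock, _ := pem.Decode(caCertPEM)
	keyBlock, _ := pem.Decode(caKeyPEM)
	if caBlock == nil || keyBlock == nil {
		panic("bad CA PEM")
	}
	caCert, err := x509.ParseCertificate(caBlock.Bytes)
	if err != nil {
		panic(err)
	}
	caKey, err := x509.ParsePKCS1PrivateKey(keyBlock.Bytes)
	if err != nil {
		panic(err)
	}

	serverKey, _ := rsa.GenerateKey(rand.Reader, 2048)
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{Organization: []string{"jenkins.no-preload-820576"}},
		// san=[127.0.0.1 192.168.85.2 localhost minikube no-preload-820576]
		IPAddresses: []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("192.168.85.2")},
		DNSNames:    []string{"localhost", "minikube", "no-preload-820576"},
		NotBefore:   time.Now(),
		NotAfter:    time.Now().Add(3 * 365 * 24 * time.Hour),
		KeyUsage:    x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
		ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
	}
	der, err := x509.CreateCertificate(rand.Reader, tmpl, caCert, &serverKey.PublicKey, caKey)
	if err != nil {
		panic(err)
	}
	os.WriteFile("server.pem",
		pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: der}), 0o644)
	os.WriteFile("server-key.pem",
		pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY",
			Bytes: x509.MarshalPKCS1PrivateKey(serverKey)}), 0o600)
}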
I1124 09:04:52.549643 696018 ubuntu.go:206] setting minikube options for container-runtime
I1124 09:04:52.549785 696018 config.go:182] Loaded profile config "no-preload-820576": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.35.0-beta.0
I1124 09:04:52.549795 696018 machine.go:97] duration metric: took 768.185276ms to provisionDockerMachine
I1124 09:04:52.549801 696018 client.go:176] duration metric: took 4.656107804s to LocalClient.Create
I1124 09:04:52.549817 696018 start.go:167] duration metric: took 4.656176839s to libmachine.API.Create "no-preload-820576"
I1124 09:04:52.549827 696018 start.go:293] postStartSetup for "no-preload-820576" (driver="docker")
I1124 09:04:52.549837 696018 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1124 09:04:52.549917 696018 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1124 09:04:52.549957 696018 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-820576
I1124 09:04:52.567598 696018 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21978-435860/.minikube/machines/no-preload-820576/id_rsa Username:docker}
I1124 09:04:52.670209 696018 ssh_runner.go:195] Run: cat /etc/os-release
I1124 09:04:52.673794 696018 main.go:143] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1124 09:04:52.673819 696018 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1124 09:04:52.673829 696018 filesync.go:126] Scanning /home/jenkins/minikube-integration/21978-435860/.minikube/addons for local assets ...
I1124 09:04:52.673873 696018 filesync.go:126] Scanning /home/jenkins/minikube-integration/21978-435860/.minikube/files for local assets ...
I1124 09:04:52.673954 696018 filesync.go:149] local asset: /home/jenkins/minikube-integration/21978-435860/.minikube/files/etc/ssl/certs/4395242.pem -> 4395242.pem in /etc/ssl/certs
I1124 09:04:52.674055 696018 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1124 09:04:52.681571 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/files/etc/ssl/certs/4395242.pem --> /etc/ssl/certs/4395242.pem (1708 bytes)
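
The filesync scan above maps every file under .minikube/files/<path> to /<path> in the guest, which is how files/etc/ssl/certs/4395242.pem lands in /etc/ssl/certs. A sketch of that walk (the copy itself happens over SSH, as the scp line shows); paths are illustrative:

package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
	"strings"
)

func main() {
	root := filepath.Join(".minikube", "files")
	filepath.WalkDir(root, func(p string, d fs.DirEntry, err error) error {
		if err != nil || d.IsDir() {
			return err
		}
		// Strip the local prefix: files/etc/ssl/certs/x.pem -> /etc/ssl/certs/x.pem
		dst := "/" + strings.TrimPrefix(p, root+string(filepath.Separator))
		fmt.Printf("local asset: %s -> %s\n", p, dst)
		return nil
	})
}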
I1124 09:04:51.668051 695520 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21978-435860/.minikube/machines/old-k8s-version-128377/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1124 09:04:51.701732 695520 cli_runner.go:164] Run: docker container inspect old-k8s-version-128377 --format={{.State.Status}}
I1124 09:04:51.724111 695520 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1124 09:04:51.724139 695520 kic_runner.go:114] Args: [docker exec --privileged old-k8s-version-128377 chown docker:docker /home/docker/.ssh/authorized_keys]
I1124 09:04:51.779671 695520 cli_runner.go:164] Run: docker container inspect old-k8s-version-128377 --format={{.State.Status}}
I1124 09:04:51.808240 695520 machine.go:94] provisionDockerMachine start ...
I1124 09:04:51.808514 695520 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-128377
I1124 09:04:51.833533 695520 main.go:143] libmachine: Using SSH client type: native
I1124 09:04:51.833868 695520 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33068 <nil> <nil>}
I1124 09:04:51.833890 695520 main.go:143] libmachine: About to run SSH command:
hostname
I1124 09:04:51.988683 695520 main.go:143] libmachine: SSH cmd err, output: <nil>: old-k8s-version-128377
I1124 09:04:51.988712 695520 ubuntu.go:182] provisioning hostname "old-k8s-version-128377"
I1124 09:04:51.988769 695520 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-128377
I1124 09:04:52.008953 695520 main.go:143] libmachine: Using SSH client type: native
I1124 09:04:52.009275 695520 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33068 <nil> <nil>}
I1124 09:04:52.009299 695520 main.go:143] libmachine: About to run SSH command:
sudo hostname old-k8s-version-128377 && echo "old-k8s-version-128377" | sudo tee /etc/hostname
I1124 09:04:52.164712 695520 main.go:143] libmachine: SSH cmd err, output: <nil>: old-k8s-version-128377
I1124 09:04:52.164811 695520 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-128377
I1124 09:04:52.184388 695520 main.go:143] libmachine: Using SSH client type: native
I1124 09:04:52.184674 695520 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33068 <nil> <nil>}
I1124 09:04:52.184701 695520 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sold-k8s-version-128377' /etc/hosts; then
  if grep -xq '127.0.1.1\s.*' /etc/hosts; then
    sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 old-k8s-version-128377/g' /etc/hosts;
  else
    echo '127.0.1.1 old-k8s-version-128377' | sudo tee -a /etc/hosts;
  fi
fi
I1124 09:04:52.328284 695520 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1124 09:04:52.328315 695520 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21978-435860/.minikube CaCertPath:/home/jenkins/minikube-integration/21978-435860/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21978-435860/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21978-435860/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21978-435860/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21978-435860/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21978-435860/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21978-435860/.minikube}
I1124 09:04:52.328349 695520 ubuntu.go:190] setting up certificates
I1124 09:04:52.328371 695520 provision.go:84] configureAuth start
I1124 09:04:52.328437 695520 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-128377
I1124 09:04:52.347382 695520 provision.go:143] copyHostCerts
I1124 09:04:52.347441 695520 exec_runner.go:144] found /home/jenkins/minikube-integration/21978-435860/.minikube/ca.pem, removing ...
I1124 09:04:52.347449 695520 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21978-435860/.minikube/ca.pem
I1124 09:04:52.347530 695520 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21978-435860/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21978-435860/.minikube/ca.pem (1082 bytes)
I1124 09:04:52.347615 695520 exec_runner.go:144] found /home/jenkins/minikube-integration/21978-435860/.minikube/cert.pem, removing ...
I1124 09:04:52.347624 695520 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21978-435860/.minikube/cert.pem
I1124 09:04:52.347646 695520 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21978-435860/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21978-435860/.minikube/cert.pem (1123 bytes)
I1124 09:04:52.347699 695520 exec_runner.go:144] found /home/jenkins/minikube-integration/21978-435860/.minikube/key.pem, removing ...
I1124 09:04:52.347707 695520 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21978-435860/.minikube/key.pem
I1124 09:04:52.347724 695520 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21978-435860/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21978-435860/.minikube/key.pem (1675 bytes)
I1124 09:04:52.347767 695520 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21978-435860/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21978-435860/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21978-435860/.minikube/certs/ca-key.pem org=jenkins.old-k8s-version-128377 san=[127.0.0.1 192.168.103.2 localhost minikube old-k8s-version-128377]
I1124 09:04:52.449836 695520 provision.go:177] copyRemoteCerts
I1124 09:04:52.449907 695520 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1124 09:04:52.449955 695520 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-128377
I1124 09:04:52.467389 695520 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33068 SSHKeyPath:/home/jenkins/minikube-integration/21978-435860/.minikube/machines/old-k8s-version-128377/id_rsa Username:docker}
I1124 09:04:52.568756 695520 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1124 09:04:52.590911 695520 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/machines/server.pem --> /etc/docker/server.pem (1233 bytes)
I1124 09:04:52.608291 695520 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1124 09:04:52.625476 695520 provision.go:87] duration metric: took 297.076146ms to configureAuth
I1124 09:04:52.625501 695520 ubuntu.go:206] setting minikube options for container-runtime
I1124 09:04:52.625684 695520 config.go:182] Loaded profile config "old-k8s-version-128377": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1124 09:04:52.625697 695520 machine.go:97] duration metric: took 817.329123ms to provisionDockerMachine
I1124 09:04:52.625703 695520 client.go:176] duration metric: took 5.811878386s to LocalClient.Create
I1124 09:04:52.625724 695520 start.go:167] duration metric: took 5.811947677s to libmachine.API.Create "old-k8s-version-128377"
I1124 09:04:52.625737 695520 start.go:293] postStartSetup for "old-k8s-version-128377" (driver="docker")
I1124 09:04:52.625751 695520 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1124 09:04:52.625805 695520 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1124 09:04:52.625861 695520 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-128377
I1124 09:04:52.643125 695520 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33068 SSHKeyPath:/home/jenkins/minikube-integration/21978-435860/.minikube/machines/old-k8s-version-128377/id_rsa Username:docker}
I1124 09:04:52.746507 695520 ssh_runner.go:195] Run: cat /etc/os-release
I1124 09:04:52.750419 695520 main.go:143] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1124 09:04:52.750446 695520 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1124 09:04:52.750471 695520 filesync.go:126] Scanning /home/jenkins/minikube-integration/21978-435860/.minikube/addons for local assets ...
I1124 09:04:52.750527 695520 filesync.go:126] Scanning /home/jenkins/minikube-integration/21978-435860/.minikube/files for local assets ...
I1124 09:04:52.750621 695520 filesync.go:149] local asset: /home/jenkins/minikube-integration/21978-435860/.minikube/files/etc/ssl/certs/4395242.pem -> 4395242.pem in /etc/ssl/certs
I1124 09:04:52.750735 695520 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1124 09:04:52.759275 695520 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/files/etc/ssl/certs/4395242.pem --> /etc/ssl/certs/4395242.pem (1708 bytes)
I1124 09:04:52.779524 695520 start.go:296] duration metric: took 153.769147ms for postStartSetup
I1124 09:04:52.779876 695520 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-128377
I1124 09:04:52.797331 695520 profile.go:143] Saving config to /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/config.json ...
I1124 09:04:52.797607 695520 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1124 09:04:52.797652 695520 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-128377
I1124 09:04:52.814633 695520 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33068 SSHKeyPath:/home/jenkins/minikube-integration/21978-435860/.minikube/machines/old-k8s-version-128377/id_rsa Username:docker}
I1124 09:04:52.914421 695520 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1124 09:04:52.919231 695520 start.go:128] duration metric: took 6.107446039s to createHost
I1124 09:04:52.919259 695520 start.go:83] releasing machines lock for "old-k8s-version-128377", held for 6.10762389s
I1124 09:04:52.919326 695520 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-128377
I1124 09:04:52.937920 695520 ssh_runner.go:195] Run: cat /version.json
I1124 09:04:52.937964 695520 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-128377
I1124 09:04:52.937993 695520 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1124 09:04:52.938073 695520 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-128377
I1124 09:04:52.957005 695520 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33068 SSHKeyPath:/home/jenkins/minikube-integration/21978-435860/.minikube/machines/old-k8s-version-128377/id_rsa Username:docker}
I1124 09:04:52.957162 695520 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33068 SSHKeyPath:/home/jenkins/minikube-integration/21978-435860/.minikube/machines/old-k8s-version-128377/id_rsa Username:docker}
I1124 09:04:53.162492 695520 ssh_runner.go:195] Run: systemctl --version
I1124 09:04:53.168749 695520 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1124 09:04:53.173128 695520 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1124 09:04:53.173198 695520 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1124 09:04:53.196703 695520 cni.go:262] disabled [/etc/cni/net.d/10-crio-bridge.conflist.disabled, /etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
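
The find/mv pipeline above renames every bridge or podman CNI config in /etc/cni/net.d to <name>.mk_disabled, so only the CNI minikube installs later (kindnet here) stays active. A Go equivalent of that rename pass, sketch only; the real step runs as a single find(1) command over SSH:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	dir := "/etc/cni/net.d"
	entries, err := os.ReadDir(dir)
	if err != nil {
		panic(err)
	}
	var disabled []string
	for _, e := range entries {
		name := e.Name()
		if e.IsDir() || strings.HasSuffix(name, ".mk_disabled") {
			continue
		}
		if strings.Contains(name, "bridge") || strings.Contains(name, "podman") {
			src := filepath.Join(dir, name)
			if err := os.Rename(src, src+".mk_disabled"); err != nil {
				panic(err)
			}
			disabled = append(disabled, src)
		}
	}
	fmt.Printf("disabled %v bridge cni config(s)\n", disabled)
}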
I1124 09:04:53.196732 695520 start.go:496] detecting cgroup driver to use...
I1124 09:04:53.196770 695520 detect.go:190] detected "systemd" cgroup driver on host os
I1124 09:04:53.196824 695520 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1124 09:04:53.212821 695520 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1124 09:04:53.226105 695520 docker.go:218] disabling cri-docker service (if available) ...
I1124 09:04:53.226149 695520 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1124 09:04:53.245323 695520 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1124 09:04:53.261892 695520 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1124 09:04:53.346225 695520 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1124 09:04:53.440817 695520 docker.go:234] disabling docker service ...
I1124 09:04:53.440886 695520 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1124 09:04:53.466043 695520 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1124 09:04:53.478621 695520 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1124 09:04:53.566248 695520 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1124 09:04:53.652228 695520 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
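
Once the driver is known, the competing runtimes (crio, cri-docker, docker) are stopped and masked so only containerd owns the CRI socket. For the "detected systemd cgroup driver" line itself, a common heuristic is: unified cgroup v2 hierarchy plus systemd as init implies the systemd cgroup driver. The sketch below encodes that heuristic; it is a hedged approximation, not minikube's exact detect.go logic:

package main

import (
	"fmt"
	"os"
)

func main() {
	driver := "cgroupfs"
	// cgroup.controllers only exists on the unified (v2) hierarchy.
	if _, err := os.Stat("/sys/fs/cgroup/cgroup.controllers"); err == nil {
		// PID 1 resolvable to a binary, typically .../systemd/systemd.
		if target, err := os.Readlink("/proc/1/exe"); err == nil && target != "" {
			driver = "systemd"
		}
	}
	fmt.Printf("detected %q cgroup driver on host os\n", driver)
}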
I1124 09:04:53.665204 695520 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1124 09:04:53.679300 695520 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.9"|' /etc/containerd/config.toml"
I1124 09:04:53.689354 695520 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1124 09:04:53.697996 695520 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I1124 09:04:53.698043 695520 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I1124 09:04:53.706349 695520 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1124 09:04:53.715138 695520 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1124 09:04:53.724198 695520 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1124 09:04:53.732594 695520 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1124 09:04:53.740362 695520 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1124 09:04:53.748766 695520 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1124 09:04:53.757048 695520 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1124 09:04:53.765265 695520 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1124 09:04:53.772343 695520 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1124 09:04:53.779254 695520 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1124 09:04:53.856087 695520 ssh_runner.go:195] Run: sudo systemctl restart containerd
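
The sed pipeline above boils down to in-place regex rewrites of /etc/containerd/config.toml, followed by daemon-reload and a containerd restart. A Go sketch of the SystemdCgroup flip, equivalent to sed -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g'; the other edits (sandbox_image, conf_dir, runtime v2) follow the same pattern:

package main

import (
	"os"
	"regexp"
)

func main() {
	const path = "/etc/containerd/config.toml"
	data, err := os.ReadFile(path)
	if err != nil {
		panic(err)
	}
	// (?m) makes ^/$ match per line; ${1} preserves the indentation group.
	re := regexp.MustCompile(`(?m)^( *)SystemdCgroup = .*$`)
	out := re.ReplaceAll(data, []byte("${1}SystemdCgroup = true"))
	if err := os.WriteFile(path, out, 0o644); err != nil {
		panic(err)
	}
	// The real flow then runs `systemctl daemon-reload` and
	// `systemctl restart containerd`, as the log does next.
}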
I1124 09:04:53.959050 695520 start.go:543] Will wait 60s for socket path /run/containerd/containerd.sock
I1124 09:04:53.959110 695520 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I1124 09:04:53.963133 695520 start.go:564] Will wait 60s for crictl version
I1124 09:04:53.963185 695520 ssh_runner.go:195] Run: which crictl
I1124 09:04:53.966895 695520 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1124 09:04:53.994878 695520 start.go:580] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: v2.1.5
RuntimeApiVersion: v1
I1124 09:04:53.994934 695520 ssh_runner.go:195] Run: containerd --version
I1124 09:04:54.021265 695520 ssh_runner.go:195] Run: containerd --version
I1124 09:04:54.045827 695520 out.go:179] * Preparing Kubernetes v1.28.0 on containerd 2.1.5 ...
I1124 09:04:52.701569 696018 start.go:296] duration metric: took 151.731915ms for postStartSetup
I1124 09:04:52.701858 696018 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" no-preload-820576
I1124 09:04:52.719203 696018 profile.go:143] Saving config to /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/config.json ...
I1124 09:04:52.719424 696018 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1124 09:04:52.719488 696018 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-820576
I1124 09:04:52.736084 696018 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21978-435860/.minikube/machines/no-preload-820576/id_rsa Username:docker}
I1124 09:04:52.835481 696018 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1124 09:04:52.840061 696018 start.go:128] duration metric: took 4.94947332s to createHost
I1124 09:04:52.840083 696018 start.go:83] releasing machines lock for "no-preload-820576", held for 4.94964132s
I1124 09:04:52.840148 696018 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" no-preload-820576
I1124 09:04:52.858132 696018 ssh_runner.go:195] Run: cat /version.json
I1124 09:04:52.858160 696018 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1124 09:04:52.858222 696018 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-820576
I1124 09:04:52.858246 696018 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-820576
I1124 09:04:52.877130 696018 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21978-435860/.minikube/machines/no-preload-820576/id_rsa Username:docker}
I1124 09:04:52.877482 696018 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21978-435860/.minikube/machines/no-preload-820576/id_rsa Username:docker}
I1124 09:04:52.975607 696018 ssh_runner.go:195] Run: systemctl --version
I1124 09:04:53.031452 696018 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1124 09:04:53.036065 696018 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1124 09:04:53.036130 696018 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1124 09:04:53.059999 696018 cni.go:262] disabled [/etc/cni/net.d/10-crio-bridge.conflist.disabled, /etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
I1124 09:04:53.060024 696018 start.go:496] detecting cgroup driver to use...
I1124 09:04:53.060062 696018 detect.go:190] detected "systemd" cgroup driver on host os
I1124 09:04:53.060105 696018 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1124 09:04:53.074505 696018 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1124 09:04:53.086089 696018 docker.go:218] disabling cri-docker service (if available) ...
I1124 09:04:53.086143 696018 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1124 09:04:53.101555 696018 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1124 09:04:53.118093 696018 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1124 09:04:53.204201 696018 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1124 09:04:53.300933 696018 docker.go:234] disabling docker service ...
I1124 09:04:53.301034 696018 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1124 09:04:53.320036 696018 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1124 09:04:53.331959 696018 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1124 09:04:53.420508 696018 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1124 09:04:53.513830 696018 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1124 09:04:53.526253 696018 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1124 09:04:53.540562 696018 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubeadm?checksum=file:https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubeadm.sha256
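
For the no-preload profile there is no preloaded tarball, so the kubeadm binary is streamed straight from dl.k8s.io and verified against the published .sha256 file named in the URL above. A sketch of that download-and-verify step; caching to .minikube/cache, which the real code does when allowed, is omitted:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"net/http"
	"strings"
)

// fetchVerified downloads url and checks it against url+".sha256",
// which for dl.k8s.io releases contains the bare hex digest.
func fetchVerified(url string) ([]byte, error) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	sumResp, err := http.Get(url + ".sha256")
	if err != nil {
		return nil, err
	}
	defer sumResp.Body.Close()
	want, err := io.ReadAll(sumResp.Body)
	if err != nil {
		return nil, err
	}
	got := sha256.Sum256(body)
	if hex.EncodeToString(got[:]) != strings.TrimSpace(string(want)) {
		return nil, fmt.Errorf("checksum mismatch for %s", url)
	}
	return body, nil
}

func main() {
	bin, err := fetchVerified("https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubeadm")
	fmt.Println(len(bin), err)
}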
I1124 09:04:53.865082 696018 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I1124 09:04:53.876277 696018 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1124 09:04:53.885584 696018 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I1124 09:04:53.885655 696018 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I1124 09:04:53.895158 696018 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1124 09:04:53.904766 696018 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1124 09:04:53.913841 696018 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1124 09:04:53.922747 696018 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1124 09:04:53.932360 696018 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1124 09:04:53.943272 696018 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1124 09:04:53.952416 696018 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1124 09:04:53.961850 696018 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1124 09:04:53.969795 696018 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1124 09:04:53.977270 696018 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1124 09:04:54.067216 696018 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1124 09:04:54.151776 696018 start.go:543] Will wait 60s for socket path /run/containerd/containerd.sock
I1124 09:04:54.151849 696018 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I1124 09:04:54.156309 696018 start.go:564] Will wait 60s for crictl version
I1124 09:04:54.156367 696018 ssh_runner.go:195] Run: which crictl
I1124 09:04:54.160683 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1124 09:04:54.187130 696018 start.go:580] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: v2.1.5
RuntimeApiVersion: v1
I1124 09:04:54.187193 696018 ssh_runner.go:195] Run: containerd --version
I1124 09:04:54.208524 696018 ssh_runner.go:195] Run: containerd --version
I1124 09:04:54.233294 696018 out.go:179] * Preparing Kubernetes v1.35.0-beta.0 on containerd 2.1.5 ...
I1124 09:04:49.920675 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:04:49.921171 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:04:50.420805 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:04:50.421212 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:04:50.920534 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
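
These api_server.go lines (from the third cluster running in parallel, PID 685562) poll /healthz roughly every 500ms, logging "connection refused" until the apiserver comes up. A sketch of that probe loop; TLS verification is skipped here only because the probe runs before the cluster CA is trusted locally, and the address is copied from the log:

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{
		Timeout: 2 * time.Second,
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	for {
		resp, err := client.Get("https://192.168.76.2:8443/healthz")
		if err == nil && resp.StatusCode == http.StatusOK {
			resp.Body.Close()
			fmt.Println("apiserver healthy")
			return
		}
		if err != nil {
			fmt.Println("stopped:", err) // "connect: connection refused" while starting
		} else {
			resp.Body.Close()
		}
		time.Sleep(500 * time.Millisecond)
	}
}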
I1124 09:04:54.046841 695520 cli_runner.go:164] Run: docker network inspect old-k8s-version-128377 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1124 09:04:54.064168 695520 ssh_runner.go:195] Run: grep 192.168.103.1 host.minikube.internal$ /etc/hosts
I1124 09:04:54.068915 695520 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.103.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
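
The bash one-liner above filters any stale host.minikube.internal entry out of /etc/hosts, appends the current gateway IP, and copies the result back via a temp file (the /tmp/h.$$ indirection exists so only the final cp needs sudo). A Go equivalent of the filter-and-append, writing directly for brevity:

package main

import (
	"os"
	"strings"
)

func main() {
	data, err := os.ReadFile("/etc/hosts")
	if err != nil {
		panic(err)
	}
	var kept []string
	for _, line := range strings.Split(strings.TrimRight(string(data), "\n"), "\n") {
		// Drop any previous entry for the alias (tab-separated, as grep -v $'\t...' does).
		if !strings.HasSuffix(line, "\thost.minikube.internal") {
			kept = append(kept, line)
		}
	}
	kept = append(kept, "192.168.103.1\thost.minikube.internal")
	if err := os.WriteFile("/etc/hosts",
		[]byte(strings.Join(kept, "\n")+"\n"), 0o644); err != nil {
		panic(err)
	}
}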
I1124 09:04:54.079411 695520 kubeadm.go:884] updating cluster {Name:old-k8s-version-128377 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-128377 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.103.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1124 09:04:54.079584 695520 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1124 09:04:54.079651 695520 ssh_runner.go:195] Run: sudo crictl images --output json
I1124 09:04:54.105064 695520 containerd.go:627] all images are preloaded for containerd runtime.
I1124 09:04:54.105092 695520 containerd.go:534] Images already preloaded, skipping extraction
I1124 09:04:54.105153 695520 ssh_runner.go:195] Run: sudo crictl images --output json
I1124 09:04:54.131723 695520 containerd.go:627] all images are preloaded for containerd runtime.
I1124 09:04:54.131746 695520 cache_images.go:86] Images are preloaded, skipping loading
I1124 09:04:54.131756 695520 kubeadm.go:935] updating node { 192.168.103.2 8443 v1.28.0 containerd true true} ...
I1124 09:04:54.131858 695520 kubeadm.go:947] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.28.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=old-k8s-version-128377 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.103.2
[Install]
config:
{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-128377 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1124 09:04:54.131921 695520 ssh_runner.go:195] Run: sudo crictl info
I1124 09:04:54.160918 695520 cni.go:84] Creating CNI manager for ""
I1124 09:04:54.160940 695520 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1124 09:04:54.160955 695520 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1124 09:04:54.160976 695520 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.103.2 APIServerPort:8443 KubernetesVersion:v1.28.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:old-k8s-version-128377 NodeName:old-k8s-version-128377 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.103.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.103.2 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1124 09:04:54.161123 695520 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.103.2
  bindPort: 8443
bootstrapTokens:
  - groups:
      - system:bootstrappers:kubeadm:default-node-token
    ttl: 24h0m0s
    usages:
      - signing
      - authentication
nodeRegistration:
  criSocket: unix:///run/containerd/containerd.sock
  name: "old-k8s-version-128377"
  kubeletExtraArgs:
    node-ip: 192.168.103.2
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
  certSANs: ["127.0.0.1", "localhost", "192.168.103.2"]
  extraArgs:
    enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
  extraArgs:
    allocate-node-cidrs: "true"
    leader-elect: "false"
scheduler:
  extraArgs:
    leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd
    extraArgs:
      proxy-refresh-interval: "70000"
kubernetesVersion: v1.28.0
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  x509:
    clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
  maxPerCore: 0
  # Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
  tcpEstablishedTimeout: 0s
  # Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
  tcpCloseWaitTimeout: 0s
I1124 09:04:54.161190 695520 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.28.0
I1124 09:04:54.169102 695520 binaries.go:51] Found k8s binaries, skipping transfer
I1124 09:04:54.169150 695520 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1124 09:04:54.176962 695520 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (327 bytes)
I1124 09:04:54.191252 695520 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1124 09:04:54.206931 695520 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2178 bytes)
I1124 09:04:54.220958 695520 ssh_runner.go:195] Run: grep 192.168.103.2 control-plane.minikube.internal$ /etc/hosts
I1124 09:04:54.225158 695520 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.103.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1124 09:04:54.236116 695520 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1124 09:04:54.319599 695520 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1124 09:04:54.342135 695520 certs.go:69] Setting up /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377 for IP: 192.168.103.2
I1124 09:04:54.342157 695520 certs.go:195] generating shared ca certs ...
I1124 09:04:54.342176 695520 certs.go:227] acquiring lock for ca certs: {Name:mk977567029a87925dffc7f909bfa5f74bf239fc Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 09:04:54.342355 695520 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21978-435860/.minikube/ca.key
I1124 09:04:54.342406 695520 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21978-435860/.minikube/proxy-client-ca.key
I1124 09:04:54.342416 695520 certs.go:257] generating profile certs ...
I1124 09:04:54.342497 695520 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/client.key
I1124 09:04:54.342513 695520 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/client.crt with IP's: []
I1124 09:04:54.488402 695520 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/client.crt ...
I1124 09:04:54.488432 695520 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/client.crt: {Name:mk87cd521056210340bc5798f0387b3f36dc4635 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 09:04:54.488613 695520 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/client.key ...
I1124 09:04:54.488628 695520 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/client.key: {Name:mk03c81f6da2f2b54dfd9fa0e30866e3372921ee Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 09:04:54.488712 695520 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/apiserver.key.f2d0a0c1
I1124 09:04:54.488729 695520 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/apiserver.crt.f2d0a0c1 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.103.2]
I1124 09:04:54.543616 695520 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/apiserver.crt.f2d0a0c1 ...
I1124 09:04:54.543654 695520 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/apiserver.crt.f2d0a0c1: {Name:mk2f5faeeb1a8cba2153625fbd7d3a7e54f95aaf Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 09:04:54.543851 695520 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/apiserver.key.f2d0a0c1 ...
I1124 09:04:54.543873 695520 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/apiserver.key.f2d0a0c1: {Name:mk7ed4cadcafdc2e1a661255372b702ae6719654 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 09:04:54.543964 695520 certs.go:382] copying /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/apiserver.crt.f2d0a0c1 -> /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/apiserver.crt
I1124 09:04:54.544040 695520 certs.go:386] copying /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/apiserver.key.f2d0a0c1 -> /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/apiserver.key
I1124 09:04:54.544132 695520 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/proxy-client.key
I1124 09:04:54.544150 695520 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/proxy-client.crt with IP's: []
I1124 09:04:54.594781 695520 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/proxy-client.crt ...
I1124 09:04:54.594837 695520 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/proxy-client.crt: {Name:mk33ff647329a0bdf714fd27ddf109ec15b6d483 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 09:04:54.595015 695520 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/proxy-client.key ...
I1124 09:04:54.595034 695520 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/proxy-client.key: {Name:mk9bf52d92c35c053f63b6073f2a38e1ff2182d9 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 09:04:54.595287 695520 certs.go:484] found cert: /home/jenkins/minikube-integration/21978-435860/.minikube/certs/439524.pem (1338 bytes)
W1124 09:04:54.595344 695520 certs.go:480] ignoring /home/jenkins/minikube-integration/21978-435860/.minikube/certs/439524_empty.pem, impossibly tiny 0 bytes
I1124 09:04:54.595359 695520 certs.go:484] found cert: /home/jenkins/minikube-integration/21978-435860/.minikube/certs/ca-key.pem (1675 bytes)
I1124 09:04:54.595395 695520 certs.go:484] found cert: /home/jenkins/minikube-integration/21978-435860/.minikube/certs/ca.pem (1082 bytes)
I1124 09:04:54.595433 695520 certs.go:484] found cert: /home/jenkins/minikube-integration/21978-435860/.minikube/certs/cert.pem (1123 bytes)
I1124 09:04:54.595484 695520 certs.go:484] found cert: /home/jenkins/minikube-integration/21978-435860/.minikube/certs/key.pem (1675 bytes)
I1124 09:04:54.595553 695520 certs.go:484] found cert: /home/jenkins/minikube-integration/21978-435860/.minikube/files/etc/ssl/certs/4395242.pem (1708 bytes)
I1124 09:04:54.596350 695520 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1124 09:04:54.616384 695520 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1124 09:04:54.633998 695520 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1124 09:04:54.651552 695520 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I1124 09:04:54.669737 695520 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1432 bytes)
I1124 09:04:54.686876 695520 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I1124 09:04:54.703726 695520 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1124 09:04:54.720840 695520 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I1124 09:04:54.737534 695520 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1124 09:04:54.757717 695520 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/certs/439524.pem --> /usr/share/ca-certificates/439524.pem (1338 bytes)
I1124 09:04:54.774715 695520 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/files/etc/ssl/certs/4395242.pem --> /usr/share/ca-certificates/4395242.pem (1708 bytes)
I1124 09:04:54.791052 695520 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1124 09:04:54.802968 695520 ssh_runner.go:195] Run: openssl version
I1124 09:04:54.808893 695520 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1124 09:04:54.816748 695520 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1124 09:04:54.820220 695520 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 24 08:30 /usr/share/ca-certificates/minikubeCA.pem
I1124 09:04:54.820260 695520 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1124 09:04:54.854133 695520 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1124 09:04:54.862216 695520 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/439524.pem && ln -fs /usr/share/ca-certificates/439524.pem /etc/ssl/certs/439524.pem"
I1124 09:04:54.870277 695520 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/439524.pem
I1124 09:04:54.873860 695520 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 24 08:38 /usr/share/ca-certificates/439524.pem
I1124 09:04:54.873906 695520 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/439524.pem
I1124 09:04:54.910146 695520 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/439524.pem /etc/ssl/certs/51391683.0"
I1124 09:04:54.919148 695520 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/4395242.pem && ln -fs /usr/share/ca-certificates/4395242.pem /etc/ssl/certs/4395242.pem"
I1124 09:04:54.927753 695520 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/4395242.pem
I1124 09:04:54.931870 695520 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 24 08:38 /usr/share/ca-certificates/4395242.pem
I1124 09:04:54.931921 695520 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/4395242.pem
I1124 09:04:54.972285 695520 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/4395242.pem /etc/ssl/certs/3ec20f2e.0"
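
The openssl/ln dance above is how OpenSSL locates trusted CAs: each certificate in /etc/ssl/certs must be reachable via a <subject-hash>.0 symlink, which is what c_rehash automates and what the `openssl x509 -hash` + `ln -fs` pairs do here. A Go sketch of one such link; the example path and the b5213941 hash are taken from the log:

package main

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
)

func main() {
	pemPath := "/usr/share/ca-certificates/minikubeCA.pem"
	out, err := exec.Command("openssl", "x509", "-hash", "-noout", "-in", pemPath).Output()
	if err != nil {
		panic(err)
	}
	hash := strings.TrimSpace(string(out)) // e.g. "b5213941"
	link := "/etc/ssl/certs/" + hash + ".0"
	// ln -fs semantics: drop any stale link, then symlink.
	_ = os.Remove(link)
	if err := os.Symlink(pemPath, link); err != nil {
		panic(err)
	}
	fmt.Println("linked", link, "->", pemPath)
}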
I1124 09:04:54.981223 695520 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1124 09:04:54.984999 695520 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1124 09:04:54.985067 695520 kubeadm.go:401] StartCluster: {Name:old-k8s-version-128377 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-128377 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.103.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1124 09:04:54.985165 695520 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I1124 09:04:54.985213 695520 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I1124 09:04:55.012874 695520 cri.go:89] found id: ""
I1124 09:04:55.012940 695520 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1124 09:04:55.020831 695520 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1124 09:04:55.029069 695520 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I1124 09:04:55.029111 695520 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1124 09:04:55.036334 695520 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1124 09:04:55.036348 695520 kubeadm.go:158] found existing configuration files:
I1124 09:04:55.036384 695520 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1124 09:04:55.044532 695520 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1124 09:04:55.044579 695520 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1124 09:04:55.051885 695520 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1124 09:04:55.059335 695520 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1124 09:04:55.059381 695520 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1124 09:04:55.066924 695520 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1124 09:04:55.075157 695520 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1124 09:04:55.075202 695520 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1124 09:04:55.082536 695520 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1124 09:04:55.090276 695520 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1124 09:04:55.090333 695520 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1124 09:04:55.097848 695520 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.28.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1124 09:04:55.141844 695520 kubeadm.go:319] [init] Using Kubernetes version: v1.28.0
I1124 09:04:55.142222 695520 kubeadm.go:319] [preflight] Running pre-flight checks
I1124 09:04:55.176293 695520 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I1124 09:04:55.176360 695520 kubeadm.go:319] KERNEL_VERSION: 6.8.0-1044-gcp
I1124 09:04:55.176399 695520 kubeadm.go:319] OS: Linux
I1124 09:04:55.176522 695520 kubeadm.go:319] CGROUPS_CPU: enabled
I1124 09:04:55.176607 695520 kubeadm.go:319] CGROUPS_CPUSET: enabled
I1124 09:04:55.176692 695520 kubeadm.go:319] CGROUPS_DEVICES: enabled
I1124 09:04:55.176788 695520 kubeadm.go:319] CGROUPS_FREEZER: enabled
I1124 09:04:55.176861 695520 kubeadm.go:319] CGROUPS_MEMORY: enabled
I1124 09:04:55.176926 695520 kubeadm.go:319] CGROUPS_PIDS: enabled
I1124 09:04:55.177000 695520 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I1124 09:04:55.177072 695520 kubeadm.go:319] CGROUPS_IO: enabled
I1124 09:04:55.267260 695520 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1124 09:04:55.267430 695520 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1124 09:04:55.267573 695520 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I1124 09:04:55.406819 695520 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1124 09:04:55.408942 695520 out.go:252] - Generating certificates and keys ...
I1124 09:04:55.409040 695520 kubeadm.go:319] [certs] Using existing ca certificate authority
I1124 09:04:55.409154 695520 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1124 09:04:55.535942 695520 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I1124 09:04:55.747446 695520 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I1124 09:04:56.231180 695520 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1124 09:04:56.348617 695520 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1124 09:04:56.564540 695520 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1124 09:04:56.564771 695520 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [localhost old-k8s-version-128377] and IPs [192.168.103.2 127.0.0.1 ::1]
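The kubeadm init invocation above disables exactly the preflight checks that cannot pass inside a docker-driver container (swap, CPU/memory limits, kernel config, ports already owned by the host). A minimal local sketch of assembling and streaming that command; minikube actually runs it through its SSH runner with a versioned PATH, and the flag list here is abbreviated from the log:

package main

import (
    "os"
    "os/exec"
    "strings"
)

func main() {
    // Subset of the ignore-preflight-errors list from the invocation above.
    ignored := []string{
        "DirAvailable--etc-kubernetes-manifests",
        "DirAvailable--var-lib-minikube",
        "Port-10250", "Swap", "NumCPU", "Mem", "SystemVerification",
        "FileContent--proc-sys-net-bridge-bridge-nf-call-iptables",
    }
    cmd := exec.Command("sudo", "kubeadm", "init",
        "--config", "/var/tmp/minikube/kubeadm.yaml",
        "--ignore-preflight-errors="+strings.Join(ignored, ","))
    cmd.Stdout = os.Stdout // stream the [init]/[preflight]/[certs] phase lines as they run
    cmd.Stderr = os.Stderr
    if err := cmd.Run(); err != nil {
        os.Exit(1)
    }
}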
I1124 09:04:54.234417 696018 cli_runner.go:164] Run: docker network inspect no-preload-820576 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1124 09:04:54.252265 696018 ssh_runner.go:195] Run: grep 192.168.85.1 host.minikube.internal$ /etc/hosts
I1124 09:04:54.256402 696018 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.85.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1124 09:04:54.271173 696018 kubeadm.go:884] updating cluster {Name:no-preload-820576 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.35.0-beta.0 ClusterName:no-preload-820576 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.35.0-beta.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1124 09:04:54.271376 696018 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubeadm?checksum=file:https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubeadm.sha256
I1124 09:04:54.585565 696018 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubeadm?checksum=file:https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubeadm.sha256
I1124 09:04:54.895614 696018 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubeadm?checksum=file:https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubeadm.sha256
I1124 09:04:55.213448 696018 preload.go:188] Checking if preload exists for k8s version v1.35.0-beta.0 and runtime containerd
I1124 09:04:55.213537 696018 ssh_runner.go:195] Run: sudo crictl images --output json
I1124 09:04:55.248674 696018 containerd.go:623] couldn't find preloaded image for "registry.k8s.io/kube-apiserver:v1.35.0-beta.0". assuming images are not preloaded.
I1124 09:04:55.248704 696018 cache_images.go:90] LoadCachedImages start: [registry.k8s.io/kube-apiserver:v1.35.0-beta.0 registry.k8s.io/kube-controller-manager:v1.35.0-beta.0 registry.k8s.io/kube-scheduler:v1.35.0-beta.0 registry.k8s.io/kube-proxy:v1.35.0-beta.0 registry.k8s.io/pause:3.10.1 registry.k8s.io/etcd:3.5.24-0 registry.k8s.io/coredns/coredns:v1.13.1 gcr.io/k8s-minikube/storage-provisioner:v5]
I1124 09:04:55.248761 696018 image.go:138] retrieving image: gcr.io/k8s-minikube/storage-provisioner:v5
I1124 09:04:55.248818 696018 image.go:138] retrieving image: registry.k8s.io/kube-proxy:v1.35.0-beta.0
I1124 09:04:55.248841 696018 image.go:138] retrieving image: registry.k8s.io/coredns/coredns:v1.13.1
I1124 09:04:55.248860 696018 image.go:138] retrieving image: registry.k8s.io/pause:3.10.1
I1124 09:04:55.248864 696018 image.go:138] retrieving image: registry.k8s.io/kube-apiserver:v1.35.0-beta.0
I1124 09:04:55.248833 696018 image.go:138] retrieving image: registry.k8s.io/kube-controller-manager:v1.35.0-beta.0
I1124 09:04:55.248841 696018 image.go:138] retrieving image: registry.k8s.io/etcd:3.5.24-0
I1124 09:04:55.249034 696018 image.go:138] retrieving image: registry.k8s.io/kube-scheduler:v1.35.0-beta.0
I1124 09:04:55.250186 696018 image.go:181] daemon lookup for registry.k8s.io/coredns/coredns:v1.13.1: Error response from daemon: No such image: registry.k8s.io/coredns/coredns:v1.13.1
I1124 09:04:55.250215 696018 image.go:181] daemon lookup for gcr.io/k8s-minikube/storage-provisioner:v5: Error response from daemon: No such image: gcr.io/k8s-minikube/storage-provisioner:v5
I1124 09:04:55.250182 696018 image.go:181] daemon lookup for registry.k8s.io/etcd:3.5.24-0: Error response from daemon: No such image: registry.k8s.io/etcd:3.5.24-0
I1124 09:04:55.250186 696018 image.go:181] daemon lookup for registry.k8s.io/pause:3.10.1: Error response from daemon: No such image: registry.k8s.io/pause:3.10.1
I1124 09:04:55.250253 696018 image.go:181] daemon lookup for registry.k8s.io/kube-apiserver:v1.35.0-beta.0: Error response from daemon: No such image: registry.k8s.io/kube-apiserver:v1.35.0-beta.0
I1124 09:04:55.250254 696018 image.go:181] daemon lookup for registry.k8s.io/kube-controller-manager:v1.35.0-beta.0: Error response from daemon: No such image: registry.k8s.io/kube-controller-manager:v1.35.0-beta.0
I1124 09:04:55.250188 696018 image.go:181] daemon lookup for registry.k8s.io/kube-proxy:v1.35.0-beta.0: Error response from daemon: No such image: registry.k8s.io/kube-proxy:v1.35.0-beta.0
I1124 09:04:55.250648 696018 image.go:181] daemon lookup for registry.k8s.io/kube-scheduler:v1.35.0-beta.0: Error response from daemon: No such image: registry.k8s.io/kube-scheduler:v1.35.0-beta.0
I1124 09:04:55.411211 696018 containerd.go:267] Checking existence of image with name "registry.k8s.io/coredns/coredns:v1.13.1" and sha "aa5e3ebc0dfed0566805186b9e47110d8f9122291d8bad1497e78873ad291139"
I1124 09:04:55.411274 696018 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/coredns/coredns:v1.13.1
I1124 09:04:55.432666 696018 cache_images.go:118] "registry.k8s.io/coredns/coredns:v1.13.1" needs transfer: "registry.k8s.io/coredns/coredns:v1.13.1" does not exist at hash "aa5e3ebc0dfed0566805186b9e47110d8f9122291d8bad1497e78873ad291139" in container runtime
I1124 09:04:55.432717 696018 cri.go:218] Removing image: registry.k8s.io/coredns/coredns:v1.13.1
I1124 09:04:55.432775 696018 ssh_runner.go:195] Run: which crictl
I1124 09:04:55.436380 696018 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-proxy:v1.35.0-beta.0" and sha "8a4ded35a3eb1a80eb49c1a887194460a56b413eed7eb69e59605daf4ec23810"
I1124 09:04:55.436448 696018 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-proxy:v1.35.0-beta.0
I1124 09:04:55.436570 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.13.1
I1124 09:04:55.438317 696018 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-apiserver:v1.35.0-beta.0" and sha "aa9d02839d8def718798bd410c88aba69248b26a8f0e3af2c728b512b67cb52b"
I1124 09:04:55.438376 696018 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-apiserver:v1.35.0-beta.0
I1124 09:04:55.445544 696018 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-controller-manager:v1.35.0-beta.0" and sha "45f3cc72d235f1cfda3de70fe9b2b9d3b356091e491b915f9efd6f0d6e5253bc"
I1124 09:04:55.445608 696018 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-controller-manager:v1.35.0-beta.0
I1124 09:04:55.462611 696018 cache_images.go:118] "registry.k8s.io/kube-proxy:v1.35.0-beta.0" needs transfer: "registry.k8s.io/kube-proxy:v1.35.0-beta.0" does not exist at hash "8a4ded35a3eb1a80eb49c1a887194460a56b413eed7eb69e59605daf4ec23810" in container runtime
I1124 09:04:55.462672 696018 cri.go:218] Removing image: registry.k8s.io/kube-proxy:v1.35.0-beta.0
I1124 09:04:55.462735 696018 ssh_runner.go:195] Run: which crictl
I1124 09:04:55.466873 696018 containerd.go:267] Checking existence of image with name "registry.k8s.io/pause:3.10.1" and sha "cd073f4c5f6a8e9dc6f3125ba00cf60819cae95c1ec84a1f146ee4a9cf9e803f"
I1124 09:04:55.466944 696018 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/pause:3.10.1
I1124 09:04:55.469707 696018 cache_images.go:118] "registry.k8s.io/kube-apiserver:v1.35.0-beta.0" needs transfer: "registry.k8s.io/kube-apiserver:v1.35.0-beta.0" does not exist at hash "aa9d02839d8def718798bd410c88aba69248b26a8f0e3af2c728b512b67cb52b" in container runtime
I1124 09:04:55.469760 696018 cri.go:218] Removing image: registry.k8s.io/kube-apiserver:v1.35.0-beta.0
I1124 09:04:55.469761 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.13.1
I1124 09:04:55.469806 696018 ssh_runner.go:195] Run: which crictl
I1124 09:04:55.476188 696018 containerd.go:267] Checking existence of image with name "registry.k8s.io/etcd:3.5.24-0" and sha "8cb12dd0c3e42c6d0175d09a060358cbb68a3ecc2ba4dbb00327c7d760e1425d"
I1124 09:04:55.476260 696018 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/etcd:3.5.24-0
I1124 09:04:55.476601 696018 cache_images.go:118] "registry.k8s.io/kube-controller-manager:v1.35.0-beta.0" needs transfer: "registry.k8s.io/kube-controller-manager:v1.35.0-beta.0" does not exist at hash "45f3cc72d235f1cfda3de70fe9b2b9d3b356091e491b915f9efd6f0d6e5253bc" in container runtime
I1124 09:04:55.476645 696018 cri.go:218] Removing image: registry.k8s.io/kube-controller-manager:v1.35.0-beta.0
I1124 09:04:55.476700 696018 ssh_runner.go:195] Run: which crictl
I1124 09:04:55.476760 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.35.0-beta.0
I1124 09:04:55.483510 696018 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-scheduler:v1.35.0-beta.0" and sha "7bb6219ddab95bdabbef83f051bee4fdd14b6f791aaa3121080cb2c58ada2e46"
I1124 09:04:55.483571 696018 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-scheduler:v1.35.0-beta.0
I1124 09:04:55.493634 696018 cache_images.go:118] "registry.k8s.io/pause:3.10.1" needs transfer: "registry.k8s.io/pause:3.10.1" does not exist at hash "cd073f4c5f6a8e9dc6f3125ba00cf60819cae95c1ec84a1f146ee4a9cf9e803f" in container runtime
I1124 09:04:55.493674 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.35.0-beta.0
I1124 09:04:55.493687 696018 cri.go:218] Removing image: registry.k8s.io/pause:3.10.1
I1124 09:04:55.493730 696018 ssh_runner.go:195] Run: which crictl
I1124 09:04:55.504559 696018 cache_images.go:118] "registry.k8s.io/etcd:3.5.24-0" needs transfer: "registry.k8s.io/etcd:3.5.24-0" does not exist at hash "8cb12dd0c3e42c6d0175d09a060358cbb68a3ecc2ba4dbb00327c7d760e1425d" in container runtime
I1124 09:04:55.504599 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.13.1
I1124 09:04:55.504606 696018 cri.go:218] Removing image: registry.k8s.io/etcd:3.5.24-0
I1124 09:04:55.504646 696018 ssh_runner.go:195] Run: which crictl
I1124 09:04:55.512866 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.35.0-beta.0
I1124 09:04:55.512892 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.35.0-beta.0
I1124 09:04:55.512910 696018 cache_images.go:118] "registry.k8s.io/kube-scheduler:v1.35.0-beta.0" needs transfer: "registry.k8s.io/kube-scheduler:v1.35.0-beta.0" does not exist at hash "7bb6219ddab95bdabbef83f051bee4fdd14b6f791aaa3121080cb2c58ada2e46" in container runtime
I1124 09:04:55.512950 696018 cri.go:218] Removing image: registry.k8s.io/kube-scheduler:v1.35.0-beta.0
I1124 09:04:55.512990 696018 ssh_runner.go:195] Run: which crictl
I1124 09:04:55.526695 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1124 09:04:55.526717 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.35.0-beta.0
I1124 09:04:55.526785 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.5.24-0
I1124 09:04:55.539513 696018 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.13.1
I1124 09:04:55.539663 696018 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/coredns_v1.13.1
I1124 09:04:55.546674 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.35.0-beta.0
I1124 09:04:55.546750 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.35.0-beta.0
I1124 09:04:55.546715 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.35.0-beta.0
I1124 09:04:55.564076 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.5.24-0
I1124 09:04:55.567023 696018 ssh_runner.go:352] existence check for /var/lib/minikube/images/coredns_v1.13.1: stat -c "%s %y" /var/lib/minikube/images/coredns_v1.13.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/coredns_v1.13.1': No such file or directory
I1124 09:04:55.567049 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.35.0-beta.0
I1124 09:04:55.567061 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.13.1 --> /var/lib/minikube/images/coredns_v1.13.1 (23562752 bytes)
I1124 09:04:55.567151 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1124 09:04:55.598524 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.35.0-beta.0
I1124 09:04:55.598552 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.35.0-beta.0
I1124 09:04:55.598652 696018 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.35.0-beta.0
I1124 09:04:55.598735 696018 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-proxy_v1.35.0-beta.0
I1124 09:04:55.614879 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.5.24-0
I1124 09:04:55.624975 696018 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.35.0-beta.0
I1124 09:04:55.625072 696018 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-apiserver_v1.35.0-beta.0
I1124 09:04:55.679323 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1124 09:04:55.684055 696018 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.35.0-beta.0
I1124 09:04:55.684090 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.35.0-beta.0
I1124 09:04:55.684124 696018 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-proxy_v1.35.0-beta.0: stat -c "%s %y" /var/lib/minikube/images/kube-proxy_v1.35.0-beta.0: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-proxy_v1.35.0-beta.0': No such file or directory
I1124 09:04:55.684140 696018 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-controller-manager_v1.35.0-beta.0
I1124 09:04:55.684150 696018 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.5.24-0
I1124 09:04:55.684159 696018 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-apiserver_v1.35.0-beta.0: stat -c "%s %y" /var/lib/minikube/images/kube-apiserver_v1.35.0-beta.0: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-apiserver_v1.35.0-beta.0': No such file or directory
I1124 09:04:55.684160 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.35.0-beta.0 --> /var/lib/minikube/images/kube-proxy_v1.35.0-beta.0 (25788928 bytes)
I1124 09:04:55.684171 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.35.0-beta.0 --> /var/lib/minikube/images/kube-apiserver_v1.35.0-beta.0 (27682304 bytes)
I1124 09:04:55.684244 696018 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/etcd_3.5.24-0
I1124 09:04:55.736024 696018 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1
I1124 09:04:55.736135 696018 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/pause_3.10.1
I1124 09:04:55.746073 696018 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-controller-manager_v1.35.0-beta.0: stat -c "%s %y" /var/lib/minikube/images/kube-controller-manager_v1.35.0-beta.0: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-controller-manager_v1.35.0-beta.0': No such file or directory
I1124 09:04:55.746108 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.35.0-beta.0 --> /var/lib/minikube/images/kube-controller-manager_v1.35.0-beta.0 (23131648 bytes)
I1124 09:04:55.746157 696018 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.35.0-beta.0
I1124 09:04:55.746175 696018 ssh_runner.go:352] existence check for /var/lib/minikube/images/etcd_3.5.24-0: stat -c "%s %y" /var/lib/minikube/images/etcd_3.5.24-0: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/etcd_3.5.24-0': No such file or directory
I1124 09:04:55.746191 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.5.24-0 --> /var/lib/minikube/images/etcd_3.5.24-0 (23728640 bytes)
I1124 09:04:55.746248 696018 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-scheduler_v1.35.0-beta.0
I1124 09:04:55.801010 696018 ssh_runner.go:352] existence check for /var/lib/minikube/images/pause_3.10.1: stat -c "%s %y" /var/lib/minikube/images/pause_3.10.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/pause_3.10.1': No such file or directory
I1124 09:04:55.801049 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 --> /var/lib/minikube/images/pause_3.10.1 (321024 bytes)
I1124 09:04:55.808405 696018 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-scheduler_v1.35.0-beta.0: stat -c "%s %y" /var/lib/minikube/images/kube-scheduler_v1.35.0-beta.0: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-scheduler_v1.35.0-beta.0': No such file or directory
I1124 09:04:55.808441 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.35.0-beta.0 --> /var/lib/minikube/images/kube-scheduler_v1.35.0-beta.0 (17239040 bytes)
I1124 09:04:55.880897 696018 containerd.go:285] Loading image: /var/lib/minikube/images/pause_3.10.1
I1124 09:04:55.880969 696018 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/pause_3.10.1
I1124 09:04:56.015999 696018 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 from cache
I1124 09:04:56.068815 696018 containerd.go:285] Loading image: /var/lib/minikube/images/etcd_3.5.24-0
I1124 09:04:56.068912 696018 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/etcd_3.5.24-0
I1124 09:04:56.453297 696018 containerd.go:267] Checking existence of image with name "gcr.io/k8s-minikube/storage-provisioner:v5" and sha "6e38f40d628db3002f5617342c8872c935de530d867d0f709a2fbda1a302a562"
I1124 09:04:56.453371 696018 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==gcr.io/k8s-minikube/storage-provisioner:v5
I1124 09:04:57.304727 696018 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/etcd_3.5.24-0: (1.235782073s)
I1124 09:04:57.304763 696018 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.5.24-0 from cache
I1124 09:04:57.304794 696018 containerd.go:285] Loading image: /var/lib/minikube/images/kube-apiserver_v1.35.0-beta.0
I1124 09:04:57.304806 696018 cache_images.go:118] "gcr.io/k8s-minikube/storage-provisioner:v5" needs transfer: "gcr.io/k8s-minikube/storage-provisioner:v5" does not exist at hash "6e38f40d628db3002f5617342c8872c935de530d867d0f709a2fbda1a302a562" in container runtime
I1124 09:04:57.304847 696018 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-apiserver_v1.35.0-beta.0
I1124 09:04:57.304858 696018 cri.go:218] Removing image: gcr.io/k8s-minikube/storage-provisioner:v5
I1124 09:04:57.304920 696018 ssh_runner.go:195] Run: which crictl
I1124 09:04:56.768431 695520 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1124 09:04:56.768677 695520 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [localhost old-k8s-version-128377] and IPs [192.168.103.2 127.0.0.1 ::1]
I1124 09:04:57.042517 695520 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1124 09:04:57.135211 695520 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1124 09:04:57.487492 695520 kubeadm.go:319] [certs] Generating "sa" key and public key
I1124 09:04:57.487607 695520 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1124 09:04:57.647815 695520 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1124 09:04:57.788032 695520 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1124 09:04:58.007063 695520 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1124 09:04:58.262043 695520 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1124 09:04:58.262616 695520 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1124 09:04:58.265868 695520 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1124 09:04:55.921561 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1124 09:04:55.921607 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:04:58.266858 695520 out.go:252] - Booting up control plane ...
I1124 09:04:58.266989 695520 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1124 09:04:58.267065 695520 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1124 09:04:58.267746 695520 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1124 09:04:58.282824 695520 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1124 09:04:58.283699 695520 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1124 09:04:58.283773 695520 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1124 09:04:58.419897 695520 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
I1124 09:04:58.797650 696018 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-apiserver_v1.35.0-beta.0: (1.492766226s)
I1124 09:04:58.797672 696018 ssh_runner.go:235] Completed: which crictl: (1.492732478s)
I1124 09:04:58.797693 696018 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.35.0-beta.0 from cache
I1124 09:04:58.797722 696018 containerd.go:285] Loading image: /var/lib/minikube/images/kube-scheduler_v1.35.0-beta.0
I1124 09:04:58.797742 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1124 09:04:58.797763 696018 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-scheduler_v1.35.0-beta.0
I1124 09:04:59.494097 696018 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.35.0-beta.0 from cache
I1124 09:04:59.494141 696018 containerd.go:285] Loading image: /var/lib/minikube/images/coredns_v1.13.1
I1124 09:04:59.494193 696018 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/coredns_v1.13.1
I1124 09:04:59.494314 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1124 09:05:00.636087 696018 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/coredns_v1.13.1: (1.141861944s)
I1124 09:05:00.636150 696018 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.13.1 from cache
I1124 09:05:00.636183 696018 containerd.go:285] Loading image: /var/lib/minikube/images/kube-controller-manager_v1.35.0-beta.0
I1124 09:05:00.636184 696018 ssh_runner.go:235] Completed: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5: (1.141835433s)
I1124 09:05:00.636272 696018 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-controller-manager_v1.35.0-beta.0
I1124 09:05:00.636277 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1124 09:05:01.829551 696018 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-controller-manager_v1.35.0-beta.0: (1.193240306s)
I1124 09:05:01.829586 696018 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.35.0-beta.0 from cache
I1124 09:05:01.829561 696018 ssh_runner.go:235] Completed: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5: (1.193259021s)
I1124 09:05:01.829618 696018 containerd.go:285] Loading image: /var/lib/minikube/images/kube-proxy_v1.35.0-beta.0
I1124 09:05:01.829656 696018 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-proxy_v1.35.0-beta.0
I1124 09:05:01.829661 696018 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5
I1124 09:05:01.829741 696018 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v5
I1124 09:05:02.922442 695520 kubeadm.go:319] [apiclient] All control plane components are healthy after 4.502768 seconds
I1124 09:05:02.922650 695520 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1124 09:05:02.938003 695520 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1124 09:05:03.487168 695520 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1124 09:05:03.487569 695520 kubeadm.go:319] [mark-control-plane] Marking the node old-k8s-version-128377 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1124 09:05:03.997647 695520 kubeadm.go:319] [bootstrap-token] Using token: jnao2u.ovlrxqviyhx4po41
I1124 09:05:03.999063 695520 out.go:252] - Configuring RBAC rules ...
I1124 09:05:03.999223 695520 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1124 09:05:04.003823 695520 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1124 09:05:04.010298 695520 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1124 09:05:04.012923 695520 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1124 09:05:04.015535 695520 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1124 09:05:04.019043 695520 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1124 09:05:04.029389 695520 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1124 09:05:04.209549 695520 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1124 09:05:04.407855 695520 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1124 09:05:04.408750 695520 kubeadm.go:319]
I1124 09:05:04.408814 695520 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1124 09:05:04.408821 695520 kubeadm.go:319]
I1124 09:05:04.408930 695520 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1124 09:05:04.408949 695520 kubeadm.go:319]
I1124 09:05:04.408983 695520 kubeadm.go:319] mkdir -p $HOME/.kube
I1124 09:05:04.409060 695520 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1124 09:05:04.409107 695520 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1124 09:05:04.409122 695520 kubeadm.go:319]
I1124 09:05:04.409207 695520 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1124 09:05:04.409227 695520 kubeadm.go:319]
I1124 09:05:04.409283 695520 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1124 09:05:04.409289 695520 kubeadm.go:319]
I1124 09:05:04.409340 695520 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1124 09:05:04.409401 695520 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1124 09:05:04.409519 695520 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1124 09:05:04.409531 695520 kubeadm.go:319]
I1124 09:05:04.409633 695520 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1124 09:05:04.409739 695520 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1124 09:05:04.409748 695520 kubeadm.go:319]
I1124 09:05:04.409856 695520 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token jnao2u.ovlrxqviyhx4po41 \
I1124 09:05:04.409989 695520 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:058f105135414f3c09406a88ceaaa8a4946b8fa5ee02b1189df823d65cc738be \
I1124 09:05:04.410028 695520 kubeadm.go:319] --control-plane
I1124 09:05:04.410043 695520 kubeadm.go:319]
I1124 09:05:04.410157 695520 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1124 09:05:04.410168 695520 kubeadm.go:319]
I1124 09:05:04.410253 695520 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token jnao2u.ovlrxqviyhx4po41 \
I1124 09:05:04.410416 695520 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:058f105135414f3c09406a88ceaaa8a4946b8fa5ee02b1189df823d65cc738be
I1124 09:05:04.412734 695520 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1044-gcp\n", err: exit status 1
I1124 09:05:04.412863 695520 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1124 09:05:04.412887 695520 cni.go:84] Creating CNI manager for ""
I1124 09:05:04.412895 695520 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1124 09:05:04.414780 695520 out.go:179] * Configuring CNI (Container Networking Interface) ...
I1124 09:05:00.922661 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1124 09:05:00.922710 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:04.415630 695520 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I1124 09:05:04.420099 695520 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.28.0/kubectl ...
I1124 09:05:04.420115 695520 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I1124 09:05:04.433073 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I1124 09:05:05.091722 695520 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1124 09:05:05.091870 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes old-k8s-version-128377 minikube.k8s.io/updated_at=2025_11_24T09_05_05_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=393ee3e0b845623107dce6cda4f48ffd5c3d1811 minikube.k8s.io/name=old-k8s-version-128377 minikube.k8s.io/primary=true
I1124 09:05:05.092348 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:05.102498 695520 ops.go:34] apiserver oom_adj: -16
I1124 09:05:05.174868 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:05.675283 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:06.175310 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:02.915588 696018 ssh_runner.go:235] Completed: stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v5: (1.085815853s)
I1124 09:05:02.915634 696018 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-proxy_v1.35.0-beta.0: (1.085954166s)
I1124 09:05:02.915671 696018 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.35.0-beta.0 from cache
I1124 09:05:02.915639 696018 ssh_runner.go:352] existence check for /var/lib/minikube/images/storage-provisioner_v5: stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v5: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/storage-provisioner_v5': No such file or directory
I1124 09:05:02.915716 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 --> /var/lib/minikube/images/storage-provisioner_v5 (9060352 bytes)
I1124 09:05:02.976753 696018 containerd.go:285] Loading image: /var/lib/minikube/images/storage-provisioner_v5
I1124 09:05:02.976825 696018 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/storage-provisioner_v5
I1124 09:05:03.348632 696018 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 from cache
I1124 09:05:03.348678 696018 cache_images.go:125] Successfully loaded all cached images
I1124 09:05:03.348686 696018 cache_images.go:94] duration metric: took 8.099965824s to LoadCachedImages
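The interleaved 696018 lines above are one pipeline run per image: check existence in containerd via ctr images ls, crictl rmi the mismatched tag, stat the tarball under /var/lib/minikube/images, scp it from the host cache when the stat fails, then ctr -n=k8s.io images import. A linear sketch of a single image's path through that pipeline, assuming local exec and a plain cp in place of minikube's scp-over-SSH (paths illustrative):

package main

import (
    "fmt"
    "os"
    "os/exec"
)

// loadCachedImage is a sketch of the per-image flow in the log: stat the
// tarball on the node, copy it over if absent, then import it into
// containerd's k8s.io namespace. Not minikube's actual implementation.
func loadCachedImage(cacheTar, nodeTar string) error {
    if _, err := os.Stat(nodeTar); err != nil {
        // The "existence check ... Process exited with status 1" branch: transfer.
        if err := exec.Command("cp", cacheTar, nodeTar).Run(); err != nil {
            return fmt.Errorf("transfer %s: %w", cacheTar, err)
        }
    }
    // Equivalent of: sudo ctr -n=k8s.io images import <tarball>
    out, err := exec.Command("sudo", "ctr", "-n=k8s.io", "images", "import", nodeTar).CombinedOutput()
    if err != nil {
        return fmt.Errorf("import %s: %v\n%s", nodeTar, err, out)
    }
    return nil
}

func main() {
    err := loadCachedImage(
        os.ExpandEnv("$HOME/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1"),
        "/var/lib/minikube/images/pause_3.10.1")
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
    }
}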
I1124 09:05:03.348703 696018 kubeadm.go:935] updating node { 192.168.85.2 8443 v1.35.0-beta.0 containerd true true} ...
I1124 09:05:03.348825 696018 kubeadm.go:947] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.35.0-beta.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=no-preload-820576 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.85.2
[Install]
config:
{KubernetesVersion:v1.35.0-beta.0 ClusterName:no-preload-820576 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1124 09:05:03.348894 696018 ssh_runner.go:195] Run: sudo crictl info
I1124 09:05:03.376137 696018 cni.go:84] Creating CNI manager for ""
I1124 09:05:03.376168 696018 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1124 09:05:03.376188 696018 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1124 09:05:03.376210 696018 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.85.2 APIServerPort:8443 KubernetesVersion:v1.35.0-beta.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:no-preload-820576 NodeName:no-preload-820576 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.85.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.85.2 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1124 09:05:03.376350 696018 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.85.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///run/containerd/containerd.sock
name: "no-preload-820576"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.85.2"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.85.2"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.35.0-beta.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
I1124 09:05:03.376422 696018 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.35.0-beta.0
I1124 09:05:03.385368 696018 binaries.go:54] Didn't find k8s binaries: sudo ls /var/lib/minikube/binaries/v1.35.0-beta.0: Process exited with status 2
stdout:
stderr:
ls: cannot access '/var/lib/minikube/binaries/v1.35.0-beta.0': No such file or directory
Initiating transfer...
I1124 09:05:03.385424 696018 ssh_runner.go:195] Run: sudo mkdir -p /var/lib/minikube/binaries/v1.35.0-beta.0
I1124 09:05:03.394095 696018 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubectl?checksum=file:https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubectl.sha256
I1124 09:05:03.394128 696018 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubeadm?checksum=file:https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubeadm.sha256
I1124 09:05:03.394180 696018 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubelet?checksum=file:https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubelet.sha256
I1124 09:05:03.394191 696018 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl
I1124 09:05:03.394205 696018 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.35.0-beta.0/kubeadm
I1124 09:05:03.394225 696018 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1124 09:05:03.399712 696018 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.35.0-beta.0/kubeadm: stat -c "%s %y" /var/lib/minikube/binaries/v1.35.0-beta.0/kubeadm: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.35.0-beta.0/kubeadm': No such file or directory
I1124 09:05:03.399743 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/cache/linux/amd64/v1.35.0-beta.0/kubeadm --> /var/lib/minikube/binaries/v1.35.0-beta.0/kubeadm (72364216 bytes)
I1124 09:05:03.399797 696018 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl: stat -c "%s %y" /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.35.0-beta.0/kubectl': No such file or directory
I1124 09:05:03.399839 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/cache/linux/amd64/v1.35.0-beta.0/kubectl --> /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl (58589368 bytes)
I1124 09:05:03.414063 696018 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.35.0-beta.0/kubelet
I1124 09:05:03.448582 696018 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.35.0-beta.0/kubelet: stat -c "%s %y" /var/lib/minikube/binaries/v1.35.0-beta.0/kubelet: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.35.0-beta.0/kubelet': No such file or directory
I1124 09:05:03.448623 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/cache/linux/amd64/v1.35.0-beta.0/kubelet --> /var/lib/minikube/binaries/v1.35.0-beta.0/kubelet (58106148 bytes)
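The "Not caching binary, using https://dl.k8s.io/...?checksum=file:..." lines and the scp transfers above show the checksum-verified download path for the kubeadm/kubectl/kubelet binaries; the ?checksum=file: query is go-getter syntax, which delegates verification to the published .sha256 file. A sketch that inlines the equivalent check, assuming the .sha256 file begins with the hex digest (helper name illustrative):

package main

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
    "io"
    "net/http"
    "os"
    "strings"
)

// fetchVerified downloads url to dest while hashing the stream, then compares
// the digest against url+".sha256". Simplified sketch of the pattern above.
func fetchVerified(url, dest string) error {
    resp, err := http.Get(url)
    if err != nil {
        return err
    }
    defer resp.Body.Close()
    f, err := os.Create(dest)
    if err != nil {
        return err
    }
    defer f.Close()
    h := sha256.New()
    if _, err := io.Copy(io.MultiWriter(f, h), resp.Body); err != nil {
        return err
    }
    sumResp, err := http.Get(url + ".sha256")
    if err != nil {
        return err
    }
    defer sumResp.Body.Close()
    want, err := io.ReadAll(sumResp.Body)
    if err != nil {
        return err
    }
    if got := hex.EncodeToString(h.Sum(nil)); !strings.HasPrefix(strings.TrimSpace(string(want)), got) {
        return fmt.Errorf("checksum mismatch for %s", url)
    }
    return nil
}

func main() {
    url := "https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubectl"
    if err := fetchVerified(url, "kubectl"); err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
}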
I1124 09:05:03.941988 696018 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1124 09:05:03.950659 696018 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (328 bytes)
I1124 09:05:03.964545 696018 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (359 bytes)
I1124 09:05:03.980698 696018 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2236 bytes)
I1124 09:05:03.994370 696018 ssh_runner.go:195] Run: grep 192.168.85.2 control-plane.minikube.internal$ /etc/hosts
I1124 09:05:03.999682 696018 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.85.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
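The /bin/bash one-liner above (also used earlier for host.minikube.internal) rewrites /etc/hosts without sed -i: filter out any line already ending in the hostname, append the fresh "ip hostname" mapping, stage the result in a temp file, then copy it into place. A Go sketch of the same upsert; it stages the temp file next to the target and renames it, where the shell version in the log stages under /tmp and uses sudo cp:

package main

import (
    "fmt"
    "os"
    "strings"
)

// upsertHost drops any /etc/hosts line mapping host (tab-separated, matching
// the grep -v $'\t<host>$' in the log), appends the new mapping, and swaps
// the file atomically. Hypothetical helper; requires root.
func upsertHost(ip, host string) error {
    data, err := os.ReadFile("/etc/hosts")
    if err != nil {
        return err
    }
    var keep []string
    for _, line := range strings.Split(string(data), "\n") {
        if line != "" && !strings.HasSuffix(line, "\t"+host) {
            keep = append(keep, line) // note: blank lines are dropped in this sketch
        }
    }
    keep = append(keep, fmt.Sprintf("%s\t%s", ip, host))
    tmp := "/etc/hosts.minikube.tmp"
    if err := os.WriteFile(tmp, []byte(strings.Join(keep, "\n")+"\n"), 0644); err != nil {
        return err
    }
    return os.Rename(tmp, "/etc/hosts") // same filesystem, so the rename is atomic
}

func main() {
    if err := upsertHost("192.168.85.2", "control-plane.minikube.internal"); err != nil {
        fmt.Fprintln(os.Stderr, err)
    }
}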
I1124 09:05:04.011951 696018 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1124 09:05:04.105068 696018 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1124 09:05:04.129581 696018 certs.go:69] Setting up /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576 for IP: 192.168.85.2
I1124 09:05:04.129609 696018 certs.go:195] generating shared ca certs ...
I1124 09:05:04.129631 696018 certs.go:227] acquiring lock for ca certs: {Name:mk977567029a87925dffc7f909bfa5f74bf239fc Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 09:05:04.129796 696018 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21978-435860/.minikube/ca.key
I1124 09:05:04.129861 696018 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21978-435860/.minikube/proxy-client-ca.key
I1124 09:05:04.129876 696018 certs.go:257] generating profile certs ...
I1124 09:05:04.129944 696018 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/client.key
I1124 09:05:04.129964 696018 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/client.crt with IP's: []
I1124 09:05:04.178331 696018 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/client.crt ...
I1124 09:05:04.178368 696018 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/client.crt: {Name:mk7a6d48f62cb24db3b80fa6902658a2fab15360 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 09:05:04.178586 696018 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/client.key ...
I1124 09:05:04.178605 696018 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/client.key: {Name:mke761c4ec29e36beccc716dc800bc8fd841e3c6 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 09:05:04.178724 696018 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/apiserver.key.402ae632
I1124 09:05:04.178748 696018 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/apiserver.crt.402ae632 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.85.2]
I1124 09:05:04.417670 696018 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/apiserver.crt.402ae632 ...
I1124 09:05:04.417694 696018 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/apiserver.crt.402ae632: {Name:mk59a2d57d772e51aeeeb2a9a4dca760203e6d09 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 09:05:04.417874 696018 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/apiserver.key.402ae632 ...
I1124 09:05:04.417897 696018 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/apiserver.key.402ae632: {Name:mkdb0be38fd80ef77438b49aa69b9308c6d28ca3 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 09:05:04.418023 696018 certs.go:382] copying /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/apiserver.crt.402ae632 -> /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/apiserver.crt
I1124 09:05:04.418147 696018 certs.go:386] copying /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/apiserver.key.402ae632 -> /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/apiserver.key
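[note: the apiserver serving cert generated above is signed for the first IP of the 10.96.0.0/12 service CIDR (10.96.0.1), the loopback and fallback IPs, and the node IP 192.168.85.2. A condensed crypto/x509 sketch of issuing such a SAN cert; the serial number, common name, and key size are illustrative assumptions, not minikube's exact parameters:]

    package certs

    import (
        "crypto/rand"
        "crypto/rsa"
        "crypto/x509"
        "crypto/x509/pkix"
        "math/big"
        "net"
        "time"
    )

    // signedServingCert returns DER-encoded cert bytes signed by the given CA,
    // plus the freshly generated private key.
    func signedServingCert(ca *x509.Certificate, caKey *rsa.PrivateKey) ([]byte, *rsa.PrivateKey, error) {
        key, err := rsa.GenerateKey(rand.Reader, 2048)
        if err != nil {
            return nil, nil, err
        }
        tmpl := &x509.Certificate{
            SerialNumber: big.NewInt(1),
            Subject:      pkix.Name{CommonName: "minikube"},
            NotBefore:    time.Now(),
            NotAfter:     time.Now().Add(26280 * time.Hour), // CertExpiration in the StartCluster dump later in this log
            KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
            ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
            IPAddresses: []net.IP{
                net.ParseIP("10.96.0.1"),    // kubernetes Service ClusterIP
                net.ParseIP("127.0.0.1"),
                net.ParseIP("10.0.0.1"),
                net.ParseIP("192.168.85.2"), // node IP
            },
        }
        der, err := x509.CreateCertificate(rand.Reader, tmpl, ca, &key.PublicKey, caKey)
        return der, key, err
    }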
I1124 09:05:04.418202 696018 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/proxy-client.key
I1124 09:05:04.418217 696018 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/proxy-client.crt with IP's: []
I1124 09:05:04.604435 696018 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/proxy-client.crt ...
I1124 09:05:04.604497 696018 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/proxy-client.crt: {Name:mk5719f2112f16d39272baf4588ce9b65d33d2a3 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 09:05:04.604728 696018 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/proxy-client.key ...
I1124 09:05:04.604746 696018 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/proxy-client.key: {Name:mk56d8ccc21a879d6506ee3380097e85fb4b4f95 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 09:05:04.605022 696018 certs.go:484] found cert: /home/jenkins/minikube-integration/21978-435860/.minikube/certs/439524.pem (1338 bytes)
W1124 09:05:04.605073 696018 certs.go:480] ignoring /home/jenkins/minikube-integration/21978-435860/.minikube/certs/439524_empty.pem, impossibly tiny 0 bytes
I1124 09:05:04.605084 696018 certs.go:484] found cert: /home/jenkins/minikube-integration/21978-435860/.minikube/certs/ca-key.pem (1675 bytes)
I1124 09:05:04.605120 696018 certs.go:484] found cert: /home/jenkins/minikube-integration/21978-435860/.minikube/certs/ca.pem (1082 bytes)
I1124 09:05:04.605160 696018 certs.go:484] found cert: /home/jenkins/minikube-integration/21978-435860/.minikube/certs/cert.pem (1123 bytes)
I1124 09:05:04.605195 696018 certs.go:484] found cert: /home/jenkins/minikube-integration/21978-435860/.minikube/certs/key.pem (1675 bytes)
I1124 09:05:04.605369 696018 certs.go:484] found cert: /home/jenkins/minikube-integration/21978-435860/.minikube/files/etc/ssl/certs/4395242.pem (1708 bytes)
I1124 09:05:04.606568 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1124 09:05:04.626964 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1124 09:05:04.644973 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1124 09:05:04.663649 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I1124 09:05:04.681360 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
I1124 09:05:04.699027 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I1124 09:05:04.716381 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1124 09:05:04.734298 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I1124 09:05:04.752033 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/files/etc/ssl/certs/4395242.pem --> /usr/share/ca-certificates/4395242.pem (1708 bytes)
I1124 09:05:04.771861 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1124 09:05:04.789824 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/certs/439524.pem --> /usr/share/ca-certificates/439524.pem (1338 bytes)
I1124 09:05:04.808313 696018 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1124 09:05:04.826085 696018 ssh_runner.go:195] Run: openssl version
I1124 09:05:04.834356 696018 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1124 09:05:04.843772 696018 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1124 09:05:04.848660 696018 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 24 08:30 /usr/share/ca-certificates/minikubeCA.pem
I1124 09:05:04.848725 696018 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1124 09:05:04.887168 696018 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1124 09:05:04.897113 696018 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/439524.pem && ln -fs /usr/share/ca-certificates/439524.pem /etc/ssl/certs/439524.pem"
I1124 09:05:04.907480 696018 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/439524.pem
I1124 09:05:04.911694 696018 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 24 08:38 /usr/share/ca-certificates/439524.pem
I1124 09:05:04.911746 696018 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/439524.pem
I1124 09:05:04.951326 696018 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/439524.pem /etc/ssl/certs/51391683.0"
I1124 09:05:04.961765 696018 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/4395242.pem && ln -fs /usr/share/ca-certificates/4395242.pem /etc/ssl/certs/4395242.pem"
I1124 09:05:04.972056 696018 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/4395242.pem
I1124 09:05:04.976497 696018 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 24 08:38 /usr/share/ca-certificates/4395242.pem
I1124 09:05:04.976554 696018 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/4395242.pem
I1124 09:05:05.017003 696018 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/4395242.pem /etc/ssl/certs/3ec20f2e.0"
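[note: the openssl x509 -hash / ln -fs pairs above install each PEM under /etc/ssl/certs/<subject-hash>.0, the hashed-directory layout OpenSSL uses to look up trusted CAs, which is why b5213941.0, 51391683.0 and 3ec20f2e.0 appear. The same step as a small sketch; linkBySubjectHash is a hypothetical helper:]

    package trust

    import (
        "fmt"
        "os"
        "os/exec"
        "strings"
    )

    // linkBySubjectHash computes the OpenSSL subject hash of a PEM and creates
    // the <hash>.0 symlink OpenSSL expects in its hashed cert directory.
    func linkBySubjectHash(pem string) error {
        out, err := exec.Command("openssl", "x509", "-hash", "-noout", "-in", pem).Output()
        if err != nil {
            return err
        }
        hash := strings.TrimSpace(string(out))
        link := fmt.Sprintf("/etc/ssl/certs/%s.0", hash)
        _ = os.Remove(link) // ln -fs semantics: replace any existing link
        return os.Symlink(pem, link)
    }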
I1124 09:05:05.027292 696018 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1124 09:05:05.031547 696018 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1124 09:05:05.031616 696018 kubeadm.go:401] StartCluster: {Name:no-preload-820576 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.35.0-beta.0 ClusterName:no-preload-820576 Namespace:default APIServerHAVIP: APIServerName:minikubeCA
APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.35.0-beta.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false
CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1124 09:05:05.031711 696018 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I1124 09:05:05.031765 696018 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I1124 09:05:05.062044 696018 cri.go:89] found id: ""
I1124 09:05:05.062126 696018 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1124 09:05:05.071887 696018 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1124 09:05:05.082157 696018 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I1124 09:05:05.082217 696018 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1124 09:05:05.091225 696018 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1124 09:05:05.091248 696018 kubeadm.go:158] found existing configuration files:
I1124 09:05:05.091296 696018 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1124 09:05:05.100600 696018 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1124 09:05:05.100657 696018 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1124 09:05:05.110555 696018 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1124 09:05:05.119216 696018 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1124 09:05:05.119288 696018 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1124 09:05:05.127876 696018 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1124 09:05:05.136154 696018 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1124 09:05:05.136205 696018 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1124 09:05:05.145077 696018 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1124 09:05:05.154290 696018 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1124 09:05:05.154338 696018 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1124 09:05:05.162702 696018 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0-beta.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1124 09:05:05.200662 696018 kubeadm.go:319] [init] Using Kubernetes version: v1.35.0-beta.0
I1124 09:05:05.200757 696018 kubeadm.go:319] [preflight] Running pre-flight checks
I1124 09:05:05.269623 696018 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I1124 09:05:05.269714 696018 kubeadm.go:319] KERNEL_VERSION: 6.8.0-1044-gcp
I1124 09:05:05.269770 696018 kubeadm.go:319] OS: Linux
I1124 09:05:05.269842 696018 kubeadm.go:319] CGROUPS_CPU: enabled
I1124 09:05:05.269920 696018 kubeadm.go:319] CGROUPS_CPUSET: enabled
I1124 09:05:05.270003 696018 kubeadm.go:319] CGROUPS_DEVICES: enabled
I1124 09:05:05.270084 696018 kubeadm.go:319] CGROUPS_FREEZER: enabled
I1124 09:05:05.270155 696018 kubeadm.go:319] CGROUPS_MEMORY: enabled
I1124 09:05:05.270223 696018 kubeadm.go:319] CGROUPS_PIDS: enabled
I1124 09:05:05.270303 696018 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I1124 09:05:05.270377 696018 kubeadm.go:319] CGROUPS_IO: enabled
I1124 09:05:05.332844 696018 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1124 09:05:05.332992 696018 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1124 09:05:05.333150 696018 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I1124 09:05:06.734694 696018 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1124 09:05:06.738817 696018 out.go:252] - Generating certificates and keys ...
I1124 09:05:06.738929 696018 kubeadm.go:319] [certs] Using existing ca certificate authority
I1124 09:05:06.739072 696018 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1124 09:05:06.832143 696018 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I1124 09:05:06.955015 696018 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I1124 09:05:07.027143 696018 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1124 09:05:07.115762 696018 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1124 09:05:07.265716 696018 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1124 09:05:07.265857 696018 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [localhost no-preload-820576] and IPs [192.168.85.2 127.0.0.1 ::1]
I1124 09:05:07.364684 696018 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1124 09:05:07.364865 696018 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [localhost no-preload-820576] and IPs [192.168.85.2 127.0.0.1 ::1]
I1124 09:05:07.523315 696018 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1124 09:05:07.590589 696018 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1124 09:05:07.746307 696018 kubeadm.go:319] [certs] Generating "sa" key and public key
I1124 09:05:07.746426 696018 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1124 09:05:07.869677 696018 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1124 09:05:07.978931 696018 kubeadm.go:319] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I1124 09:05:08.053720 696018 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1124 09:05:08.085227 696018 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1124 09:05:08.160011 696018 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1124 09:05:08.160849 696018 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1124 09:05:08.165435 696018 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1124 09:05:05.923694 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1124 09:05:05.923742 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:06.675415 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:07.175277 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:07.676031 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:08.174962 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:08.675088 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:09.175102 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:09.675096 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:10.175027 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:10.675655 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:11.175703 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:08.166975 696018 out.go:252] - Booting up control plane ...
I1124 09:05:08.167117 696018 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1124 09:05:08.167189 696018 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1124 09:05:08.167816 696018 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1124 09:05:08.183769 696018 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1124 09:05:08.183936 696018 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I1124 09:05:08.191856 696018 kubeadm.go:319] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I1124 09:05:08.191990 696018 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1124 09:05:08.192031 696018 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1124 09:05:08.308076 696018 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I1124 09:05:08.308205 696018 kubeadm.go:319] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I1124 09:05:09.309901 696018 kubeadm.go:319] [kubelet-check] The kubelet is healthy after 1.001908715s
I1124 09:05:09.316051 696018 kubeadm.go:319] [control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
I1124 09:05:09.316157 696018 kubeadm.go:319] [control-plane-check] Checking kube-apiserver at https://192.168.85.2:8443/livez
I1124 09:05:09.316247 696018 kubeadm.go:319] [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
I1124 09:05:09.316315 696018 kubeadm.go:319] [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
I1124 09:05:10.320869 696018 kubeadm.go:319] [control-plane-check] kube-controller-manager is healthy after 1.004644301s
I1124 09:05:10.832866 696018 kubeadm.go:319] [control-plane-check] kube-scheduler is healthy after 1.516703459s
I1124 09:05:12.317179 696018 kubeadm.go:319] [control-plane-check] kube-apiserver is healthy after 3.001080604s
I1124 09:05:12.331544 696018 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1124 09:05:12.339378 696018 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1124 09:05:12.347526 696018 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1124 09:05:12.347705 696018 kubeadm.go:319] [mark-control-plane] Marking the node no-preload-820576 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1124 09:05:12.354657 696018 kubeadm.go:319] [bootstrap-token] Using token: awoygq.wealvtzys3befsou
I1124 09:05:12.355757 696018 out.go:252] - Configuring RBAC rules ...
I1124 09:05:12.355888 696018 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1124 09:05:12.359613 696018 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1124 09:05:12.364202 696018 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1124 09:05:12.366491 696018 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1124 09:05:12.369449 696018 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1124 09:05:12.371508 696018 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1124 09:05:12.722783 696018 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1124 09:05:13.137535 696018 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1124 09:05:13.723038 696018 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1124 09:05:13.724197 696018 kubeadm.go:319]
I1124 09:05:13.724302 696018 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1124 09:05:13.724317 696018 kubeadm.go:319]
I1124 09:05:13.724412 696018 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1124 09:05:13.724424 696018 kubeadm.go:319]
I1124 09:05:13.724520 696018 kubeadm.go:319] mkdir -p $HOME/.kube
I1124 09:05:13.724630 696018 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1124 09:05:13.724716 696018 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1124 09:05:13.724730 696018 kubeadm.go:319]
I1124 09:05:13.724818 696018 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1124 09:05:13.724831 696018 kubeadm.go:319]
I1124 09:05:13.724897 696018 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1124 09:05:13.724906 696018 kubeadm.go:319]
I1124 09:05:13.724990 696018 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1124 09:05:13.725105 696018 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1124 09:05:13.725212 696018 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1124 09:05:13.725221 696018 kubeadm.go:319]
I1124 09:05:13.725338 696018 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1124 09:05:13.725493 696018 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1124 09:05:13.725510 696018 kubeadm.go:319]
I1124 09:05:13.725601 696018 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token awoygq.wealvtzys3befsou \
I1124 09:05:13.725765 696018 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:058f105135414f3c09406a88ceaaa8a4946b8fa5ee02b1189df823d65cc738be \
I1124 09:05:13.725804 696018 kubeadm.go:319] --control-plane
I1124 09:05:13.725816 696018 kubeadm.go:319]
I1124 09:05:13.725934 696018 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1124 09:05:13.725944 696018 kubeadm.go:319]
I1124 09:05:13.726041 696018 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token awoygq.wealvtzys3befsou \
I1124 09:05:13.726243 696018 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:058f105135414f3c09406a88ceaaa8a4946b8fa5ee02b1189df823d65cc738be
I1124 09:05:13.728504 696018 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1044-gcp\n", err: exit status 1
I1124 09:05:13.728661 696018 kubeadm.go:319] [WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1124 09:05:13.728704 696018 cni.go:84] Creating CNI manager for ""
I1124 09:05:13.728716 696018 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1124 09:05:13.730529 696018 out.go:179] * Configuring CNI (Container Networking Interface) ...
I1124 09:05:10.924882 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1124 09:05:10.924923 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:11.109506 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": read tcp 192.168.76.1:47578->192.168.76.2:8443: read: connection reset by peer
I1124 09:05:11.421112 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:11.421646 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:11.920950 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:11.921496 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:12.421219 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:12.421692 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:12.921430 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:12.921911 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:13.420431 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:13.420926 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:13.920542 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:13.921060 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:14.420434 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:14.420859 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
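[note: process 685562 above is polling the apiserver /healthz roughly every 500ms, treating connection refused, connection reset, and client timeouts as "not up yet". A minimal stand-in for that loop; InsecureSkipVerify is only to keep the sketch self-contained, a real check would pin the cluster CA instead:]

    package health

    import (
        "crypto/tls"
        "fmt"
        "net/http"
        "time"
    )

    // waitForHealthz polls url until it answers 200 or the timeout elapses.
    func waitForHealthz(url string, timeout time.Duration) error {
        client := &http.Client{
            Timeout:   2 * time.Second,
            Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
        }
        deadline := time.Now().Add(timeout)
        for time.Now().Before(deadline) {
            resp, err := client.Get(url)
            if err == nil {
                resp.Body.Close()
                if resp.StatusCode == http.StatusOK {
                    return nil // healthz answered 200: ok
                }
            }
            time.Sleep(500 * time.Millisecond) // matches the ~0.5s cadence in the log
        }
        return fmt.Errorf("apiserver at %s not healthy within %s", url, timeout)
    }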
I1124 09:05:11.675776 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:12.175192 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:12.675267 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:13.175941 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:13.675281 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:14.175267 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:14.675185 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:15.175391 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:15.675966 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:16.175887 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:16.675144 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:17.175281 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:17.260591 695520 kubeadm.go:1114] duration metric: took 12.168846115s to wait for elevateKubeSystemPrivileges
I1124 09:05:17.260625 695520 kubeadm.go:403] duration metric: took 22.275566194s to StartCluster
I1124 09:05:17.260655 695520 settings.go:142] acquiring lock: {Name:mk02cbf979fc883a7cfa89d50f2f1c6cf88236e4 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 09:05:17.260738 695520 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21978-435860/kubeconfig
I1124 09:05:17.261860 695520 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21978-435860/kubeconfig: {Name:mk42183bd63f8b44881819ac352384aa0ef5afa7 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 09:05:17.262121 695520 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1124 09:05:17.262124 695520 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.103.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1124 09:05:17.262197 695520 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1124 09:05:17.262308 695520 addons.go:70] Setting storage-provisioner=true in profile "old-k8s-version-128377"
I1124 09:05:17.262334 695520 addons.go:239] Setting addon storage-provisioner=true in "old-k8s-version-128377"
I1124 09:05:17.262358 695520 config.go:182] Loaded profile config "old-k8s-version-128377": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1124 09:05:17.262376 695520 host.go:66] Checking if "old-k8s-version-128377" exists ...
I1124 09:05:17.262351 695520 addons.go:70] Setting default-storageclass=true in profile "old-k8s-version-128377"
I1124 09:05:17.262443 695520 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "old-k8s-version-128377"
I1124 09:05:17.262844 695520 cli_runner.go:164] Run: docker container inspect old-k8s-version-128377 --format={{.State.Status}}
I1124 09:05:17.263075 695520 cli_runner.go:164] Run: docker container inspect old-k8s-version-128377 --format={{.State.Status}}
I1124 09:05:17.263365 695520 out.go:179] * Verifying Kubernetes components...
I1124 09:05:17.264408 695520 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1124 09:05:17.287510 695520 addons.go:239] Setting addon default-storageclass=true in "old-k8s-version-128377"
I1124 09:05:17.287559 695520 host.go:66] Checking if "old-k8s-version-128377" exists ...
I1124 09:05:17.287978 695520 cli_runner.go:164] Run: docker container inspect old-k8s-version-128377 --format={{.State.Status}}
I1124 09:05:17.288769 695520 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1124 09:05:13.732137 696018 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I1124 09:05:13.737711 696018 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl ...
I1124 09:05:13.737726 696018 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I1124 09:05:13.752118 696018 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I1124 09:05:13.951744 696018 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1124 09:05:13.951795 696018 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:13.951847 696018 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes no-preload-820576 minikube.k8s.io/updated_at=2025_11_24T09_05_13_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=393ee3e0b845623107dce6cda4f48ffd5c3d1811 minikube.k8s.io/name=no-preload-820576 minikube.k8s.io/primary=true
I1124 09:05:13.962047 696018 ops.go:34] apiserver oom_adj: -16
I1124 09:05:14.022754 696018 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:14.523671 696018 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:15.023231 696018 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:15.523083 696018 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:16.023230 696018 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:16.523666 696018 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:17.022940 696018 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:17.523444 696018 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:17.290230 695520 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1124 09:05:17.290253 695520 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1124 09:05:17.290314 695520 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-128377
I1124 09:05:17.317679 695520 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1124 09:05:17.317704 695520 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1124 09:05:17.317768 695520 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-128377
I1124 09:05:17.319048 695520 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33068 SSHKeyPath:/home/jenkins/minikube-integration/21978-435860/.minikube/machines/old-k8s-version-128377/id_rsa Username:docker}
I1124 09:05:17.343853 695520 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33068 SSHKeyPath:/home/jenkins/minikube-integration/21978-435860/.minikube/machines/old-k8s-version-128377/id_rsa Username:docker}
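[note: the inspect template used above recovers the ephemeral host port Docker assigned to the container's 22/tcp binding, which is where the new ssh clients on 127.0.0.1:33068 come from. The same lookup as a sketch; sshHostPort is a hypothetical helper:]

    package ports

    import (
        "os/exec"
        "strings"
    )

    // sshHostPort asks docker for the first host binding of 22/tcp on the
    // named container and returns the host port as a string.
    func sshHostPort(container string) (string, error) {
        out, err := exec.Command("docker", "container", "inspect", "-f",
            `{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}`,
            container).Output()
        return strings.TrimSpace(string(out)), err
    }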
I1124 09:05:17.366525 695520 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.103.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I1124 09:05:17.411998 695520 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1124 09:05:17.447003 695520 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1124 09:05:17.463082 695520 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1124 09:05:17.632983 695520 start.go:977] {"host.minikube.internal": 192.168.103.1} host record injected into CoreDNS's ConfigMap
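[note: the sed pipeline at 09:05:17.366525 splices a hosts stanza (and a log directive ahead of errors) into the CoreDNS Corefile, then kubectl-replaces the ConfigMap; that is what the "host record injected" line confirms. Reconstructed from the sed expression, the inserted stanza amounts to:]

    hosts {
       192.168.103.1 host.minikube.internal
       fallthrough
    }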
I1124 09:05:17.634312 695520 node_ready.go:35] waiting up to 6m0s for node "old-k8s-version-128377" to be "Ready" ...
I1124 09:05:17.888856 695520 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
I1124 09:05:18.022851 696018 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:18.523601 696018 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:18.589169 696018 kubeadm.go:1114] duration metric: took 4.637423043s to wait for elevateKubeSystemPrivileges
I1124 09:05:18.589209 696018 kubeadm.go:403] duration metric: took 13.557597169s to StartCluster
I1124 09:05:18.589237 696018 settings.go:142] acquiring lock: {Name:mk02cbf979fc883a7cfa89d50f2f1c6cf88236e4 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 09:05:18.589321 696018 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21978-435860/kubeconfig
I1124 09:05:18.590747 696018 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21978-435860/kubeconfig: {Name:mk42183bd63f8b44881819ac352384aa0ef5afa7 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 09:05:18.590988 696018 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1124 09:05:18.591000 696018 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.35.0-beta.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1124 09:05:18.591095 696018 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1124 09:05:18.591206 696018 addons.go:70] Setting storage-provisioner=true in profile "no-preload-820576"
I1124 09:05:18.591219 696018 config.go:182] Loaded profile config "no-preload-820576": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.35.0-beta.0
I1124 09:05:18.591236 696018 addons.go:239] Setting addon storage-provisioner=true in "no-preload-820576"
I1124 09:05:18.591251 696018 addons.go:70] Setting default-storageclass=true in profile "no-preload-820576"
I1124 09:05:18.591275 696018 host.go:66] Checking if "no-preload-820576" exists ...
I1124 09:05:18.591283 696018 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "no-preload-820576"
I1124 09:05:18.591664 696018 cli_runner.go:164] Run: docker container inspect no-preload-820576 --format={{.State.Status}}
I1124 09:05:18.591855 696018 cli_runner.go:164] Run: docker container inspect no-preload-820576 --format={{.State.Status}}
I1124 09:05:18.592299 696018 out.go:179] * Verifying Kubernetes components...
I1124 09:05:18.593599 696018 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1124 09:05:18.615163 696018 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1124 09:05:18.615451 696018 addons.go:239] Setting addon default-storageclass=true in "no-preload-820576"
I1124 09:05:18.615530 696018 host.go:66] Checking if "no-preload-820576" exists ...
I1124 09:05:18.615851 696018 cli_runner.go:164] Run: docker container inspect no-preload-820576 --format={{.State.Status}}
I1124 09:05:18.616223 696018 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1124 09:05:18.616245 696018 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1124 09:05:18.616301 696018 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-820576
I1124 09:05:18.646443 696018 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21978-435860/.minikube/machines/no-preload-820576/id_rsa Username:docker}
I1124 09:05:18.647885 696018 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1124 09:05:18.647963 696018 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1124 09:05:18.648059 696018 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-820576
I1124 09:05:18.675529 696018 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21978-435860/.minikube/machines/no-preload-820576/id_rsa Username:docker}
I1124 09:05:18.685797 696018 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.85.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I1124 09:05:18.752704 696018 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1124 09:05:18.775922 696018 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1124 09:05:18.800792 696018 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1124 09:05:18.878758 696018 start.go:977] {"host.minikube.internal": 192.168.85.1} host record injected into CoreDNS's ConfigMap
I1124 09:05:18.880873 696018 node_ready.go:35] waiting up to 6m0s for node "no-preload-820576" to be "Ready" ...
I1124 09:05:19.096304 696018 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
I1124 09:05:14.921188 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:14.921633 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:15.421327 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:15.421818 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:15.920573 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:15.921034 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:16.421282 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:16.421841 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:16.921386 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:16.921942 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:17.420551 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:17.421007 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:17.920666 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:17.921181 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:18.420539 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:18.421011 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:18.920611 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:18.921079 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:19.420539 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:19.421004 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:17.889849 695520 addons.go:530] duration metric: took 627.656763ms for enable addons: enabled=[storage-provisioner default-storageclass]
I1124 09:05:18.137738 695520 kapi.go:214] "coredns" deployment in "kube-system" namespace and "old-k8s-version-128377" context rescaled to 1 replicas
W1124 09:05:19.637948 695520 node_ready.go:57] node "old-k8s-version-128377" has "Ready":"False" status (will retry)
I1124 09:05:19.097398 696018 addons.go:530] duration metric: took 506.310963ms for enable addons: enabled=[storage-provisioner default-storageclass]
I1124 09:05:19.383938 696018 kapi.go:214] "coredns" deployment in "kube-system" namespace and "no-preload-820576" context rescaled to 1 replicas
W1124 09:05:20.884989 696018 node_ready.go:57] node "no-preload-820576" has "Ready":"False" status (will retry)
I1124 09:05:19.920806 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:19.921207 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:20.420831 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:20.421312 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:20.920613 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:20.921185 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:21.420832 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:21.421240 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:21.920531 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:21.921019 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:22.420552 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
W1124 09:05:21.638057 695520 node_ready.go:57] node "old-k8s-version-128377" has "Ready":"False" status (will retry)
W1124 09:05:23.638668 695520 node_ready.go:57] node "old-k8s-version-128377" has "Ready":"False" status (will retry)
W1124 09:05:26.137883 695520 node_ready.go:57] node "old-k8s-version-128377" has "Ready":"False" status (will retry)
W1124 09:05:23.383937 696018 node_ready.go:57] node "no-preload-820576" has "Ready":"False" status (will retry)
W1124 09:05:25.384443 696018 node_ready.go:57] node "no-preload-820576" has "Ready":"False" status (will retry)
I1124 09:05:27.421276 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1124 09:05:27.421318 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
W1124 09:05:28.138098 695520 node_ready.go:57] node "old-k8s-version-128377" has "Ready":"False" status (will retry)
W1124 09:05:30.638120 695520 node_ready.go:57] node "old-k8s-version-128377" has "Ready":"False" status (will retry)
W1124 09:05:27.884284 696018 node_ready.go:57] node "no-preload-820576" has "Ready":"False" status (will retry)
W1124 09:05:29.884474 696018 node_ready.go:57] node "no-preload-820576" has "Ready":"False" status (will retry)
W1124 09:05:32.384199 696018 node_ready.go:57] node "no-preload-820576" has "Ready":"False" status (will retry)
I1124 09:05:31.637332 695520 node_ready.go:49] node "old-k8s-version-128377" is "Ready"
I1124 09:05:31.637368 695520 node_ready.go:38] duration metric: took 14.003009675s for node "old-k8s-version-128377" to be "Ready" ...
I1124 09:05:31.637385 695520 api_server.go:52] waiting for apiserver process to appear ...
I1124 09:05:31.637443 695520 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1124 09:05:31.650126 695520 api_server.go:72] duration metric: took 14.387953281s to wait for apiserver process to appear ...
I1124 09:05:31.650156 695520 api_server.go:88] waiting for apiserver healthz status ...
I1124 09:05:31.650179 695520 api_server.go:253] Checking apiserver healthz at https://192.168.103.2:8443/healthz ...
I1124 09:05:31.654078 695520 api_server.go:279] https://192.168.103.2:8443/healthz returned 200:
ok
I1124 09:05:31.655253 695520 api_server.go:141] control plane version: v1.28.0
I1124 09:05:31.655280 695520 api_server.go:131] duration metric: took 5.117021ms to wait for apiserver health ...
I1124 09:05:31.655289 695520 system_pods.go:43] waiting for kube-system pods to appear ...
I1124 09:05:31.658830 695520 system_pods.go:59] 8 kube-system pods found
I1124 09:05:31.658868 695520 system_pods.go:61] "coredns-5dd5756b68-vxxnm" [b84bae0f-9f75-4d1c-b2ed-da0c10a141cf] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 09:05:31.658877 695520 system_pods.go:61] "etcd-old-k8s-version-128377" [57d9a965-4f1a-455f-beec-16601bd921e2] Running
I1124 09:05:31.658889 695520 system_pods.go:61] "kindnet-gbp66" [49954742-ea7f-466f-80d8-7d6ac88ce36c] Running
I1124 09:05:31.658895 695520 system_pods.go:61] "kube-apiserver-old-k8s-version-128377" [08c8bb94-e597-4293-80f1-0981f51b22a4] Running
I1124 09:05:31.658906 695520 system_pods.go:61] "kube-controller-manager-old-k8s-version-128377" [1f721a4b-e1c3-4e18-92b4-13673dc37600] Running
I1124 09:05:31.658910 695520 system_pods.go:61] "kube-proxy-fpbs2" [52128126-550d-4795-9fa1-e1d3d9510dd3] Running
I1124 09:05:31.658916 695520 system_pods.go:61] "kube-scheduler-old-k8s-version-128377" [399dcc23-9970-4146-82b3-c72d3e5f621b] Running
I1124 09:05:31.658921 695520 system_pods.go:61] "storage-provisioner" [7e4f56c0-0b49-47cd-9278-129ad898b781] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 09:05:31.658927 695520 system_pods.go:74] duration metric: took 3.632262ms to wait for pod list to return data ...
I1124 09:05:31.658936 695520 default_sa.go:34] waiting for default service account to be created ...
I1124 09:05:31.660923 695520 default_sa.go:45] found service account: "default"
I1124 09:05:31.660942 695520 default_sa.go:55] duration metric: took 2.000088ms for default service account to be created ...
I1124 09:05:31.660950 695520 system_pods.go:116] waiting for k8s-apps to be running ...
I1124 09:05:31.664223 695520 system_pods.go:86] 8 kube-system pods found
I1124 09:05:31.664263 695520 system_pods.go:89] "coredns-5dd5756b68-vxxnm" [b84bae0f-9f75-4d1c-b2ed-da0c10a141cf] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 09:05:31.664272 695520 system_pods.go:89] "etcd-old-k8s-version-128377" [57d9a965-4f1a-455f-beec-16601bd921e2] Running
I1124 09:05:31.664280 695520 system_pods.go:89] "kindnet-gbp66" [49954742-ea7f-466f-80d8-7d6ac88ce36c] Running
I1124 09:05:31.664284 695520 system_pods.go:89] "kube-apiserver-old-k8s-version-128377" [08c8bb94-e597-4293-80f1-0981f51b22a4] Running
I1124 09:05:31.664287 695520 system_pods.go:89] "kube-controller-manager-old-k8s-version-128377" [1f721a4b-e1c3-4e18-92b4-13673dc37600] Running
I1124 09:05:31.664291 695520 system_pods.go:89] "kube-proxy-fpbs2" [52128126-550d-4795-9fa1-e1d3d9510dd3] Running
I1124 09:05:31.664294 695520 system_pods.go:89] "kube-scheduler-old-k8s-version-128377" [399dcc23-9970-4146-82b3-c72d3e5f621b] Running
I1124 09:05:31.664300 695520 system_pods.go:89] "storage-provisioner" [7e4f56c0-0b49-47cd-9278-129ad898b781] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 09:05:31.664333 695520 retry.go:31] will retry after 195.108791ms: missing components: kube-dns
I1124 09:05:31.863438 695520 system_pods.go:86] 8 kube-system pods found
I1124 09:05:31.863494 695520 system_pods.go:89] "coredns-5dd5756b68-vxxnm" [b84bae0f-9f75-4d1c-b2ed-da0c10a141cf] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 09:05:31.863505 695520 system_pods.go:89] "etcd-old-k8s-version-128377" [57d9a965-4f1a-455f-beec-16601bd921e2] Running
I1124 09:05:31.863515 695520 system_pods.go:89] "kindnet-gbp66" [49954742-ea7f-466f-80d8-7d6ac88ce36c] Running
I1124 09:05:31.863520 695520 system_pods.go:89] "kube-apiserver-old-k8s-version-128377" [08c8bb94-e597-4293-80f1-0981f51b22a4] Running
I1124 09:05:31.863525 695520 system_pods.go:89] "kube-controller-manager-old-k8s-version-128377" [1f721a4b-e1c3-4e18-92b4-13673dc37600] Running
I1124 09:05:31.863528 695520 system_pods.go:89] "kube-proxy-fpbs2" [52128126-550d-4795-9fa1-e1d3d9510dd3] Running
I1124 09:05:31.863540 695520 system_pods.go:89] "kube-scheduler-old-k8s-version-128377" [399dcc23-9970-4146-82b3-c72d3e5f621b] Running
I1124 09:05:31.863557 695520 system_pods.go:89] "storage-provisioner" [7e4f56c0-0b49-47cd-9278-129ad898b781] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 09:05:31.863579 695520 retry.go:31] will retry after 244.252087ms: missing components: kube-dns
I1124 09:05:32.111547 695520 system_pods.go:86] 8 kube-system pods found
I1124 09:05:32.111586 695520 system_pods.go:89] "coredns-5dd5756b68-vxxnm" [b84bae0f-9f75-4d1c-b2ed-da0c10a141cf] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 09:05:32.111595 695520 system_pods.go:89] "etcd-old-k8s-version-128377" [57d9a965-4f1a-455f-beec-16601bd921e2] Running
I1124 09:05:32.111603 695520 system_pods.go:89] "kindnet-gbp66" [49954742-ea7f-466f-80d8-7d6ac88ce36c] Running
I1124 09:05:32.111608 695520 system_pods.go:89] "kube-apiserver-old-k8s-version-128377" [08c8bb94-e597-4293-80f1-0981f51b22a4] Running
I1124 09:05:32.111614 695520 system_pods.go:89] "kube-controller-manager-old-k8s-version-128377" [1f721a4b-e1c3-4e18-92b4-13673dc37600] Running
I1124 09:05:32.111628 695520 system_pods.go:89] "kube-proxy-fpbs2" [52128126-550d-4795-9fa1-e1d3d9510dd3] Running
I1124 09:05:32.111634 695520 system_pods.go:89] "kube-scheduler-old-k8s-version-128377" [399dcc23-9970-4146-82b3-c72d3e5f621b] Running
I1124 09:05:32.111641 695520 system_pods.go:89] "storage-provisioner" [7e4f56c0-0b49-47cd-9278-129ad898b781] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 09:05:32.111660 695520 retry.go:31] will retry after 471.342676ms: missing components: kube-dns
I1124 09:05:32.587354 695520 system_pods.go:86] 8 kube-system pods found
I1124 09:05:32.587384 695520 system_pods.go:89] "coredns-5dd5756b68-vxxnm" [b84bae0f-9f75-4d1c-b2ed-da0c10a141cf] Running
I1124 09:05:32.587389 695520 system_pods.go:89] "etcd-old-k8s-version-128377" [57d9a965-4f1a-455f-beec-16601bd921e2] Running
I1124 09:05:32.587393 695520 system_pods.go:89] "kindnet-gbp66" [49954742-ea7f-466f-80d8-7d6ac88ce36c] Running
I1124 09:05:32.587397 695520 system_pods.go:89] "kube-apiserver-old-k8s-version-128377" [08c8bb94-e597-4293-80f1-0981f51b22a4] Running
I1124 09:05:32.587402 695520 system_pods.go:89] "kube-controller-manager-old-k8s-version-128377" [1f721a4b-e1c3-4e18-92b4-13673dc37600] Running
I1124 09:05:32.587405 695520 system_pods.go:89] "kube-proxy-fpbs2" [52128126-550d-4795-9fa1-e1d3d9510dd3] Running
I1124 09:05:32.587408 695520 system_pods.go:89] "kube-scheduler-old-k8s-version-128377" [399dcc23-9970-4146-82b3-c72d3e5f621b] Running
I1124 09:05:32.587411 695520 system_pods.go:89] "storage-provisioner" [7e4f56c0-0b49-47cd-9278-129ad898b781] Running
I1124 09:05:32.587420 695520 system_pods.go:126] duration metric: took 926.463548ms to wait for k8s-apps to be running ...
I1124 09:05:32.587428  695520 system_svc.go:44] waiting for kubelet service to be running ...
I1124 09:05:32.587503 695520 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1124 09:05:32.602305 695520 system_svc.go:56] duration metric: took 14.864147ms WaitForService to wait for kubelet
I1124 09:05:32.602336 695520 kubeadm.go:587] duration metric: took 15.340181249s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1124 09:05:32.602385 695520 node_conditions.go:102] verifying NodePressure condition ...
I1124 09:05:32.605212 695520 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I1124 09:05:32.605242 695520 node_conditions.go:123] node cpu capacity is 8
I1124 09:05:32.605271 695520 node_conditions.go:105] duration metric: took 2.87532ms to run NodePressure ...
I1124 09:05:32.605293 695520 start.go:242] waiting for startup goroutines ...
I1124 09:05:32.605308 695520 start.go:247] waiting for cluster config update ...
I1124 09:05:32.605327 695520 start.go:256] writing updated cluster config ...
I1124 09:05:32.605690 695520 ssh_runner.go:195] Run: rm -f paused
I1124 09:05:32.610319 695520 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1124 09:05:32.614557 695520 pod_ready.go:83] waiting for pod "coredns-5dd5756b68-vxxnm" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:32.619322 695520 pod_ready.go:94] pod "coredns-5dd5756b68-vxxnm" is "Ready"
I1124 09:05:32.619349 695520 pod_ready.go:86] duration metric: took 4.765973ms for pod "coredns-5dd5756b68-vxxnm" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:32.622417 695520 pod_ready.go:83] waiting for pod "etcd-old-k8s-version-128377" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:32.626873 695520 pod_ready.go:94] pod "etcd-old-k8s-version-128377" is "Ready"
I1124 09:05:32.626900 695520 pod_ready.go:86] duration metric: took 4.45394ms for pod "etcd-old-k8s-version-128377" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:32.629800 695520 pod_ready.go:83] waiting for pod "kube-apiserver-old-k8s-version-128377" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:32.634310 695520 pod_ready.go:94] pod "kube-apiserver-old-k8s-version-128377" is "Ready"
I1124 09:05:32.634338 695520 pod_ready.go:86] duration metric: took 4.514426ms for pod "kube-apiserver-old-k8s-version-128377" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:32.637382 695520 pod_ready.go:83] waiting for pod "kube-controller-manager-old-k8s-version-128377" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:33.015375 695520 pod_ready.go:94] pod "kube-controller-manager-old-k8s-version-128377" is "Ready"
I1124 09:05:33.015406 695520 pod_ready.go:86] duration metric: took 378.000797ms for pod "kube-controller-manager-old-k8s-version-128377" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:33.215146 695520 pod_ready.go:83] waiting for pod "kube-proxy-fpbs2" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:33.614362 695520 pod_ready.go:94] pod "kube-proxy-fpbs2" is "Ready"
I1124 09:05:33.614392 695520 pod_ready.go:86] duration metric: took 399.215049ms for pod "kube-proxy-fpbs2" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:33.815166 695520 pod_ready.go:83] waiting for pod "kube-scheduler-old-k8s-version-128377" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:34.214969 695520 pod_ready.go:94] pod "kube-scheduler-old-k8s-version-128377" is "Ready"
I1124 09:05:34.214999 695520 pod_ready.go:86] duration metric: took 399.806564ms for pod "kube-scheduler-old-k8s-version-128377" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:34.215011 695520 pod_ready.go:40] duration metric: took 1.604660669s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1124 09:05:34.261989 695520 start.go:625] kubectl: 1.34.2, cluster: 1.28.0 (minor skew: 6)
I1124 09:05:34.263612 695520 out.go:203]
W1124 09:05:34.264723 695520 out.go:285] ! /usr/local/bin/kubectl is version 1.34.2, which may have incompatibilities with Kubernetes 1.28.0.
I1124 09:05:34.265770 695520 out.go:179] - Want kubectl v1.28.0? Try 'minikube kubectl -- get pods -A'
I1124 09:05:34.267170 695520 out.go:179] * Done! kubectl is now configured to use "old-k8s-version-128377" cluster and "default" namespace by default
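(The warning above fires because kubectl 1.34 against a 1.28.0 control plane is a minor-version skew of 6, well beyond the one minor version kubectl officially supports; the hint on the line before it is the fix. Usage sketch, with the profile name taken from the log:
$ minikube -p old-k8s-version-128377 kubectl -- get pods -A   # runs the bundled, version-matched kubectl)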
I1124 09:05:32.422898 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1124 09:05:32.423021 685562 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1124 09:05:32.423106 685562 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1124 09:05:32.453902 685562 cri.go:89] found id: "1c3ac7689834f46a67038f7d9d8749dd11964dbb2214dc5f58152210452bc365"
I1124 09:05:32.453922 685562 cri.go:89] found id: "4d75c0e16a149ca1a7ec4e96d68718e51659aa9619085a44b28b38f4a7716680"
I1124 09:05:32.453927 685562 cri.go:89] found id: "7359853367f0edc54ad7b43f974b25c5e084487a9f1f0e85d38c8ad9736fcd00"
I1124 09:05:32.453929 685562 cri.go:89] found id: ""
I1124 09:05:32.453937 685562 logs.go:282] 3 containers: [1c3ac7689834f46a67038f7d9d8749dd11964dbb2214dc5f58152210452bc365 4d75c0e16a149ca1a7ec4e96d68718e51659aa9619085a44b28b38f4a7716680 7359853367f0edc54ad7b43f974b25c5e084487a9f1f0e85d38c8ad9736fcd00]
I1124 09:05:32.454000 685562 ssh_runner.go:195] Run: which crictl
I1124 09:05:32.458469 685562 ssh_runner.go:195] Run: which crictl
I1124 09:05:32.462439 685562 ssh_runner.go:195] Run: which crictl
I1124 09:05:32.466262 685562 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1124 09:05:32.466335 685562 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1124 09:05:32.496086 685562 cri.go:89] found id: "b0f5e195a2427e1475b232369ca31232e850412d5ccf99c87ab9d6ef0d230ec2"
I1124 09:05:32.496112 685562 cri.go:89] found id: ""
I1124 09:05:32.496122 685562 logs.go:282] 1 containers: [b0f5e195a2427e1475b232369ca31232e850412d5ccf99c87ab9d6ef0d230ec2]
I1124 09:05:32.496186 685562 ssh_runner.go:195] Run: which crictl
I1124 09:05:32.500443 685562 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1124 09:05:32.500532 685562 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1124 09:05:32.528567 685562 cri.go:89] found id: ""
I1124 09:05:32.528602 685562 logs.go:282] 0 containers: []
W1124 09:05:32.528610 685562 logs.go:284] No container was found matching "coredns"
I1124 09:05:32.528617 685562 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1124 09:05:32.528677 685562 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1124 09:05:32.557355 685562 cri.go:89] found id: "b018c37b5155a45849bf7701c25cfd1ff2e5d08a4a174fd7447b3d1e5014bc17"
I1124 09:05:32.557375 685562 cri.go:89] found id: "beba2c039cf143777ad7314b49e8a78d52025ed5525530635c9debdb1ab66ce9"
I1124 09:05:32.557379 685562 cri.go:89] found id: ""
I1124 09:05:32.557388 685562 logs.go:282] 2 containers: [b018c37b5155a45849bf7701c25cfd1ff2e5d08a4a174fd7447b3d1e5014bc17 beba2c039cf143777ad7314b49e8a78d52025ed5525530635c9debdb1ab66ce9]
I1124 09:05:32.557445 685562 ssh_runner.go:195] Run: which crictl
I1124 09:05:32.561666 685562 ssh_runner.go:195] Run: which crictl
I1124 09:05:32.565691 685562 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1124 09:05:32.565776 685562 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1124 09:05:32.594818 685562 cri.go:89] found id: ""
I1124 09:05:32.594841 685562 logs.go:282] 0 containers: []
W1124 09:05:32.594848 685562 logs.go:284] No container was found matching "kube-proxy"
I1124 09:05:32.594855 685562 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1124 09:05:32.594900 685562 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1124 09:05:32.625049 685562 cri.go:89] found id: "4fe764a0d4480b2b9c1a7e51dc63c845a71b6a2a78a4861dbbf794ad3bd3079d"
I1124 09:05:32.625068 685562 cri.go:89] found id: "87fb36f1d5c6bc7114bcd8099f1af4b27cea41c648c6e97f4789f111172ccbb0"
I1124 09:05:32.625073 685562 cri.go:89] found id: "c70fdaa8d0b65a6cc40d923809782c40bad08a66e1cd7ef35c3bd63e2344a7d0"
I1124 09:05:32.625078 685562 cri.go:89] found id: ""
I1124 09:05:32.625087 685562 logs.go:282] 3 containers: [4fe764a0d4480b2b9c1a7e51dc63c845a71b6a2a78a4861dbbf794ad3bd3079d 87fb36f1d5c6bc7114bcd8099f1af4b27cea41c648c6e97f4789f111172ccbb0 c70fdaa8d0b65a6cc40d923809782c40bad08a66e1cd7ef35c3bd63e2344a7d0]
I1124 09:05:32.625142 685562 ssh_runner.go:195] Run: which crictl
I1124 09:05:32.630042 685562 ssh_runner.go:195] Run: which crictl
I1124 09:05:32.634965 685562 ssh_runner.go:195] Run: which crictl
I1124 09:05:32.639315 685562 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1124 09:05:32.639376 685562 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1124 09:05:32.669355 685562 cri.go:89] found id: ""
I1124 09:05:32.669384 685562 logs.go:282] 0 containers: []
W1124 09:05:32.669392 685562 logs.go:284] No container was found matching "kindnet"
I1124 09:05:32.669398 685562 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1124 09:05:32.669449 685562 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1124 09:05:32.697559 685562 cri.go:89] found id: ""
I1124 09:05:32.697586 685562 logs.go:282] 0 containers: []
W1124 09:05:32.697596 685562 logs.go:284] No container was found matching "storage-provisioner"
I1124 09:05:32.697610 685562 logs.go:123] Gathering logs for containerd ...
I1124 09:05:32.697645 685562 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1124 09:05:32.736120 685562 logs.go:123] Gathering logs for container status ...
I1124 09:05:32.736153 685562 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1124 09:05:32.768484 685562 logs.go:123] Gathering logs for kubelet ...
I1124 09:05:32.768526 685562 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1124 09:05:32.836058 685562 logs.go:123] Gathering logs for dmesg ...
I1124 09:05:32.836100 685562 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1124 09:05:32.853541 685562 logs.go:123] Gathering logs for describe nodes ...
I1124 09:05:32.853613 685562 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
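(Process 685562 belongs to a different profile whose apiserver is still refusing /healthz, so minikube has switched to gathering diagnostics: per-component container listings over the CRI, then unit and kernel logs. Every command appears verbatim in the Run: lines above and can be replayed inside the node as-is:
$ sudo crictl ps -a --quiet --name=kube-apiserver   # container IDs for one component, all states
$ sudo journalctl -u containerd -n 400              # last 400 lines of runtime logs
$ sudo journalctl -u kubelet -n 400                 # last 400 lines of kubelet logs
$ sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400   # kernel warnings and errors)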
I1124 09:05:33.384739 696018 node_ready.go:49] node "no-preload-820576" is "Ready"
I1124 09:05:33.384778 696018 node_ready.go:38] duration metric: took 14.503869435s for node "no-preload-820576" to be "Ready" ...
I1124 09:05:33.384797 696018 api_server.go:52] waiting for apiserver process to appear ...
I1124 09:05:33.384861 696018 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1124 09:05:33.401268 696018 api_server.go:72] duration metric: took 14.81022929s to wait for apiserver process to appear ...
I1124 09:05:33.401299 696018 api_server.go:88] waiting for apiserver healthz status ...
I1124 09:05:33.401324 696018 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
I1124 09:05:33.406015 696018 api_server.go:279] https://192.168.85.2:8443/healthz returned 200:
ok
I1124 09:05:33.407175 696018 api_server.go:141] control plane version: v1.35.0-beta.0
I1124 09:05:33.407215 696018 api_server.go:131] duration metric: took 5.908148ms to wait for apiserver health ...
I1124 09:05:33.407226 696018 system_pods.go:43] waiting for kube-system pods to appear ...
I1124 09:05:33.410293 696018 system_pods.go:59] 8 kube-system pods found
I1124 09:05:33.410331 696018 system_pods.go:61] "coredns-7d764666f9-b6dpn" [c84a0b09-07a2-4e6a-928a-b9aca9e3b1a1] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 09:05:33.410338 696018 system_pods.go:61] "etcd-no-preload-820576" [39f892d7-184f-4858-be8f-174718ac6aaf] Running
I1124 09:05:33.410346 696018 system_pods.go:61] "kindnet-kvm52" [967c23e8-7e42-4034-b5a2-e4cd65bc4d94] Running
I1124 09:05:33.410352 696018 system_pods.go:61] "kube-apiserver-no-preload-820576" [d5294a7a-2337-4ef4-82a2-20d85daf8739] Running
I1124 09:05:33.410360 696018 system_pods.go:61] "kube-controller-manager-no-preload-820576" [e6320a0d-f5cf-4a17-af3d-6fa87f1e02ad] Running
I1124 09:05:33.410365 696018 system_pods.go:61] "kube-proxy-vz24l" [4a64a474-1e1b-411d-aea6-9d12e1d9f84e] Running
I1124 09:05:33.410369 696018 system_pods.go:61] "kube-scheduler-no-preload-820576" [9fd536e3-1a01-4c16-bf46-75db8f38b3f4] Running
I1124 09:05:33.410382 696018 system_pods.go:61] "storage-provisioner" [144d237b-4f80-441d-867b-0ee26edd8590] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 09:05:33.410391 696018 system_pods.go:74] duration metric: took 3.156993ms to wait for pod list to return data ...
I1124 09:05:33.410403 696018 default_sa.go:34] waiting for default service account to be created ...
I1124 09:05:33.413158 696018 default_sa.go:45] found service account: "default"
I1124 09:05:33.413182 696018 default_sa.go:55] duration metric: took 2.772178ms for default service account to be created ...
I1124 09:05:33.413192 696018 system_pods.go:116] waiting for k8s-apps to be running ...
I1124 09:05:33.416818 696018 system_pods.go:86] 8 kube-system pods found
I1124 09:05:33.416849 696018 system_pods.go:89] "coredns-7d764666f9-b6dpn" [c84a0b09-07a2-4e6a-928a-b9aca9e3b1a1] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 09:05:33.416856 696018 system_pods.go:89] "etcd-no-preload-820576" [39f892d7-184f-4858-be8f-174718ac6aaf] Running
I1124 09:05:33.416863 696018 system_pods.go:89] "kindnet-kvm52" [967c23e8-7e42-4034-b5a2-e4cd65bc4d94] Running
I1124 09:05:33.416868 696018 system_pods.go:89] "kube-apiserver-no-preload-820576" [d5294a7a-2337-4ef4-82a2-20d85daf8739] Running
I1124 09:05:33.416874 696018 system_pods.go:89] "kube-controller-manager-no-preload-820576" [e6320a0d-f5cf-4a17-af3d-6fa87f1e02ad] Running
I1124 09:05:33.416879 696018 system_pods.go:89] "kube-proxy-vz24l" [4a64a474-1e1b-411d-aea6-9d12e1d9f84e] Running
I1124 09:05:33.416884 696018 system_pods.go:89] "kube-scheduler-no-preload-820576" [9fd536e3-1a01-4c16-bf46-75db8f38b3f4] Running
I1124 09:05:33.416891 696018 system_pods.go:89] "storage-provisioner" [144d237b-4f80-441d-867b-0ee26edd8590] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 09:05:33.416935 696018 retry.go:31] will retry after 275.944352ms: missing components: kube-dns
I1124 09:05:33.697203 696018 system_pods.go:86] 8 kube-system pods found
I1124 09:05:33.697247 696018 system_pods.go:89] "coredns-7d764666f9-b6dpn" [c84a0b09-07a2-4e6a-928a-b9aca9e3b1a1] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 09:05:33.697259 696018 system_pods.go:89] "etcd-no-preload-820576" [39f892d7-184f-4858-be8f-174718ac6aaf] Running
I1124 09:05:33.697269 696018 system_pods.go:89] "kindnet-kvm52" [967c23e8-7e42-4034-b5a2-e4cd65bc4d94] Running
I1124 09:05:33.697274 696018 system_pods.go:89] "kube-apiserver-no-preload-820576" [d5294a7a-2337-4ef4-82a2-20d85daf8739] Running
I1124 09:05:33.697285 696018 system_pods.go:89] "kube-controller-manager-no-preload-820576" [e6320a0d-f5cf-4a17-af3d-6fa87f1e02ad] Running
I1124 09:05:33.697290 696018 system_pods.go:89] "kube-proxy-vz24l" [4a64a474-1e1b-411d-aea6-9d12e1d9f84e] Running
I1124 09:05:33.697297 696018 system_pods.go:89] "kube-scheduler-no-preload-820576" [9fd536e3-1a01-4c16-bf46-75db8f38b3f4] Running
I1124 09:05:33.697304 696018 system_pods.go:89] "storage-provisioner" [144d237b-4f80-441d-867b-0ee26edd8590] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 09:05:33.697327 696018 retry.go:31] will retry after 278.68714ms: missing components: kube-dns
I1124 09:05:33.979933 696018 system_pods.go:86] 8 kube-system pods found
I1124 09:05:33.979971 696018 system_pods.go:89] "coredns-7d764666f9-b6dpn" [c84a0b09-07a2-4e6a-928a-b9aca9e3b1a1] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 09:05:33.979977 696018 system_pods.go:89] "etcd-no-preload-820576" [39f892d7-184f-4858-be8f-174718ac6aaf] Running
I1124 09:05:33.979984 696018 system_pods.go:89] "kindnet-kvm52" [967c23e8-7e42-4034-b5a2-e4cd65bc4d94] Running
I1124 09:05:33.979987 696018 system_pods.go:89] "kube-apiserver-no-preload-820576" [d5294a7a-2337-4ef4-82a2-20d85daf8739] Running
I1124 09:05:33.979991 696018 system_pods.go:89] "kube-controller-manager-no-preload-820576" [e6320a0d-f5cf-4a17-af3d-6fa87f1e02ad] Running
I1124 09:05:33.979994 696018 system_pods.go:89] "kube-proxy-vz24l" [4a64a474-1e1b-411d-aea6-9d12e1d9f84e] Running
I1124 09:05:33.979998 696018 system_pods.go:89] "kube-scheduler-no-preload-820576" [9fd536e3-1a01-4c16-bf46-75db8f38b3f4] Running
I1124 09:05:33.980003 696018 system_pods.go:89] "storage-provisioner" [144d237b-4f80-441d-867b-0ee26edd8590] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 09:05:33.980020 696018 retry.go:31] will retry after 448.083964ms: missing components: kube-dns
I1124 09:05:34.432301 696018 system_pods.go:86] 8 kube-system pods found
I1124 09:05:34.432341 696018 system_pods.go:89] "coredns-7d764666f9-b6dpn" [c84a0b09-07a2-4e6a-928a-b9aca9e3b1a1] Running
I1124 09:05:34.432350 696018 system_pods.go:89] "etcd-no-preload-820576" [39f892d7-184f-4858-be8f-174718ac6aaf] Running
I1124 09:05:34.432355 696018 system_pods.go:89] "kindnet-kvm52" [967c23e8-7e42-4034-b5a2-e4cd65bc4d94] Running
I1124 09:05:34.432362 696018 system_pods.go:89] "kube-apiserver-no-preload-820576" [d5294a7a-2337-4ef4-82a2-20d85daf8739] Running
I1124 09:05:34.432369 696018 system_pods.go:89] "kube-controller-manager-no-preload-820576" [e6320a0d-f5cf-4a17-af3d-6fa87f1e02ad] Running
I1124 09:05:34.432374 696018 system_pods.go:89] "kube-proxy-vz24l" [4a64a474-1e1b-411d-aea6-9d12e1d9f84e] Running
I1124 09:05:34.432379 696018 system_pods.go:89] "kube-scheduler-no-preload-820576" [9fd536e3-1a01-4c16-bf46-75db8f38b3f4] Running
I1124 09:05:34.432384 696018 system_pods.go:89] "storage-provisioner" [144d237b-4f80-441d-867b-0ee26edd8590] Running
I1124 09:05:34.432395 696018 system_pods.go:126] duration metric: took 1.019195458s to wait for k8s-apps to be running ...
I1124 09:05:34.432410  696018 system_svc.go:44] waiting for kubelet service to be running ...
I1124 09:05:34.432534 696018 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1124 09:05:34.451401 696018 system_svc.go:56] duration metric: took 18.978773ms WaitForService to wait for kubelet
I1124 09:05:34.451444 696018 kubeadm.go:587] duration metric: took 15.860405681s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1124 09:05:34.451483 696018 node_conditions.go:102] verifying NodePressure condition ...
I1124 09:05:34.454386 696018 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I1124 09:05:34.454410 696018 node_conditions.go:123] node cpu capacity is 8
I1124 09:05:34.454427 696018 node_conditions.go:105] duration metric: took 2.938205ms to run NodePressure ...
I1124 09:05:34.454440 696018 start.go:242] waiting for startup goroutines ...
I1124 09:05:34.454450 696018 start.go:247] waiting for cluster config update ...
I1124 09:05:34.454478 696018 start.go:256] writing updated cluster config ...
I1124 09:05:34.454771 696018 ssh_runner.go:195] Run: rm -f paused
I1124 09:05:34.459160 696018 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1124 09:05:34.462567 696018 pod_ready.go:83] waiting for pod "coredns-7d764666f9-b6dpn" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:34.466303 696018 pod_ready.go:94] pod "coredns-7d764666f9-b6dpn" is "Ready"
I1124 09:05:34.466324 696018 pod_ready.go:86] duration metric: took 3.738029ms for pod "coredns-7d764666f9-b6dpn" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:34.468156 696018 pod_ready.go:83] waiting for pod "etcd-no-preload-820576" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:34.471750 696018 pod_ready.go:94] pod "etcd-no-preload-820576" is "Ready"
I1124 09:05:34.471775 696018 pod_ready.go:86] duration metric: took 3.597676ms for pod "etcd-no-preload-820576" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:34.473507 696018 pod_ready.go:83] waiting for pod "kube-apiserver-no-preload-820576" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:34.477092 696018 pod_ready.go:94] pod "kube-apiserver-no-preload-820576" is "Ready"
I1124 09:05:34.477115 696018 pod_ready.go:86] duration metric: took 3.588223ms for pod "kube-apiserver-no-preload-820576" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:34.478724 696018 pod_ready.go:83] waiting for pod "kube-controller-manager-no-preload-820576" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:34.862953 696018 pod_ready.go:94] pod "kube-controller-manager-no-preload-820576" is "Ready"
I1124 09:05:34.862977 696018 pod_ready.go:86] duration metric: took 384.235741ms for pod "kube-controller-manager-no-preload-820576" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:35.063039 696018 pod_ready.go:83] waiting for pod "kube-proxy-vz24l" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:35.463183 696018 pod_ready.go:94] pod "kube-proxy-vz24l" is "Ready"
I1124 09:05:35.463217 696018 pod_ready.go:86] duration metric: took 400.149042ms for pod "kube-proxy-vz24l" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:35.664151 696018 pod_ready.go:83] waiting for pod "kube-scheduler-no-preload-820576" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:36.063590 696018 pod_ready.go:94] pod "kube-scheduler-no-preload-820576" is "Ready"
I1124 09:05:36.063619 696018 pod_ready.go:86] duration metric: took 399.441074ms for pod "kube-scheduler-no-preload-820576" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:36.063632 696018 pod_ready.go:40] duration metric: took 1.604443296s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1124 09:05:36.110852 696018 start.go:625] kubectl: 1.34.2, cluster: 1.35.0-beta.0 (minor skew: 1)
I1124 09:05:36.112796 696018 out.go:179] * Done! kubectl is now configured to use "no-preload-820576" cluster and "default" namespace by default
==> container status <==
CONTAINER       IMAGE           CREATED          STATE     NAME                      ATTEMPT   POD ID          POD                                               NAMESPACE
92908e44718b7   56cc512116c8f   7 seconds ago    Running   busybox                   0         1ee15af433557   busybox                                           default
a7a841ea7303a   ead0a4a53df89   12 seconds ago   Running   coredns                   0         5cd1e9dd6b4b4   coredns-5dd5756b68-vxxnm                          kube-system
a9a5857553e67   6e38f40d628db   12 seconds ago   Running   storage-provisioner       0         6128b1854bc49   storage-provisioner                               kube-system
818537e08c060   409467f978b4a   23 seconds ago   Running   kindnet-cni               0         cd819a24f784f   kindnet-gbp66                                     kube-system
370631aaaf577   ea1030da44aa1   26 seconds ago   Running   kube-proxy                0         17a629fbc9de7   kube-proxy-fpbs2                                  kube-system
f5eddecfb179f   f6f496300a2ae   44 seconds ago   Running   kube-scheduler            0         d4658a7b318ec   kube-scheduler-old-k8s-version-128377             kube-system
5d9ec22e03b8b   4be79c38a4bab   44 seconds ago   Running   kube-controller-manager   0         f3a2eced02a3b   kube-controller-manager-old-k8s-version-128377    kube-system
842bd9db2d84b   bb5e0dde9054c   44 seconds ago   Running   kube-apiserver            0         879c975eb1a53   kube-apiserver-old-k8s-version-128377             kube-system
8df3112d99751   73deb9a3f7025   44 seconds ago   Running   etcd                      0         78f7483f85b14   etcd-old-k8s-version-128377                       kube-system
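(The table above is crictl ps -a output captured on the old-k8s-version-128377 node, matching the 'container status' gathering command seen earlier, which falls back to docker ps -a when crictl is absent. To reproduce it from the host, assuming the default minikube SSH setup:
$ minikube -p old-k8s-version-128377 ssh -- sudo crictl ps -a)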
==> containerd <==
Nov 24 09:05:32 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:32.013913791Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-5dd5756b68-vxxnm,Uid:b84bae0f-9f75-4d1c-b2ed-da0c10a141cf,Namespace:kube-system,Attempt:0,} returns sandbox id \"5cd1e9dd6b4b4d2ac225fd496f6fac6cfc490bdb385b217119ffd695f763abf3\""
Nov 24 09:05:32 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:32.016899714Z" level=info msg="CreateContainer within sandbox \"5cd1e9dd6b4b4d2ac225fd496f6fac6cfc490bdb385b217119ffd695f763abf3\" for container &ContainerMetadata{Name:coredns,Attempt:0,}"
Nov 24 09:05:32 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:32.024116931Z" level=info msg="Container a7a841ea7303a40b7b557fbe769c57a1562346d875b1853a8a729ad668090cb5: CDI devices from CRI Config.CDIDevices: []"
Nov 24 09:05:32 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:32.030290587Z" level=info msg="CreateContainer within sandbox \"5cd1e9dd6b4b4d2ac225fd496f6fac6cfc490bdb385b217119ffd695f763abf3\" for &ContainerMetadata{Name:coredns,Attempt:0,} returns container id \"a7a841ea7303a40b7b557fbe769c57a1562346d875b1853a8a729ad668090cb5\""
Nov 24 09:05:32 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:32.030773995Z" level=info msg="StartContainer for \"a7a841ea7303a40b7b557fbe769c57a1562346d875b1853a8a729ad668090cb5\""
Nov 24 09:05:32 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:32.031567693Z" level=info msg="connecting to shim a7a841ea7303a40b7b557fbe769c57a1562346d875b1853a8a729ad668090cb5" address="unix:///run/containerd/s/7e80e31b141e93e01901781df29b4edcac7d62ec3fd02a2cc1cde1ffde438980" protocol=ttrpc version=3
Nov 24 09:05:32 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:32.070950416Z" level=info msg="StartContainer for \"a9a5857553e67019e47641c1970bb0d5555afd6b608c94a94501dd485efac0c4\" returns successfully"
Nov 24 09:05:32 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:32.075707267Z" level=info msg="StartContainer for \"a7a841ea7303a40b7b557fbe769c57a1562346d875b1853a8a729ad668090cb5\" returns successfully"
Nov 24 09:05:34 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:34.747845169Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:bfaec734-d874-4dcb-b31f-feb87adccfca,Namespace:default,Attempt:0,}"
Nov 24 09:05:34 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:34.786693345Z" level=info msg="connecting to shim 1ee15af4335571d5c2c1f8cf460b21232bfc82973349a4c00a86f5a2545492a2" address="unix:///run/containerd/s/b51cd8663d01a7c675d7f65aecc44f4b6281e3382088734fe56170e879775890" namespace=k8s.io protocol=ttrpc version=3
Nov 24 09:05:34 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:34.851781414Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:bfaec734-d874-4dcb-b31f-feb87adccfca,Namespace:default,Attempt:0,} returns sandbox id \"1ee15af4335571d5c2c1f8cf460b21232bfc82973349a4c00a86f5a2545492a2\""
Nov 24 09:05:34 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:34.853515051Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\""
Nov 24 09:05:37 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:37.357982384Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 24 09:05:37 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:37.358604580Z" level=info msg="stop pulling image gcr.io/k8s-minikube/busybox:1.28.4-glibc: active requests=0, bytes read=2396643"
Nov 24 09:05:37 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:37.359790616Z" level=info msg="ImageCreate event name:\"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 24 09:05:37 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:37.361443799Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 24 09:05:37 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:37.361898949Z" level=info msg="Pulled image \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" with image id \"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\", repo tag \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\", repo digest \"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\", size \"2395207\" in 2.508337162s"
Nov 24 09:05:37 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:37.361934177Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" returns image reference \"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\""
Nov 24 09:05:37 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:37.363533599Z" level=info msg="CreateContainer within sandbox \"1ee15af4335571d5c2c1f8cf460b21232bfc82973349a4c00a86f5a2545492a2\" for container &ContainerMetadata{Name:busybox,Attempt:0,}"
Nov 24 09:05:37 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:37.369396201Z" level=info msg="Container 92908e44718b76213a4fd87e310efd757d73940a581879283782328fd7a0dfa9: CDI devices from CRI Config.CDIDevices: []"
Nov 24 09:05:37 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:37.374660363Z" level=info msg="CreateContainer within sandbox \"1ee15af4335571d5c2c1f8cf460b21232bfc82973349a4c00a86f5a2545492a2\" for &ContainerMetadata{Name:busybox,Attempt:0,} returns container id \"92908e44718b76213a4fd87e310efd757d73940a581879283782328fd7a0dfa9\""
Nov 24 09:05:37 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:37.375182989Z" level=info msg="StartContainer for \"92908e44718b76213a4fd87e310efd757d73940a581879283782328fd7a0dfa9\""
Nov 24 09:05:37 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:37.376051696Z" level=info msg="connecting to shim 92908e44718b76213a4fd87e310efd757d73940a581879283782328fd7a0dfa9" address="unix:///run/containerd/s/b51cd8663d01a7c675d7f65aecc44f4b6281e3382088734fe56170e879775890" protocol=ttrpc version=3
Nov 24 09:05:37 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:37.425776823Z" level=info msg="StartContainer for \"92908e44718b76213a4fd87e310efd757d73940a581879283782328fd7a0dfa9\" returns successfully"
Nov 24 09:05:43 old-k8s-version-128377 containerd[661]: E1124 09:05:43.526421 661 websocket.go:100] "Unhandled Error" err="unable to upgrade websocket connection: websocket server finished before becoming ready" logger="UnhandledError"
==> coredns [a7a841ea7303a40b7b557fbe769c57a1562346d875b1853a8a729ad668090cb5] <==
.:53
[INFO] plugin/reload: Running configuration SHA512 = 25cf5af2951e282c4b0e961a02fb5d3e57c974501832fee92eec17b5135b9ec9d9e87d2ac94e6d117a5ed3dd54e8800aa7b4479706eb54497145ccdb80397d1b
CoreDNS-1.10.1
linux/amd64, go1.20, 055b2c3
[INFO] 127.0.0.1:54326 - 65005 "HINFO IN 6565264189616162908.3935264129304859187. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.029224592s
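(The CoreDNS banner shows the loaded config hash, the 1.10.1 build, and the loop plugin's random HINFO self-query resolving NXDOMAIN, i.e. no forwarding loop was detected. The same stream can be tailed directly over the CRI using the container ID from the section header; crictl accepts ID prefixes:
$ sudo crictl logs a7a841ea7303a)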
==> describe nodes <==
Name: old-k8s-version-128377
Roles: control-plane
Labels:             beta.kubernetes.io/arch=amd64
                    beta.kubernetes.io/os=linux
                    kubernetes.io/arch=amd64
                    kubernetes.io/hostname=old-k8s-version-128377
                    kubernetes.io/os=linux
                    minikube.k8s.io/commit=393ee3e0b845623107dce6cda4f48ffd5c3d1811
                    minikube.k8s.io/name=old-k8s-version-128377
                    minikube.k8s.io/primary=true
                    minikube.k8s.io/updated_at=2025_11_24T09_05_05_0700
                    minikube.k8s.io/version=v1.37.0
                    node-role.kubernetes.io/control-plane=
                    node.kubernetes.io/exclude-from-external-load-balancers=
Annotations:        kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock
                    node.alpha.kubernetes.io/ttl: 0
                    volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Mon, 24 Nov 2025 09:05:01 +0000
Taints: <none>
Unschedulable: false
Lease:
  HolderIdentity:  old-k8s-version-128377
  AcquireTime:     <unset>
  RenewTime:       Mon, 24 Nov 2025 09:05:34 +0000
Conditions:
  Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
  ----             ------  -----------------                 ------------------                ------                       -------
  MemoryPressure   False   Mon, 24 Nov 2025 09:05:35 +0000   Mon, 24 Nov 2025 09:05:00 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
  DiskPressure     False   Mon, 24 Nov 2025 09:05:35 +0000   Mon, 24 Nov 2025 09:05:00 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
  PIDPressure      False   Mon, 24 Nov 2025 09:05:35 +0000   Mon, 24 Nov 2025 09:05:00 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
  Ready            True    Mon, 24 Nov 2025 09:05:35 +0000   Mon, 24 Nov 2025 09:05:31 +0000   KubeletReady                 kubelet is posting ready status
Addresses:
  InternalIP:  192.168.103.2
  Hostname:    old-k8s-version-128377
Capacity:
  cpu:                8
  ephemeral-storage:  304681132Ki
  hugepages-1Gi:      0
  hugepages-2Mi:      0
  memory:             32863356Ki
  pods:               110
Allocatable:
  cpu:                8
  ephemeral-storage:  304681132Ki
  hugepages-1Gi:      0
  hugepages-2Mi:      0
  memory:             32863356Ki
  pods:               110
System Info:
  Machine ID:                 9629f1d5bc1ed524a56ce23c69214c09
  System UUID:                220a6d4b-4a36-435b-ad8f-2d418f4618a1
  Boot ID:                    f052cd47-57de-4521-b5fb-139979fdced9
  Kernel Version:             6.8.0-1044-gcp
  OS Image:                   Debian GNU/Linux 12 (bookworm)
  Operating System:           linux
  Architecture:               amd64
  Container Runtime Version:  containerd://2.1.5
  Kubelet Version:            v1.28.0
  Kube-Proxy Version:         v1.28.0
PodCIDR:                      10.244.0.0/24
PodCIDRs:                     10.244.0.0/24
Non-terminated Pods:          (9 in total)
  Namespace    Name                                               CPU Requests  CPU Limits  Memory Requests  Memory Limits  Age
  ---------    ----                                               ------------  ----------  ---------------  -------------  ---
  default      busybox                                            0 (0%)        0 (0%)      0 (0%)           0 (0%)         10s
  kube-system  coredns-5dd5756b68-vxxnm                           100m (1%)     0 (0%)      70Mi (0%)        170Mi (0%)     27s
  kube-system  etcd-old-k8s-version-128377                        100m (1%)     0 (0%)      100Mi (0%)       0 (0%)         40s
  kube-system  kindnet-gbp66                                      100m (1%)     100m (1%)   50Mi (0%)        50Mi (0%)      27s
  kube-system  kube-apiserver-old-k8s-version-128377              250m (3%)     0 (0%)      0 (0%)           0 (0%)         40s
  kube-system  kube-controller-manager-old-k8s-version-128377     200m (2%)     0 (0%)      0 (0%)           0 (0%)         40s
  kube-system  kube-proxy-fpbs2                                   0 (0%)        0 (0%)      0 (0%)           0 (0%)         27s
  kube-system  kube-scheduler-old-k8s-version-128377              100m (1%)     0 (0%)      0 (0%)           0 (0%)         40s
  kube-system  storage-provisioner                                0 (0%)        0 (0%)      0 (0%)           0 (0%)         27s
Allocated resources:
  (Total limits may be over 100 percent, i.e., overcommitted.)
  Resource           Requests    Limits
  --------           --------    ------
  cpu                850m (10%)  100m (1%)
  memory             220Mi (0%)  220Mi (0%)
  ephemeral-storage  0 (0%)      0 (0%)
  hugepages-1Gi      0 (0%)      0 (0%)
  hugepages-2Mi      0 (0%)      0 (0%)
Events:
  Type    Reason                   Age                From             Message
  ----    ------                   ----               ----             -------
  Normal  Starting                 26s                kube-proxy
  Normal  Starting                 46s                kubelet          Starting kubelet.
  Normal  NodeHasSufficientMemory  45s (x8 over 45s)  kubelet          Node old-k8s-version-128377 status is now: NodeHasSufficientMemory
  Normal  NodeHasNoDiskPressure    45s (x8 over 45s)  kubelet          Node old-k8s-version-128377 status is now: NodeHasNoDiskPressure
  Normal  NodeHasSufficientPID     45s (x7 over 45s)  kubelet          Node old-k8s-version-128377 status is now: NodeHasSufficientPID
  Normal  NodeAllocatableEnforced  45s                kubelet          Updated Node Allocatable limit across pods
  Normal  Starting                 40s                kubelet          Starting kubelet.
  Normal  NodeAllocatableEnforced  40s                kubelet          Updated Node Allocatable limit across pods
  Normal  NodeHasSufficientMemory  40s                kubelet          Node old-k8s-version-128377 status is now: NodeHasSufficientMemory
  Normal  NodeHasNoDiskPressure    40s                kubelet          Node old-k8s-version-128377 status is now: NodeHasNoDiskPressure
  Normal  NodeHasSufficientPID     40s                kubelet          Node old-k8s-version-128377 status is now: NodeHasSufficientPID
  Normal  RegisteredNode           28s                node-controller  Node old-k8s-version-128377 event: Registered Node old-k8s-version-128377 in Controller
  Normal  NodeReady                13s                kubelet          Node old-k8s-version-128377 status is now: NodeReady
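(The section above is ordinary kubectl describe output; minikube generates it with the node's bundled kubectl against the in-node kubeconfig, the same pattern visible in the Run: gathering lines earlier for the other profile. With a local kubeconfig already pointing at this cluster, the equivalent is simply:
$ kubectl describe node old-k8s-version-128377)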
==> dmesg <==
==> etcd [8df3112d99751cf0ed66add055e0df50e3c944dbb66b787e2e3ae37efbec7d4e] <==
{"level":"info","ts":"2025-11-24T09:05:00.107581Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2025-11-24T09:05:00.107626Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"3336683c081d149d","local-member-id":"f23060b075c4c089","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-24T09:05:00.107753Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-24T09:05:00.10778Z","caller":"etcdserver/server.go:2595","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-24T09:05:00.10887Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2025-11-24T09:05:00.108869Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.103.2:2379"}
{"level":"info","ts":"2025-11-24T09:05:01.710895Z","caller":"traceutil/trace.go:171","msg":"trace[1442253581] transaction","detail":"{read_only:false; response_revision:20; number_of_response:1; }","duration":"170.61339ms","start":"2025-11-24T09:05:01.540258Z","end":"2025-11-24T09:05:01.710871Z","steps":["trace[1442253581] 'process raft request' (duration: 170.544438ms)"],"step_count":1}
{"level":"info","ts":"2025-11-24T09:05:01.711011Z","caller":"traceutil/trace.go:171","msg":"trace[699662152] transaction","detail":"{read_only:false; response_revision:19; number_of_response:1; }","duration":"172.264745ms","start":"2025-11-24T09:05:01.538726Z","end":"2025-11-24T09:05:01.710991Z","steps":["trace[699662152] 'process raft request' (duration: 172.04013ms)"],"step_count":1}
{"level":"warn","ts":"2025-11-24T09:05:01.711031Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"172.576061ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/certificatesigningrequests/csr-9x9d8\" ","response":"range_response_count:1 size:895"}
{"level":"info","ts":"2025-11-24T09:05:01.710896Z","caller":"traceutil/trace.go:171","msg":"trace[1006472868] transaction","detail":"{read_only:false; response_revision:18; number_of_response:1; }","duration":"172.691781ms","start":"2025-11-24T09:05:01.538162Z","end":"2025-11-24T09:05:01.710854Z","steps":["trace[1006472868] 'process raft request' (duration: 109.125575ms)","trace[1006472868] 'compare' (duration: 63.355357ms)"],"step_count":2}
{"level":"info","ts":"2025-11-24T09:05:01.710915Z","caller":"traceutil/trace.go:171","msg":"trace[981263403] transaction","detail":"{read_only:false; response_revision:21; number_of_response:1; }","duration":"170.391166ms","start":"2025-11-24T09:05:01.540518Z","end":"2025-11-24T09:05:01.710909Z","steps":["trace[981263403] 'process raft request' (duration: 170.307811ms)"],"step_count":1}
{"level":"info","ts":"2025-11-24T09:05:01.711086Z","caller":"traceutil/trace.go:171","msg":"trace[1918024405] range","detail":"{range_begin:/registry/certificatesigningrequests/csr-9x9d8; range_end:; response_count:1; response_revision:22; }","duration":"172.654948ms","start":"2025-11-24T09:05:01.538422Z","end":"2025-11-24T09:05:01.711077Z","steps":["trace[1918024405] 'agreement among raft nodes before linearized reading' (duration: 172.512588ms)"],"step_count":1}
{"level":"info","ts":"2025-11-24T09:05:01.710914Z","caller":"traceutil/trace.go:171","msg":"trace[1488131719] linearizableReadLoop","detail":"{readStateIndex:22; appliedIndex:18; }","duration":"172.460174ms","start":"2025-11-24T09:05:01.53844Z","end":"2025-11-24T09:05:01.7109Z","steps":["trace[1488131719] 'read index received' (duration: 25.895675ms)","trace[1488131719] 'applied index is now lower than readState.Index' (duration: 146.559971ms)"],"step_count":2}
{"level":"info","ts":"2025-11-24T09:05:01.711054Z","caller":"traceutil/trace.go:171","msg":"trace[1678514513] transaction","detail":"{read_only:false; response_revision:22; number_of_response:1; }","duration":"149.8797ms","start":"2025-11-24T09:05:01.561163Z","end":"2025-11-24T09:05:01.711042Z","steps":["trace[1678514513] 'process raft request' (duration: 149.700045ms)"],"step_count":1}
{"level":"info","ts":"2025-11-24T09:05:01.711435Z","caller":"traceutil/trace.go:171","msg":"trace[2085549652] transaction","detail":"{read_only:false; response_revision:23; number_of_response:1; }","duration":"144.831606ms","start":"2025-11-24T09:05:01.566593Z","end":"2025-11-24T09:05:01.711425Z","steps":["trace[2085549652] 'process raft request' (duration: 144.652194ms)"],"step_count":1}
{"level":"info","ts":"2025-11-24T09:05:01.711454Z","caller":"traceutil/trace.go:171","msg":"trace[1776690454] transaction","detail":"{read_only:false; response_revision:24; number_of_response:1; }","duration":"143.564662ms","start":"2025-11-24T09:05:01.567876Z","end":"2025-11-24T09:05:01.71144Z","steps":["trace[1776690454] 'process raft request' (duration: 143.429904ms)"],"step_count":1}
{"level":"warn","ts":"2025-11-24T09:05:01.711724Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"146.213558ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/namespaces/kube-system\" ","response":"range_response_count:1 size:350"}
{"level":"info","ts":"2025-11-24T09:05:01.711757Z","caller":"traceutil/trace.go:171","msg":"trace[366826393] range","detail":"{range_begin:/registry/namespaces/kube-system; range_end:; response_count:1; response_revision:25; }","duration":"146.253881ms","start":"2025-11-24T09:05:01.565494Z","end":"2025-11-24T09:05:01.711748Z","steps":["trace[366826393] 'agreement among raft nodes before linearized reading' (duration: 146.18478ms)"],"step_count":1}
{"level":"info","ts":"2025-11-24T09:05:01.711931Z","caller":"traceutil/trace.go:171","msg":"trace[1923893862] transaction","detail":"{read_only:false; response_revision:25; number_of_response:1; }","duration":"137.068438ms","start":"2025-11-24T09:05:01.574851Z","end":"2025-11-24T09:05:01.711919Z","steps":["trace[1923893862] 'process raft request' (duration: 136.481982ms)"],"step_count":1}
{"level":"warn","ts":"2025-11-24T09:05:01.712125Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"171.955875ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/configmaps/kube-system/extension-apiserver-authentication\" ","response":"range_response_count:0 size:4"}
{"level":"info","ts":"2025-11-24T09:05:01.712163Z","caller":"traceutil/trace.go:171","msg":"trace[90940555] range","detail":"{range_begin:/registry/configmaps/kube-system/extension-apiserver-authentication; range_end:; response_count:0; response_revision:25; }","duration":"172.012061ms","start":"2025-11-24T09:05:01.54014Z","end":"2025-11-24T09:05:01.712153Z","steps":["trace[90940555] 'agreement among raft nodes before linearized reading' (duration: 171.930715ms)"],"step_count":1}
{"level":"warn","ts":"2025-11-24T09:05:01.714609Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"175.250502ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/minions/old-k8s-version-128377\" ","response":"range_response_count:1 size:3558"}
{"level":"info","ts":"2025-11-24T09:05:01.714708Z","caller":"traceutil/trace.go:171","msg":"trace[322045522] range","detail":"{range_begin:/registry/minions/old-k8s-version-128377; range_end:; response_count:1; response_revision:25; }","duration":"175.353553ms","start":"2025-11-24T09:05:01.539338Z","end":"2025-11-24T09:05:01.714691Z","steps":["trace[322045522] 'agreement among raft nodes before linearized reading' (duration: 172.031487ms)"],"step_count":1}
{"level":"info","ts":"2025-11-24T09:05:03.559324Z","caller":"traceutil/trace.go:171","msg":"trace[627044044] transaction","detail":"{read_only:false; response_revision:204; number_of_response:1; }","duration":"100.594994ms","start":"2025-11-24T09:05:03.458371Z","end":"2025-11-24T09:05:03.558966Z","steps":["trace[627044044] 'process raft request' (duration: 98.72439ms)"],"step_count":1}
{"level":"info","ts":"2025-11-24T09:05:11.43815Z","caller":"traceutil/trace.go:171","msg":"trace[324713988] transaction","detail":"{read_only:false; response_revision:302; number_of_response:1; }","duration":"136.243687ms","start":"2025-11-24T09:05:11.301878Z","end":"2025-11-24T09:05:11.438122Z","steps":["trace[324713988] 'process raft request' (duration: 135.577137ms)"],"step_count":1}
==> kernel <==
09:05:44 up 3:48, 0 user, load average: 4.43, 3.43, 10.79
Linux old-k8s-version-128377 6.8.0-1044-gcp #47~22.04.1-Ubuntu SMP Thu Oct 23 21:07:54 UTC 2025 x86_64 GNU/Linux
PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"
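(The kernel section condenses a few standard host probes; plausible equivalents, since the exact invocations minikube uses here are an assumption:
$ uptime                              # load averages and time since boot
$ uname -a                            # kernel release and architecture
$ grep PRETTY_NAME /etc/os-release    # distribution name)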
==> kindnet [818537e08c0605796949e72c73a034b7d5f104ce598d4a12f0ed8bf30de9c646] <==
I1124 09:05:21.342277 1 main.go:109] connected to apiserver: https://10.96.0.1:443
I1124 09:05:21.342547 1 main.go:139] hostIP = 192.168.103.2
podIP = 192.168.103.2
I1124 09:05:21.342705 1 main.go:148] setting mtu 1500 for CNI
I1124 09:05:21.342728 1 main.go:178] kindnetd IP family: "ipv4"
I1124 09:05:21.342756 1 main.go:182] noMask IPv4 subnets: [10.244.0.0/16]
time="2025-11-24T09:05:21Z" level=info msg="Created plugin 10-kube-network-policies (kindnetd, handles RunPodSandbox,RemovePodSandbox)"
I1124 09:05:21.545109 1 controller.go:377] "Starting controller" name="kube-network-policies"
I1124 09:05:21.545137 1 controller.go:381] "Waiting for informer caches to sync"
I1124 09:05:21.545150 1 shared_informer.go:350] "Waiting for caches to sync" controller="kube-network-policies"
I1124 09:05:21.545827 1 controller.go:390] nri plugin exited: failed to connect to NRI service: dial unix /var/run/nri/nri.sock: connect: no such file or directory
I1124 09:05:22.046295 1 shared_informer.go:357] "Caches are synced" controller="kube-network-policies"
I1124 09:05:22.046329 1 metrics.go:72] Registering metrics
I1124 09:05:22.046391 1 controller.go:711] "Syncing nftables rules"
I1124 09:05:31.547663 1 main.go:297] Handling node with IPs: map[192.168.103.2:{}]
I1124 09:05:31.547728 1 main.go:301] handling current node
I1124 09:05:41.547315 1 main.go:297] Handling node with IPs: map[192.168.103.2:{}]
I1124 09:05:41.547363 1 main.go:301] handling current node
==> kube-apiserver [842bd9db2d84b65b054e4b006bfb9c11b98ac3cdcbe13cd821183480cd046d8a] <==
I1124 09:05:01.506809 1 shared_informer.go:318] Caches are synced for crd-autoregister
I1124 09:05:01.506838 1 aggregator.go:166] initial CRD sync complete...
I1124 09:05:01.506846 1 autoregister_controller.go:141] Starting autoregister controller
I1124 09:05:01.506863 1 cache.go:32] Waiting for caches to sync for autoregister controller
I1124 09:05:01.506869 1 cache.go:39] Caches are synced for autoregister controller
I1124 09:05:01.508109 1 controller.go:624] quota admission added evaluator for: namespaces
I1124 09:05:01.508757 1 shared_informer.go:318] Caches are synced for configmaps
E1124 09:05:01.537227 1 controller.go:146] "Failed to ensure lease exists, will retry" err="namespaces \"kube-system\" not found" interval="200ms"
I1124 09:05:01.741694 1 controller.go:624] quota admission added evaluator for: leases.coordination.k8s.io
I1124 09:05:02.411561 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I1124 09:05:02.415133 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I1124 09:05:02.415155 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I1124 09:05:02.826831 1 controller.go:624] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1124 09:05:02.865354 1 controller.go:624] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1124 09:05:02.945781 1 alloc.go:330] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W1124 09:05:02.951178 1 lease.go:263] Resetting endpoints for master service "kubernetes" to [192.168.103.2]
I1124 09:05:02.952085 1 controller.go:624] quota admission added evaluator for: endpoints
I1124 09:05:02.955858 1 controller.go:624] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1124 09:05:03.457945 1 controller.go:624] quota admission added evaluator for: serviceaccounts
I1124 09:05:04.197911 1 controller.go:624] quota admission added evaluator for: deployments.apps
I1124 09:05:04.208245 1 alloc.go:330] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I1124 09:05:04.218442 1 controller.go:624] quota admission added evaluator for: daemonsets.apps
I1124 09:05:17.015236 1 controller.go:624] quota admission added evaluator for: replicasets.apps
I1124 09:05:17.165046 1 controller.go:624] quota admission added evaluator for: controllerrevisions.apps
==> kube-controller-manager [5d9ec22e03b8b0446d34a5b300037519eb0aa0be6b1e6c451907abb271f71839] <==
I1124 09:05:16.510194 1 node_lifecycle_controller.go:877] "Missing timestamp for Node. Assuming now as a timestamp" node="old-k8s-version-128377"
I1124 09:05:16.510252 1 node_lifecycle_controller.go:1029] "Controller detected that all Nodes are not-Ready. Entering master disruption mode"
I1124 09:05:16.516579 1 shared_informer.go:318] Caches are synced for resource quota
I1124 09:05:16.831807 1 shared_informer.go:318] Caches are synced for garbage collector
I1124 09:05:16.890844 1 shared_informer.go:318] Caches are synced for garbage collector
I1124 09:05:16.890883 1 garbagecollector.go:166] "All resource monitors have synced. Proceeding to collect garbage"
I1124 09:05:17.019027 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set coredns-5dd5756b68 to 2"
I1124 09:05:17.175390 1 event.go:307] "Event occurred" object="kube-system/kindnet" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kindnet-gbp66"
I1124 09:05:17.176958 1 event.go:307] "Event occurred" object="kube-system/kube-proxy" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-fpbs2"
I1124 09:05:17.325895 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-vxxnm"
I1124 09:05:17.332721 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-x5sl2"
I1124 09:05:17.343264 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="324.364712ms"
I1124 09:05:17.351654 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="8.320995ms"
I1124 09:05:17.351793 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="83.483µs"
I1124 09:05:17.672071 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set coredns-5dd5756b68 to 1 from 2"
I1124 09:05:17.682409 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: coredns-5dd5756b68-x5sl2"
I1124 09:05:17.690482 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="20.456609ms"
I1124 09:05:17.698725 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="8.176655ms"
I1124 09:05:17.698851 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="86.584µs"
I1124 09:05:31.598337 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="79.212µs"
I1124 09:05:31.631586 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="116.266µs"
I1124 09:05:32.360508 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="141.431µs"
I1124 09:05:32.386954 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="6.987919ms"
I1124 09:05:32.387048 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="62.305µs"
I1124 09:05:36.514110 1 node_lifecycle_controller.go:1048] "Controller detected that some Nodes are Ready. Exiting master disruption mode"
==> kube-proxy [370631aaaf577fb6a343282108f71bb03e72ef6024de9d9f8e2a2eeb7e16e746] <==
I1124 09:05:17.831726 1 server_others.go:69] "Using iptables proxy"
I1124 09:05:17.841216 1 node.go:141] Successfully retrieved node IP: 192.168.103.2
I1124 09:05:17.866087 1 server.go:632] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I1124 09:05:17.868989 1 server_others.go:152] "Using iptables Proxier"
I1124 09:05:17.869038 1 server_others.go:421] "Detect-local-mode set to ClusterCIDR, but no cluster CIDR for family" ipFamily="IPv6"
I1124 09:05:17.869048 1 server_others.go:438] "Defaulting to no-op detect-local"
I1124 09:05:17.869091 1 proxier.go:251] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
I1124 09:05:17.869396 1 server.go:846] "Version info" version="v1.28.0"
I1124 09:05:17.869419 1 server.go:848] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1124 09:05:17.870089 1 config.go:188] "Starting service config controller"
I1124 09:05:17.870115 1 config.go:315] "Starting node config controller"
I1124 09:05:17.870130 1 shared_informer.go:311] Waiting for caches to sync for service config
I1124 09:05:17.870125 1 config.go:97] "Starting endpoint slice config controller"
I1124 09:05:17.870157 1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
I1124 09:05:17.870135 1 shared_informer.go:311] Waiting for caches to sync for node config
I1124 09:05:17.970983 1 shared_informer.go:318] Caches are synced for endpoint slice config
I1124 09:05:17.970991 1 shared_informer.go:318] Caches are synced for service config
I1124 09:05:17.970967 1 shared_informer.go:318] Caches are synced for node config
==> kube-scheduler [f5eddecfb179fe94de6b3892600fc1870efa5679c82874d72a3b301753e6f7d4] <==
E1124 09:05:01.478877 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E1124 09:05:01.478878 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
W1124 09:05:01.478887 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E1124 09:05:01.478907 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
W1124 09:05:01.478997 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E1124 09:05:01.479055 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
W1124 09:05:01.479077 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E1124 09:05:01.479125 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
W1124 09:05:02.313819 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E1124 09:05:02.313863 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
W1124 09:05:02.319417 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E1124 09:05:02.319451 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
W1124 09:05:02.429310 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E1124 09:05:02.429356 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
W1124 09:05:02.538603 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E1124 09:05:02.538660 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
W1124 09:05:02.549098 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E1124 09:05:02.549140 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
W1124 09:05:02.661900 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E1124 09:05:02.661937 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
W1124 09:05:02.666268 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E1124 09:05:02.666312 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
W1124 09:05:02.688142 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E1124 09:05:02.688189 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
I1124 09:05:03.073951 1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
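The Forbidden warnings above are the usual kube-scheduler startup transient: its informers begin listing before the apiserver has finished bootstrapping the default RBAC policy, and no further errors appear here after 09:05:02. If they persisted, the grant could be probed directly with a SubjectAccessReview; a minimal client-go sketch (assumes a reachable kubeconfig, not part of this test suite):

```go
package main

import (
	"context"
	"fmt"

	authv1 "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// Ask the apiserver the same question the scheduler's informer failed on:
	// may system:kube-scheduler list pods at the cluster scope?
	sar := &authv1.SubjectAccessReview{
		Spec: authv1.SubjectAccessReviewSpec{
			User: "system:kube-scheduler",
			ResourceAttributes: &authv1.ResourceAttributes{
				Verb:     "list",
				Resource: "pods",
			},
		},
	}
	res, err := client.AuthorizationV1().SubjectAccessReviews().Create(
		context.Background(), sar, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("allowed=%v reason=%q\n", res.Status.Allowed, res.Status.Reason)
}
```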
==> kubelet <==
Nov 24 09:05:16 old-k8s-version-128377 kubelet[1521]: I1124 09:05:16.342896 1521 kubelet_network.go:61] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24"
Nov 24 09:05:17 old-k8s-version-128377 kubelet[1521]: I1124 09:05:17.183175 1521 topology_manager.go:215] "Topology Admit Handler" podUID="52128126-550d-4795-9fa1-e1d3d9510dd3" podNamespace="kube-system" podName="kube-proxy-fpbs2"
Nov 24 09:05:17 old-k8s-version-128377 kubelet[1521]: I1124 09:05:17.188113 1521 topology_manager.go:215] "Topology Admit Handler" podUID="49954742-ea7f-466f-80d8-7d6ac88ce36c" podNamespace="kube-system" podName="kindnet-gbp66"
Nov 24 09:05:17 old-k8s-version-128377 kubelet[1521]: I1124 09:05:17.338200 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vzbjt\" (UniqueName: \"kubernetes.io/projected/52128126-550d-4795-9fa1-e1d3d9510dd3-kube-api-access-vzbjt\") pod \"kube-proxy-fpbs2\" (UID: \"52128126-550d-4795-9fa1-e1d3d9510dd3\") " pod="kube-system/kube-proxy-fpbs2"
Nov 24 09:05:17 old-k8s-version-128377 kubelet[1521]: I1124 09:05:17.338280 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-cfg\" (UniqueName: \"kubernetes.io/host-path/49954742-ea7f-466f-80d8-7d6ac88ce36c-cni-cfg\") pod \"kindnet-gbp66\" (UID: \"49954742-ea7f-466f-80d8-7d6ac88ce36c\") " pod="kube-system/kindnet-gbp66"
Nov 24 09:05:17 old-k8s-version-128377 kubelet[1521]: I1124 09:05:17.338319 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/52128126-550d-4795-9fa1-e1d3d9510dd3-lib-modules\") pod \"kube-proxy-fpbs2\" (UID: \"52128126-550d-4795-9fa1-e1d3d9510dd3\") " pod="kube-system/kube-proxy-fpbs2"
Nov 24 09:05:17 old-k8s-version-128377 kubelet[1521]: I1124 09:05:17.338351 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/49954742-ea7f-466f-80d8-7d6ac88ce36c-lib-modules\") pod \"kindnet-gbp66\" (UID: \"49954742-ea7f-466f-80d8-7d6ac88ce36c\") " pod="kube-system/kindnet-gbp66"
Nov 24 09:05:17 old-k8s-version-128377 kubelet[1521]: I1124 09:05:17.338392 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/52128126-550d-4795-9fa1-e1d3d9510dd3-kube-proxy\") pod \"kube-proxy-fpbs2\" (UID: \"52128126-550d-4795-9fa1-e1d3d9510dd3\") " pod="kube-system/kube-proxy-fpbs2"
Nov 24 09:05:17 old-k8s-version-128377 kubelet[1521]: I1124 09:05:17.338424 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/49954742-ea7f-466f-80d8-7d6ac88ce36c-xtables-lock\") pod \"kindnet-gbp66\" (UID: \"49954742-ea7f-466f-80d8-7d6ac88ce36c\") " pod="kube-system/kindnet-gbp66"
Nov 24 09:05:17 old-k8s-version-128377 kubelet[1521]: I1124 09:05:17.338473 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cd5l7\" (UniqueName: \"kubernetes.io/projected/49954742-ea7f-466f-80d8-7d6ac88ce36c-kube-api-access-cd5l7\") pod \"kindnet-gbp66\" (UID: \"49954742-ea7f-466f-80d8-7d6ac88ce36c\") " pod="kube-system/kindnet-gbp66"
Nov 24 09:05:17 old-k8s-version-128377 kubelet[1521]: I1124 09:05:17.338537 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/52128126-550d-4795-9fa1-e1d3d9510dd3-xtables-lock\") pod \"kube-proxy-fpbs2\" (UID: \"52128126-550d-4795-9fa1-e1d3d9510dd3\") " pod="kube-system/kube-proxy-fpbs2"
Nov 24 09:05:18 old-k8s-version-128377 kubelet[1521]: I1124 09:05:18.914069 1521 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kube-proxy-fpbs2" podStartSLOduration=1.913988204 podCreationTimestamp="2025-11-24 09:05:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 09:05:18.331224336 +0000 UTC m=+14.156867889" watchObservedRunningTime="2025-11-24 09:05:18.913988204 +0000 UTC m=+14.739631764"
Nov 24 09:05:21 old-k8s-version-128377 kubelet[1521]: I1124 09:05:21.337175 1521 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kindnet-gbp66" podStartSLOduration=1.258069975 podCreationTimestamp="2025-11-24 09:05:17 +0000 UTC" firstStartedPulling="2025-11-24 09:05:17.956037798 +0000 UTC m=+13.781681343" lastFinishedPulling="2025-11-24 09:05:21.035088666 +0000 UTC m=+16.860732211" observedRunningTime="2025-11-24 09:05:21.33698865 +0000 UTC m=+17.162632223" watchObservedRunningTime="2025-11-24 09:05:21.337120843 +0000 UTC m=+17.162764404"
Nov 24 09:05:31 old-k8s-version-128377 kubelet[1521]: I1124 09:05:31.576686 1521 kubelet_node_status.go:493] "Fast updating node status as it just became ready"
Nov 24 09:05:31 old-k8s-version-128377 kubelet[1521]: I1124 09:05:31.597206 1521 topology_manager.go:215] "Topology Admit Handler" podUID="7e4f56c0-0b49-47cd-9278-129ad898b781" podNamespace="kube-system" podName="storage-provisioner"
Nov 24 09:05:31 old-k8s-version-128377 kubelet[1521]: I1124 09:05:31.598949 1521 topology_manager.go:215] "Topology Admit Handler" podUID="b84bae0f-9f75-4d1c-b2ed-da0c10a141cf" podNamespace="kube-system" podName="coredns-5dd5756b68-vxxnm"
Nov 24 09:05:31 old-k8s-version-128377 kubelet[1521]: I1124 09:05:31.745876 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/7e4f56c0-0b49-47cd-9278-129ad898b781-tmp\") pod \"storage-provisioner\" (UID: \"7e4f56c0-0b49-47cd-9278-129ad898b781\") " pod="kube-system/storage-provisioner"
Nov 24 09:05:31 old-k8s-version-128377 kubelet[1521]: I1124 09:05:31.746005 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b84bae0f-9f75-4d1c-b2ed-da0c10a141cf-config-volume\") pod \"coredns-5dd5756b68-vxxnm\" (UID: \"b84bae0f-9f75-4d1c-b2ed-da0c10a141cf\") " pod="kube-system/coredns-5dd5756b68-vxxnm"
Nov 24 09:05:31 old-k8s-version-128377 kubelet[1521]: I1124 09:05:31.746049 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s87ck\" (UniqueName: \"kubernetes.io/projected/b84bae0f-9f75-4d1c-b2ed-da0c10a141cf-kube-api-access-s87ck\") pod \"coredns-5dd5756b68-vxxnm\" (UID: \"b84bae0f-9f75-4d1c-b2ed-da0c10a141cf\") " pod="kube-system/coredns-5dd5756b68-vxxnm"
Nov 24 09:05:31 old-k8s-version-128377 kubelet[1521]: I1124 09:05:31.746075 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mp79g\" (UniqueName: \"kubernetes.io/projected/7e4f56c0-0b49-47cd-9278-129ad898b781-kube-api-access-mp79g\") pod \"storage-provisioner\" (UID: \"7e4f56c0-0b49-47cd-9278-129ad898b781\") " pod="kube-system/storage-provisioner"
Nov 24 09:05:32 old-k8s-version-128377 kubelet[1521]: I1124 09:05:32.360059 1521 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/coredns-5dd5756b68-vxxnm" podStartSLOduration=15.360007602 podCreationTimestamp="2025-11-24 09:05:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 09:05:32.35995945 +0000 UTC m=+28.185603012" watchObservedRunningTime="2025-11-24 09:05:32.360007602 +0000 UTC m=+28.185651165"
Nov 24 09:05:32 old-k8s-version-128377 kubelet[1521]: I1124 09:05:32.379733 1521 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=15.379681272 podCreationTimestamp="2025-11-24 09:05:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 09:05:32.370112867 +0000 UTC m=+28.195756426" watchObservedRunningTime="2025-11-24 09:05:32.379681272 +0000 UTC m=+28.205324835"
Nov 24 09:05:34 old-k8s-version-128377 kubelet[1521]: I1124 09:05:34.439352 1521 topology_manager.go:215] "Topology Admit Handler" podUID="bfaec734-d874-4dcb-b31f-feb87adccfca" podNamespace="default" podName="busybox"
Nov 24 09:05:34 old-k8s-version-128377 kubelet[1521]: I1124 09:05:34.561236 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qwqg6\" (UniqueName: \"kubernetes.io/projected/bfaec734-d874-4dcb-b31f-feb87adccfca-kube-api-access-qwqg6\") pod \"busybox\" (UID: \"bfaec734-d874-4dcb-b31f-feb87adccfca\") " pod="default/busybox"
Nov 24 09:05:38 old-k8s-version-128377 kubelet[1521]: I1124 09:05:38.375611 1521 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="default/busybox" podStartSLOduration=1.866491732 podCreationTimestamp="2025-11-24 09:05:34 +0000 UTC" firstStartedPulling="2025-11-24 09:05:34.853152472 +0000 UTC m=+30.678796027" lastFinishedPulling="2025-11-24 09:05:37.362217947 +0000 UTC m=+33.187861503" observedRunningTime="2025-11-24 09:05:38.375372923 +0000 UTC m=+34.201016485" watchObservedRunningTime="2025-11-24 09:05:38.375557208 +0000 UTC m=+34.201200770"
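The podStartSLOduration values in these kubelet lines are consistent with (watchObservedRunningTime - podCreationTimestamp) minus the image-pull window (lastFinishedPulling - firstStartedPulling); for kindnet-gbp66 above: 4.337120843s - 3.079050868s = 1.258069975s. A small sketch reproducing the busybox figure from the logged timestamps (illustrative, not kubelet's implementation):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	parse := func(s string) time.Time {
		// Layout matching how the kubelet log above formats time.Time values.
		t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
		if err != nil {
			panic(err)
		}
		return t
	}

	created := parse("2025-11-24 09:05:34 +0000 UTC")             // podCreationTimestamp
	observed := parse("2025-11-24 09:05:38.375557208 +0000 UTC")  // watchObservedRunningTime
	pullStart := parse("2025-11-24 09:05:34.853152472 +0000 UTC") // firstStartedPulling
	pullEnd := parse("2025-11-24 09:05:37.362217947 +0000 UTC")   // lastFinishedPulling

	// Startup latency excluding the time spent pulling the image.
	slo := observed.Sub(created) - pullEnd.Sub(pullStart)
	fmt.Println(slo.Seconds()) // ~1.866491733, matching podStartSLOduration for busybox
}
```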
==> storage-provisioner [a9a5857553e67019e47641c1970bb0d5555afd6b608c94a94501dd485efac0c4] <==
I1124 09:05:32.081185 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1124 09:05:32.090604 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1124 09:05:32.090641 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I1124 09:05:32.097885 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I1124 09:05:32.097963 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"742d8911-ea16-4251-8cf0-6f909959732d", APIVersion:"v1", ResourceVersion:"433", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' old-k8s-version-128377_807761f2-87be-4f83-a3e6-a9218ea13b30 became leader
I1124 09:05:32.098144 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_old-k8s-version-128377_807761f2-87be-4f83-a3e6-a9218ea13b30!
I1124 09:05:32.198942 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_old-k8s-version-128377_807761f2-87be-4f83-a3e6-a9218ea13b30!
-- /stdout --
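The storage-provisioner section above shows standard client-go leader election on a lock named kube-system/k8s.io-minikube-hostpath (backed by an Endpoints object, per the emitted event). A minimal sketch of the same pattern, using the current Leases-based lock rather than the deprecated Endpoints one; identity and timings here are illustrative:

```go
package main

import (
	"context"
	"log"
	"os"
	"time"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		log.Fatal(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	host, _ := os.Hostname()
	lock, err := resourcelock.New(resourcelock.LeasesResourceLock,
		"kube-system", "k8s.io-minikube-hostpath", // lock name from the log above
		client.CoreV1(), client.CoordinationV1(),
		resourcelock.ResourceLockConfig{Identity: host})
	if err != nil {
		log.Fatal(err)
	}

	// Blocks: campaigns for the lease and runs OnStartedLeading while holding it.
	leaderelection.RunOrDie(context.Background(), leaderelection.LeaderElectionConfig{
		Lock:          lock,
		LeaseDuration: 15 * time.Second,
		RenewDeadline: 10 * time.Second,
		RetryPeriod:   2 * time.Second,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) {
				log.Println("acquired lease; starting provisioner controller")
			},
			OnStoppedLeading: func() { log.Println("lost lease") },
		},
	})
}
```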
helpers_test.go:262: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p old-k8s-version-128377 -n old-k8s-version-128377
helpers_test.go:269: (dbg) Run: kubectl --context old-k8s-version-128377 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:293: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: end of post-mortem logs <<<
helpers_test.go:294: ---------------------/post-mortem---------------------------------
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p old-k8s-version-128377 -n old-k8s-version-128377
helpers_test.go:252: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-amd64 -p old-k8s-version-128377 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-amd64 -p old-k8s-version-128377 logs -n 25: (1.19868846s)
helpers_test.go:260: TestStartStop/group/old-k8s-version/serial/DeployApp logs:
-- stdout --
==> Audit <==
┌─────────┬────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬────────────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼────────────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ ssh │ -p cilium-203355 sudo journalctl -xeu kubelet --all --full --no-pager │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ ssh │ -p cilium-203355 sudo cat /etc/kubernetes/kubelet.conf │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ ssh │ -p cilium-203355 sudo cat /var/lib/kubelet/config.yaml │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ ssh │ -p cilium-203355 sudo systemctl status docker --all --full --no-pager │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ delete │ -p missing-upgrade-058813 │ missing-upgrade-058813 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ 24 Nov 25 09:04 UTC │
│ ssh │ -p cilium-203355 sudo systemctl cat docker --no-pager │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ ssh │ -p cilium-203355 sudo cat /etc/docker/daemon.json │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ ssh │ -p cilium-203355 sudo docker system info │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ ssh │ -p cilium-203355 sudo systemctl status cri-docker --all --full --no-pager │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ ssh │ -p cilium-203355 sudo systemctl cat cri-docker --no-pager │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ ssh │ -p cilium-203355 sudo cat /etc/systemd/system/cri-docker.service.d/10-cni.conf │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ ssh │ -p cilium-203355 sudo cat /usr/lib/systemd/system/cri-docker.service │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ ssh │ -p cilium-203355 sudo cri-dockerd --version │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ ssh │ -p cilium-203355 sudo systemctl status containerd --all --full --no-pager │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ ssh │ -p cilium-203355 sudo systemctl cat containerd --no-pager │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ ssh │ -p cilium-203355 sudo cat /lib/systemd/system/containerd.service │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ ssh │ -p cilium-203355 sudo cat /etc/containerd/config.toml │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ ssh │ -p cilium-203355 sudo containerd config dump │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ ssh │ -p cilium-203355 sudo systemctl status crio --all --full --no-pager │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ ssh │ -p cilium-203355 sudo systemctl cat crio --no-pager │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ ssh │ -p cilium-203355 sudo find /etc/crio -type f -exec sh -c 'echo {}; cat {}' \; │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ ssh │ -p cilium-203355 sudo crio config │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ │
│ delete │ -p cilium-203355 │ cilium-203355 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ 24 Nov 25 09:04 UTC │
│ start │ -p old-k8s-version-128377 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.28.0 │ old-k8s-version-128377 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ 24 Nov 25 09:05 UTC │
│ start │ -p no-preload-820576 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.35.0-beta.0 │ no-preload-820576 │ jenkins │ v1.37.0 │ 24 Nov 25 09:04 UTC │ 24 Nov 25 09:05 UTC │
└─────────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴────────────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/11/24 09:04:47
Running on machine: ubuntu-20-agent-10
Binary: Built with gc go1.25.3 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1124 09:04:47.686335 696018 out.go:360] Setting OutFile to fd 1 ...
I1124 09:04:47.686445 696018 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1124 09:04:47.686456 696018 out.go:374] Setting ErrFile to fd 2...
I1124 09:04:47.686474 696018 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1124 09:04:47.686683 696018 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21978-435860/.minikube/bin
I1124 09:04:47.687133 696018 out.go:368] Setting JSON to false
I1124 09:04:47.688408 696018 start.go:133] hostinfo: {"hostname":"ubuntu-20-agent-10","uptime":13624,"bootTime":1763961464,"procs":294,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1044-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I1124 09:04:47.688532 696018 start.go:143] virtualization: kvm guest
I1124 09:04:47.690354 696018 out.go:179] * [no-preload-820576] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
I1124 09:04:47.691472 696018 out.go:179] - MINIKUBE_LOCATION=21978
I1124 09:04:47.691501 696018 notify.go:221] Checking for updates...
I1124 09:04:47.693590 696018 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1124 09:04:47.694681 696018 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21978-435860/kubeconfig
I1124 09:04:47.695683 696018 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21978-435860/.minikube
I1124 09:04:47.697109 696018 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64
I1124 09:04:47.698248 696018 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1124 09:04:47.699807 696018 config.go:182] Loaded profile config "cert-expiration-869306": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.2
I1124 09:04:47.699947 696018 config.go:182] Loaded profile config "kubernetes-upgrade-521313": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.35.0-beta.0
I1124 09:04:47.700091 696018 config.go:182] Loaded profile config "old-k8s-version-128377": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1124 09:04:47.700236 696018 driver.go:422] Setting default libvirt URI to qemu:///system
I1124 09:04:47.724639 696018 docker.go:124] docker version: linux-29.0.2:Docker Engine - Community
I1124 09:04:47.724770 696018 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1124 09:04:47.791833 696018 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:2 ContainersRunning:2 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:52 OomKillDisable:false NGoroutines:66 SystemTime:2025-11-24 09:04:47.780432821 +0000 UTC LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-1044-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652076544 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-10 Labels:[] ExperimentalBuild:false ServerVersion:29.0.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:fcd43222d6b07379a4be9786bda52438f0dd16a1 Expected:} RuncCommit:{ID:v1.3.3-0-gd842d771 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.30.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.40.3] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v1.0.1] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1124 09:04:47.791998 696018 docker.go:319] overlay module found
I1124 09:04:47.794089 696018 out.go:179] * Using the docker driver based on user configuration
I1124 09:04:47.795621 696018 start.go:309] selected driver: docker
I1124 09:04:47.795639 696018 start.go:927] validating driver "docker" against <nil>
I1124 09:04:47.795651 696018 start.go:938] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1124 09:04:47.796325 696018 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1124 09:04:47.859511 696018 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:3 ContainersRunning:2 ContainersPaused:0 ContainersStopped:1 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:61 OomKillDisable:false NGoroutines:86 SystemTime:2025-11-24 09:04:47.848833175 +0000 UTC LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-1044-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652076544 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-10 Labels:[] ExperimentalBuild:false ServerVersion:29.0.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:fcd43222d6b07379a4be9786bda52438f0dd16a1 Expected:} RuncCommit:{ID:v1.3.3-0-gd842d771 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.30.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.40.3] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v1.0.1] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1124 09:04:47.859748 696018 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I1124 09:04:47.859957 696018 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1124 09:04:47.861778 696018 out.go:179] * Using Docker driver with root privileges
I1124 09:04:47.862632 696018 cni.go:84] Creating CNI manager for ""
I1124 09:04:47.862696 696018 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1124 09:04:47.862708 696018 start_flags.go:336] Found "CNI" CNI - setting NetworkPlugin=cni
I1124 09:04:47.862775 696018 start.go:353] cluster config:
{Name:no-preload-820576 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.35.0-beta.0 ClusterName:no-preload-820576 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.35.0-beta.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1124 09:04:47.863875 696018 out.go:179] * Starting "no-preload-820576" primary control-plane node in "no-preload-820576" cluster
I1124 09:04:47.864812 696018 cache.go:134] Beginning downloading kic base image for docker with containerd
I1124 09:04:47.865865 696018 out.go:179] * Pulling base image v0.0.48-1763789673-21948 ...
I1124 09:04:47.866835 696018 preload.go:188] Checking if preload exists for k8s version v1.35.0-beta.0 and runtime containerd
I1124 09:04:47.866921 696018 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f in local docker daemon
I1124 09:04:47.866958 696018 profile.go:143] Saving config to /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/config.json ...
I1124 09:04:47.867001 696018 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/config.json: {Name:mk04f43d651118a00ac1be32029cffb149669d46 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 09:04:47.867208 696018 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubeadm?checksum=file:https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubeadm.sha256
I1124 09:04:47.890231 696018 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f in local docker daemon, skipping pull
I1124 09:04:47.890260 696018 cache.go:158] gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f exists in daemon, skipping load
I1124 09:04:47.890281 696018 cache.go:243] Successfully downloaded all kic artifacts
I1124 09:04:47.890321 696018 start.go:360] acquireMachinesLock for no-preload-820576: {Name:mk6b6fb581999217c645edacaa9c18971e97964f Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 09:04:47.890432 696018 start.go:364] duration metric: took 88.402µs to acquireMachinesLock for "no-preload-820576"
I1124 09:04:47.890474 696018 start.go:93] Provisioning new machine with config: &{Name:no-preload-820576 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.35.0-beta.0 ClusterName:no-preload-820576 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.35.0-beta.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.35.0-beta.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1124 09:04:47.890567 696018 start.go:125] createHost starting for "" (driver="docker")
I1124 09:04:48.739369 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": read tcp 192.168.76.1:40906->192.168.76.2:8443: read: connection reset by peer
I1124 09:04:48.739430 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:04:48.740184 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:04:48.920539 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:04:48.921019 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:04:49.420530 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:04:49.420996 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
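The interleaved lines from pid 685562 belong to another minikube run in this parallel job; they show an apiserver readiness loop probing https://192.168.76.2:8443/healthz roughly every 500ms while the endpoint still refuses connections. A minimal sketch of that kind of poll (illustrative, not minikube's api_server.go):

```go
package main

import (
	"context"
	"crypto/tls"
	"fmt"
	"net/http"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	// Endpoint from the log above; the serving cert is self-signed, so this
	// sketch skips verification rather than loading the cluster CA.
	url := "https://192.168.76.2:8443/healthz"
	client := &http.Client{
		Timeout:   2 * time.Second,
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}

	err := wait.PollUntilContextTimeout(context.Background(),
		500*time.Millisecond, // matches the ~500ms spacing of the checks above
		2*time.Minute, true,
		func(ctx context.Context) (bool, error) {
			resp, err := client.Get(url)
			if err != nil {
				return false, nil // connection refused/reset: keep polling
			}
			defer resp.Body.Close()
			return resp.StatusCode == http.StatusOK, nil
		})
	fmt.Println("healthz ready:", err == nil)
}
```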
I1124 09:04:46.813535 695520 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I1124 09:04:46.813778 695520 start.go:159] libmachine.API.Create for "old-k8s-version-128377" (driver="docker")
I1124 09:04:46.813816 695520 client.go:173] LocalClient.Create starting
I1124 09:04:46.813892 695520 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21978-435860/.minikube/certs/ca.pem
I1124 09:04:46.813936 695520 main.go:143] libmachine: Decoding PEM data...
I1124 09:04:46.813967 695520 main.go:143] libmachine: Parsing certificate...
I1124 09:04:46.814043 695520 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21978-435860/.minikube/certs/cert.pem
I1124 09:04:46.814076 695520 main.go:143] libmachine: Decoding PEM data...
I1124 09:04:46.814095 695520 main.go:143] libmachine: Parsing certificate...
I1124 09:04:46.814441 695520 cli_runner.go:164] Run: docker network inspect old-k8s-version-128377 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1124 09:04:46.831913 695520 cli_runner.go:211] docker network inspect old-k8s-version-128377 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1124 09:04:46.831996 695520 network_create.go:284] running [docker network inspect old-k8s-version-128377] to gather additional debugging logs...
I1124 09:04:46.832018 695520 cli_runner.go:164] Run: docker network inspect old-k8s-version-128377
W1124 09:04:46.848875 695520 cli_runner.go:211] docker network inspect old-k8s-version-128377 returned with exit code 1
I1124 09:04:46.848912 695520 network_create.go:287] error running [docker network inspect old-k8s-version-128377]: docker network inspect old-k8s-version-128377: exit status 1
stdout:
[]
stderr:
Error response from daemon: network old-k8s-version-128377 not found
I1124 09:04:46.848928 695520 network_create.go:289] output of [docker network inspect old-k8s-version-128377]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network old-k8s-version-128377 not found
** /stderr **
I1124 09:04:46.849044 695520 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1124 09:04:46.866840 695520 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-c654f70fdf0e IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:12:f7:ca:91:9d:ad} reservation:<nil>}
I1124 09:04:46.867443 695520 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-f1081c4000c5 IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:ea:b1:6d:32:2c:78} reservation:<nil>}
I1124 09:04:46.868124 695520 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-30fdd1988974 IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:f2:59:2f:0a:61:81} reservation:<nil>}
I1124 09:04:46.868877 695520 network.go:211] skipping subnet 192.168.76.0/24 that is taken: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName:br-6cd297979890 IfaceIPv4:192.168.76.1 IfaceMTU:1500 IfaceMAC:62:91:f3:e4:95:17} reservation:<nil>}
I1124 09:04:46.869272 695520 network.go:211] skipping subnet 192.168.85.0/24 that is taken: &{IP:192.168.85.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.85.0/24 Gateway:192.168.85.1 ClientMin:192.168.85.2 ClientMax:192.168.85.254 Broadcast:192.168.85.255 IsPrivate:true Interface:{IfaceName:br-9bf62793deff IfaceIPv4:192.168.85.1 IfaceMTU:1500 IfaceMAC:0a:d1:a9:3b:89:29} reservation:<nil>}
I1124 09:04:46.869983 695520 network.go:211] skipping subnet 192.168.94.0/24 that is taken: &{IP:192.168.94.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.94.0/24 Gateway:192.168.94.1 ClientMin:192.168.94.2 ClientMax:192.168.94.254 Broadcast:192.168.94.255 IsPrivate:true Interface:{IfaceName:br-5fa0f78c53ad IfaceIPv4:192.168.94.1 IfaceMTU:1500 IfaceMAC:9e:96:d6:0a:fe:a6} reservation:<nil>}
I1124 09:04:46.870809 695520 network.go:206] using free private subnet 192.168.103.0/24: &{IP:192.168.103.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.103.0/24 Gateway:192.168.103.1 ClientMin:192.168.103.2 ClientMax:192.168.103.254 Broadcast:192.168.103.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001e158e0}
I1124 09:04:46.870832 695520 network_create.go:124] attempt to create docker network old-k8s-version-128377 192.168.103.0/24 with gateway 192.168.103.1 and MTU of 1500 ...
I1124 09:04:46.870880 695520 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.103.0/24 --gateway=192.168.103.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=old-k8s-version-128377 old-k8s-version-128377
I1124 09:04:46.993201 695520 network_create.go:108] docker network old-k8s-version-128377 192.168.103.0/24 created
I1124 09:04:46.993243 695520 kic.go:121] calculated static IP "192.168.103.2" for the "old-k8s-version-128377" container
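The scan above walks candidate private /24s from 192.168.49.0/24 upward in steps of 9 until one has no existing bridge interface, and the node then gets the first client address (gateway + 1, here .2). A sketch of that selection under the step size visible in this log (freeSubnet is hypothetical; real minikube also probes host interfaces and records reservations):

package main

import "fmt"

// freeSubnet steps the third octet from 49 upward by 9 (the pattern in this
// log: 49, 58, 67, 76, 85, 94, 103) and returns the first untaken candidate.
func freeSubnet(taken map[int]bool) (subnet, gateway, nodeIP string) {
	for octet := 49; octet < 255; octet += 9 {
		if taken[octet] {
			continue
		}
		return fmt.Sprintf("192.168.%d.0/24", octet),
			fmt.Sprintf("192.168.%d.1", octet), // bridge gateway
			fmt.Sprintf("192.168.%d.2", octet) // first client IP -> the node
	}
	return "", "", ""
}

func main() {
	taken := map[int]bool{49: true, 58: true, 67: true, 76: true, 85: true, 94: true}
	fmt.Println(freeSubnet(taken)) // 192.168.103.0/24 192.168.103.1 192.168.103.2
}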
I1124 09:04:46.993321 695520 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1124 09:04:47.015308 695520 cli_runner.go:164] Run: docker volume create old-k8s-version-128377 --label name.minikube.sigs.k8s.io=old-k8s-version-128377 --label created_by.minikube.sigs.k8s.io=true
I1124 09:04:47.034791 695520 oci.go:103] Successfully created a docker volume old-k8s-version-128377
I1124 09:04:47.034869 695520 cli_runner.go:164] Run: docker run --rm --name old-k8s-version-128377-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=old-k8s-version-128377 --entrypoint /usr/bin/test -v old-k8s-version-128377:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -d /var/lib
I1124 09:04:47.772927 695520 oci.go:107] Successfully prepared a docker volume old-k8s-version-128377
I1124 09:04:47.773023 695520 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1124 09:04:47.773041 695520 kic.go:194] Starting extracting preloaded images to volume ...
I1124 09:04:47.773133 695520 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21978-435860/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v old-k8s-version-128377:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -I lz4 -xf /preloaded.tar -C /extractDir
I1124 09:04:50.987600 695520 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21978-435860/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v old-k8s-version-128377:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -I lz4 -xf /preloaded.tar -C /extractDir: (3.214396647s)
I1124 09:04:50.987639 695520 kic.go:203] duration metric: took 3.214593361s to extract preloaded images to volume ...
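The preload is unpacked by a throwaway "sidecar" container: the lz4 tarball is bind-mounted read-only at /preloaded.tar, the named volume at /extractDir, and the image's /usr/bin/tar is used as the entrypoint so nothing else runs. A sketch of the same pattern via os/exec (extractPreload and the values passed in main are stand-ins for the paths and image digest in the log):

package main

import (
	"fmt"
	"os/exec"
)

// extractPreload mirrors the `docker run --rm --entrypoint /usr/bin/tar ...`
// invocation from the log: a short-lived container whose only job is to
// unpack an lz4 tarball into a named volume.
func extractPreload(tarball, volume, image string) error {
	cmd := exec.Command("docker", "run", "--rm",
		"--entrypoint", "/usr/bin/tar",
		"-v", tarball+":/preloaded.tar:ro",
		"-v", volume+":/extractDir",
		image,
		"-I", "lz4", "-xf", "/preloaded.tar", "-C", "/extractDir")
	out, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("extract preload: %v: %s", err, out)
	}
	return nil
}

func main() {
	err := extractPreload("/path/to/preloaded-images.tar.lz4", "my-volume",
		"gcr.io/k8s-minikube/kicbase-builds:TAG")
	fmt.Println(err)
}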
W1124 09:04:50.987789 695520 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
W1124 09:04:50.987849 695520 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.
I1124 09:04:50.987920 695520 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1124 09:04:51.061728 695520 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname old-k8s-version-128377 --name old-k8s-version-128377 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=old-k8s-version-128377 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=old-k8s-version-128377 --network old-k8s-version-128377 --ip 192.168.103.2 --volume old-k8s-version-128377:/var --security-opt apparmor=unconfined --memory=3072mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f
I1124 09:04:51.401514 695520 cli_runner.go:164] Run: docker container inspect old-k8s-version-128377 --format={{.State.Running}}
I1124 09:04:51.426748 695520 cli_runner.go:164] Run: docker container inspect old-k8s-version-128377 --format={{.State.Status}}
I1124 09:04:51.456228 695520 cli_runner.go:164] Run: docker exec old-k8s-version-128377 stat /var/lib/dpkg/alternatives/iptables
I1124 09:04:51.515517 695520 oci.go:144] the created container "old-k8s-version-128377" has a running status.
I1124 09:04:51.515571 695520 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21978-435860/.minikube/machines/old-k8s-version-128377/id_rsa...
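The ssh key minted here is what kic_runner later installs as /home/docker/.ssh/authorized_keys inside the container. A sketch of generating such a pair (writeSSHKeyPair is hypothetical; 2048-bit RSA and the on-disk PEM format are assumptions of the sketch):

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"

	"golang.org/x/crypto/ssh"
)

// writeSSHKeyPair emits id_rsa / id_rsa.pub in the authorized_keys format
// the log later copies into the container.
func writeSSHKeyPair(path string) error {
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return err
	}
	privPEM := pem.EncodeToMemory(&pem.Block{
		Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv),
	})
	if err := os.WriteFile(path, privPEM, 0600); err != nil {
		return err
	}
	pub, err := ssh.NewPublicKey(&priv.PublicKey)
	if err != nil {
		return err
	}
	return os.WriteFile(path+".pub", ssh.MarshalAuthorizedKey(pub), 0644)
}

func main() {
	fmt.Println(writeSSHKeyPair("/tmp/id_rsa"))
}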
I1124 09:04:47.893309 696018 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I1124 09:04:47.893645 696018 start.go:159] libmachine.API.Create for "no-preload-820576" (driver="docker")
I1124 09:04:47.893687 696018 client.go:173] LocalClient.Create starting
I1124 09:04:47.893789 696018 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21978-435860/.minikube/certs/ca.pem
I1124 09:04:47.893833 696018 main.go:143] libmachine: Decoding PEM data...
I1124 09:04:47.893861 696018 main.go:143] libmachine: Parsing certificate...
I1124 09:04:47.893953 696018 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21978-435860/.minikube/certs/cert.pem
I1124 09:04:47.893982 696018 main.go:143] libmachine: Decoding PEM data...
I1124 09:04:47.893999 696018 main.go:143] libmachine: Parsing certificate...
I1124 09:04:47.894436 696018 cli_runner.go:164] Run: docker network inspect no-preload-820576 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1124 09:04:47.915789 696018 cli_runner.go:211] docker network inspect no-preload-820576 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1124 09:04:47.915886 696018 network_create.go:284] running [docker network inspect no-preload-820576] to gather additional debugging logs...
I1124 09:04:47.915925 696018 cli_runner.go:164] Run: docker network inspect no-preload-820576
W1124 09:04:47.939725 696018 cli_runner.go:211] docker network inspect no-preload-820576 returned with exit code 1
I1124 09:04:47.939760 696018 network_create.go:287] error running [docker network inspect no-preload-820576]: docker network inspect no-preload-820576: exit status 1
stdout:
[]
stderr:
Error response from daemon: network no-preload-820576 not found
I1124 09:04:47.939788 696018 network_create.go:289] output of [docker network inspect no-preload-820576]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network no-preload-820576 not found
** /stderr **
I1124 09:04:47.939956 696018 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1124 09:04:47.960368 696018 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-c654f70fdf0e IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:12:f7:ca:91:9d:ad} reservation:<nil>}
I1124 09:04:47.961456 696018 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-f1081c4000c5 IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:ea:b1:6d:32:2c:78} reservation:<nil>}
I1124 09:04:47.962397 696018 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-30fdd1988974 IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:f2:59:2f:0a:61:81} reservation:<nil>}
I1124 09:04:47.963597 696018 network.go:211] skipping subnet 192.168.76.0/24 that is taken: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName:br-6cd297979890 IfaceIPv4:192.168.76.1 IfaceMTU:1500 IfaceMAC:62:91:f3:e4:95:17} reservation:<nil>}
I1124 09:04:47.964832 696018 network.go:206] using free private subnet 192.168.85.0/24: &{IP:192.168.85.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.85.0/24 Gateway:192.168.85.1 ClientMin:192.168.85.2 ClientMax:192.168.85.254 Broadcast:192.168.85.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001e9cf50}
I1124 09:04:47.964868 696018 network_create.go:124] attempt to create docker network no-preload-820576 192.168.85.0/24 with gateway 192.168.85.1 and MTU of 1500 ...
I1124 09:04:47.964929 696018 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.85.0/24 --gateway=192.168.85.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=no-preload-820576 no-preload-820576
I1124 09:04:48.017684 696018 network_create.go:108] docker network no-preload-820576 192.168.85.0/24 created
I1124 09:04:48.017725 696018 kic.go:121] calculated static IP "192.168.85.2" for the "no-preload-820576" container
I1124 09:04:48.017804 696018 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1124 09:04:48.037793 696018 cli_runner.go:164] Run: docker volume create no-preload-820576 --label name.minikube.sigs.k8s.io=no-preload-820576 --label created_by.minikube.sigs.k8s.io=true
I1124 09:04:48.057638 696018 oci.go:103] Successfully created a docker volume no-preload-820576
I1124 09:04:48.057738 696018 cli_runner.go:164] Run: docker run --rm --name no-preload-820576-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=no-preload-820576 --entrypoint /usr/bin/test -v no-preload-820576:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -d /var/lib
I1124 09:04:48.192090 696018 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubeadm?checksum=file:https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubeadm.sha256
I1124 09:04:48.509962 696018 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubeadm?checksum=file:https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubeadm.sha256
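The "?checksum=file:...sha256" suffix indicates a download verified against the published .sha256 sidecar rather than a locally cached binary. A sketch of that verification (fetchVerified is a hypothetical helper):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"net/http"
	"strings"
)

// fetchVerified downloads url, hashes the body with SHA-256, and compares it
// against the first hex field of the checksum sidecar at sumURL.
func fetchVerified(url, sumURL string) ([]byte, error) {
	sumResp, err := http.Get(sumURL)
	if err != nil {
		return nil, err
	}
	defer sumResp.Body.Close()
	sumData, err := io.ReadAll(sumResp.Body)
	if err != nil {
		return nil, err
	}
	fields := strings.Fields(string(sumData))
	if len(fields) == 0 {
		return nil, fmt.Errorf("empty checksum file %s", sumURL)
	}
	want := fields[0]

	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	sum := sha256.Sum256(body)
	if got := hex.EncodeToString(sum[:]); got != want {
		return nil, fmt.Errorf("checksum mismatch for %s: got %s want %s", url, got, want)
	}
	return body, nil
}

func main() {
	const base = "https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubeadm"
	body, err := fetchVerified(base, base+".sha256")
	fmt.Println(len(body), err)
}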
I1124 09:04:48.827547 696018 cache.go:107] acquiring lock: {Name:mkbcabeb5a23ff077ffdad64c71e9fe699d94040 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 09:04:48.827544 696018 cache.go:107] acquiring lock: {Name:mk92c82896924ab47423467b25ccd98ee4128baa Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 09:04:48.827656 696018 cache.go:115] /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 exists
I1124 09:04:48.827672 696018 cache.go:115] /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.35.0-beta.0 exists
I1124 09:04:48.827672 696018 cache.go:96] cache image "gcr.io/k8s-minikube/storage-provisioner:v5" -> "/home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5" took 138.757µs
I1124 09:04:48.827689 696018 cache.go:80] save to tar file gcr.io/k8s-minikube/storage-provisioner:v5 -> /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 succeeded
I1124 09:04:48.827683 696018 cache.go:96] cache image "registry.k8s.io/kube-apiserver:v1.35.0-beta.0" -> "/home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.35.0-beta.0" took 176.678µs
I1124 09:04:48.827708 696018 cache.go:80] save to tar file registry.k8s.io/kube-apiserver:v1.35.0-beta.0 -> /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.35.0-beta.0 succeeded
I1124 09:04:48.827708 696018 cache.go:107] acquiring lock: {Name:mkf3a006b133f81ed32779d427a8d0a9b25f9000 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 09:04:48.827735 696018 cache.go:107] acquiring lock: {Name:mkd74819cb24442927f7fb2cffd47478de40e14c Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 09:04:48.827766 696018 cache.go:115] /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 exists
I1124 09:04:48.827773 696018 cache.go:96] cache image "registry.k8s.io/pause:3.10.1" -> "/home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1" took 69.196µs
I1124 09:04:48.827780 696018 cache.go:80] save to tar file registry.k8s.io/pause:3.10.1 -> /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 succeeded
I1124 09:04:48.827788 696018 cache.go:115] /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.5.24-0 exists
I1124 09:04:48.827796 696018 cache.go:96] cache image "registry.k8s.io/etcd:3.5.24-0" -> "/home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.5.24-0" took 65.204µs
I1124 09:04:48.827804 696018 cache.go:80] save to tar file registry.k8s.io/etcd:3.5.24-0 -> /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.5.24-0 succeeded
I1124 09:04:48.827791 696018 cache.go:107] acquiring lock: {Name:mk6b573bbd33cfc3c3f77668030fb064598572fd Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 09:04:48.827820 696018 cache.go:107] acquiring lock: {Name:mk7f052905284f586f4f1cf24b8c34cc48e0b85b Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 09:04:48.827866 696018 cache.go:115] /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.35.0-beta.0 exists
I1124 09:04:48.827873 696018 cache.go:96] cache image "registry.k8s.io/kube-scheduler:v1.35.0-beta.0" -> "/home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.35.0-beta.0" took 57.027µs
I1124 09:04:48.827882 696018 cache.go:80] save to tar file registry.k8s.io/kube-scheduler:v1.35.0-beta.0 -> /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.35.0-beta.0 succeeded
I1124 09:04:48.827796 696018 cache.go:107] acquiring lock: {Name:mk1d635b72f6d026600360916178f900a450350e Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 09:04:48.827887 696018 cache.go:115] /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.13.1 exists
I1124 09:04:48.827900 696018 cache.go:96] cache image "registry.k8s.io/coredns/coredns:v1.13.1" -> "/home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.13.1" took 115.907µs
I1124 09:04:48.827910 696018 cache.go:80] save to tar file registry.k8s.io/coredns/coredns:v1.13.1 -> /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.13.1 succeeded
I1124 09:04:48.827914 696018 cache.go:115] /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.35.0-beta.0 exists
I1124 09:04:48.827921 696018 cache.go:96] cache image "registry.k8s.io/kube-controller-manager:v1.35.0-beta.0" -> "/home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.35.0-beta.0" took 128.45µs
I1124 09:04:48.827937 696018 cache.go:80] save to tar file registry.k8s.io/kube-controller-manager:v1.35.0-beta.0 -> /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.35.0-beta.0 succeeded
I1124 09:04:48.827719 696018 cache.go:107] acquiring lock: {Name:mk8023690ce5b18d9a1789b2f878bf92c1381799 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 09:04:48.828021 696018 cache.go:115] /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.35.0-beta.0 exists
I1124 09:04:48.828033 696018 cache.go:96] cache image "registry.k8s.io/kube-proxy:v1.35.0-beta.0" -> "/home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.35.0-beta.0" took 327.502µs
I1124 09:04:48.828051 696018 cache.go:80] save to tar file registry.k8s.io/kube-proxy:v1.35.0-beta.0 -> /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.35.0-beta.0 succeeded
I1124 09:04:48.828067 696018 cache.go:87] Successfully saved all images to host disk.
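Every image above takes a per-path lock and then completes in microseconds because the check under the lock is just a stat on the cached tarball. A sketch of that hit/miss logic (ensureCached and lockFor are hypothetical helpers, not cache.go's actual API):

package main

import (
	"fmt"
	"os"
	"sync"
)

var (
	mu    sync.Mutex
	locks = map[string]*sync.Mutex{}
)

// lockFor returns a per-path mutex so goroutines caching the same image
// serialize while different images proceed in parallel.
func lockFor(path string) *sync.Mutex {
	mu.Lock()
	defer mu.Unlock()
	if l, ok := locks[path]; ok {
		return l
	}
	l := &sync.Mutex{}
	locks[path] = l
	return l
}

// ensureCached skips the expensive save when the tarball already exists,
// as in the "exists ... took NNNµs ... succeeded" lines above.
func ensureCached(image, tarPath string, save func() error) error {
	l := lockFor(tarPath)
	l.Lock()
	defer l.Unlock()
	if _, err := os.Stat(tarPath); err == nil {
		return nil // cache hit
	}
	return save()
}

func main() {
	err := ensureCached("registry.k8s.io/pause:3.10.1", "/tmp/pause_3.10.1",
		func() error { fmt.Println("would pull and save"); return nil })
	fmt.Println(err)
}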
I1124 09:04:50.353018 696018 cli_runner.go:217] Completed: docker run --rm --name no-preload-820576-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=no-preload-820576 --entrypoint /usr/bin/test -v no-preload-820576:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -d /var/lib: (2.295229864s)
I1124 09:04:50.353061 696018 oci.go:107] Successfully prepared a docker volume no-preload-820576
I1124 09:04:50.353130 696018 preload.go:188] Checking if preload exists for k8s version v1.35.0-beta.0 and runtime containerd
W1124 09:04:50.353205 696018 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
W1124 09:04:50.353233 696018 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.
I1124 09:04:50.353275 696018 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1124 09:04:50.412447 696018 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname no-preload-820576 --name no-preload-820576 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=no-preload-820576 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=no-preload-820576 --network no-preload-820576 --ip 192.168.85.2 --volume no-preload-820576:/var --security-opt apparmor=unconfined --memory=3072mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f
I1124 09:04:51.174340 696018 cli_runner.go:164] Run: docker container inspect no-preload-820576 --format={{.State.Running}}
I1124 09:04:51.195074 696018 cli_runner.go:164] Run: docker container inspect no-preload-820576 --format={{.State.Status}}
I1124 09:04:51.216706 696018 cli_runner.go:164] Run: docker exec no-preload-820576 stat /var/lib/dpkg/alternatives/iptables
I1124 09:04:51.270513 696018 oci.go:144] the created container "no-preload-820576" has a running status.
I1124 09:04:51.270555 696018 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21978-435860/.minikube/machines/no-preload-820576/id_rsa...
I1124 09:04:51.639069 696018 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21978-435860/.minikube/machines/no-preload-820576/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1124 09:04:51.669871 696018 cli_runner.go:164] Run: docker container inspect no-preload-820576 --format={{.State.Status}}
I1124 09:04:51.693409 696018 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1124 09:04:51.693441 696018 kic_runner.go:114] Args: [docker exec --privileged no-preload-820576 chown docker:docker /home/docker/.ssh/authorized_keys]
I1124 09:04:51.754414 696018 cli_runner.go:164] Run: docker container inspect no-preload-820576 --format={{.State.Status}}
I1124 09:04:51.781590 696018 machine.go:94] provisionDockerMachine start ...
I1124 09:04:51.781685 696018 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-820576
I1124 09:04:51.808597 696018 main.go:143] libmachine: Using SSH client type: native
I1124 09:04:51.809054 696018 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33063 <nil> <nil>}
I1124 09:04:51.809092 696018 main.go:143] libmachine: About to run SSH command:
hostname
I1124 09:04:51.963230 696018 main.go:143] libmachine: SSH cmd err, output: <nil>: no-preload-820576
I1124 09:04:51.963276 696018 ubuntu.go:182] provisioning hostname "no-preload-820576"
I1124 09:04:51.963339 696018 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-820576
I1124 09:04:51.984069 696018 main.go:143] libmachine: Using SSH client type: native
I1124 09:04:51.984406 696018 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33063 <nil> <nil>}
I1124 09:04:51.984432 696018 main.go:143] libmachine: About to run SSH command:
sudo hostname no-preload-820576 && echo "no-preload-820576" | sudo tee /etc/hostname
I1124 09:04:52.142431 696018 main.go:143] libmachine: SSH cmd err, output: <nil>: no-preload-820576
I1124 09:04:52.142545 696018 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-820576
I1124 09:04:52.163141 696018 main.go:143] libmachine: Using SSH client type: native
I1124 09:04:52.163483 696018 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33063 <nil> <nil>}
I1124 09:04:52.163520 696018 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sno-preload-820576' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 no-preload-820576/g' /etc/hosts;
else
echo '127.0.1.1 no-preload-820576' | sudo tee -a /etc/hosts;
fi
fi
I1124 09:04:52.313074 696018 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1124 09:04:52.313103 696018 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21978-435860/.minikube CaCertPath:/home/jenkins/minikube-integration/21978-435860/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21978-435860/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21978-435860/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21978-435860/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21978-435860/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21978-435860/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21978-435860/.minikube}
I1124 09:04:52.313151 696018 ubuntu.go:190] setting up certificates
I1124 09:04:52.313174 696018 provision.go:84] configureAuth start
I1124 09:04:52.313241 696018 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" no-preload-820576
I1124 09:04:52.333178 696018 provision.go:143] copyHostCerts
I1124 09:04:52.333250 696018 exec_runner.go:144] found /home/jenkins/minikube-integration/21978-435860/.minikube/ca.pem, removing ...
I1124 09:04:52.333267 696018 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21978-435860/.minikube/ca.pem
I1124 09:04:52.333340 696018 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21978-435860/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21978-435860/.minikube/ca.pem (1082 bytes)
I1124 09:04:52.333454 696018 exec_runner.go:144] found /home/jenkins/minikube-integration/21978-435860/.minikube/cert.pem, removing ...
I1124 09:04:52.333479 696018 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21978-435860/.minikube/cert.pem
I1124 09:04:52.333527 696018 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21978-435860/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21978-435860/.minikube/cert.pem (1123 bytes)
I1124 09:04:52.333610 696018 exec_runner.go:144] found /home/jenkins/minikube-integration/21978-435860/.minikube/key.pem, removing ...
I1124 09:04:52.333631 696018 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21978-435860/.minikube/key.pem
I1124 09:04:52.333670 696018 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21978-435860/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21978-435860/.minikube/key.pem (1675 bytes)
I1124 09:04:52.333745 696018 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21978-435860/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21978-435860/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21978-435860/.minikube/certs/ca-key.pem org=jenkins.no-preload-820576 san=[127.0.0.1 192.168.85.2 localhost minikube no-preload-820576]
I1124 09:04:52.372869 696018 provision.go:177] copyRemoteCerts
I1124 09:04:52.372936 696018 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1124 09:04:52.372984 696018 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-820576
I1124 09:04:52.391516 696018 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21978-435860/.minikube/machines/no-preload-820576/id_rsa Username:docker}
I1124 09:04:52.495715 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1124 09:04:52.515508 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/machines/server.pem --> /etc/docker/server.pem (1233 bytes)
I1124 09:04:52.533110 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1124 09:04:52.549620 696018 provision.go:87] duration metric: took 236.431147ms to configureAuth
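configureAuth generates a server certificate whose SANs cover 127.0.0.1, the node IP, localhost, minikube and the profile name, then copies cert and key into /etc/docker over ssh. A sketch of the SAN handling (self-signed here for brevity; minikube signs with ca.pem/ca-key.pem, and serverCertPEM is a hypothetical helper):

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"fmt"
	"math/big"
	"net"
	"time"
)

// serverCertPEM builds a cert whose SANs mirror the log's san=[...] list.
func serverCertPEM(ip string, names []string) ([]byte, error) {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return nil, err
	}
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{Organization: []string{"jenkins.no-preload-820576"}},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(3 * 365 * 24 * time.Hour),
		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		IPAddresses:  []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP(ip)},
		DNSNames:     names,
	}
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		return nil, err
	}
	return pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: der}), nil
}

func main() {
	pemBytes, err := serverCertPEM("192.168.85.2", []string{"localhost", "minikube", "no-preload-820576"})
	fmt.Println(len(pemBytes), err)
}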
I1124 09:04:52.549643 696018 ubuntu.go:206] setting minikube options for container-runtime
I1124 09:04:52.549785 696018 config.go:182] Loaded profile config "no-preload-820576": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.35.0-beta.0
I1124 09:04:52.549795 696018 machine.go:97] duration metric: took 768.185276ms to provisionDockerMachine
I1124 09:04:52.549801 696018 client.go:176] duration metric: took 4.656107804s to LocalClient.Create
I1124 09:04:52.549817 696018 start.go:167] duration metric: took 4.656176839s to libmachine.API.Create "no-preload-820576"
I1124 09:04:52.549827 696018 start.go:293] postStartSetup for "no-preload-820576" (driver="docker")
I1124 09:04:52.549837 696018 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1124 09:04:52.549917 696018 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1124 09:04:52.549957 696018 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-820576
I1124 09:04:52.567598 696018 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21978-435860/.minikube/machines/no-preload-820576/id_rsa Username:docker}
I1124 09:04:52.670209 696018 ssh_runner.go:195] Run: cat /etc/os-release
I1124 09:04:52.673794 696018 main.go:143] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1124 09:04:52.673819 696018 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1124 09:04:52.673829 696018 filesync.go:126] Scanning /home/jenkins/minikube-integration/21978-435860/.minikube/addons for local assets ...
I1124 09:04:52.673873 696018 filesync.go:126] Scanning /home/jenkins/minikube-integration/21978-435860/.minikube/files for local assets ...
I1124 09:04:52.673954 696018 filesync.go:149] local asset: /home/jenkins/minikube-integration/21978-435860/.minikube/files/etc/ssl/certs/4395242.pem -> 4395242.pem in /etc/ssl/certs
I1124 09:04:52.674055 696018 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1124 09:04:52.681571 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/files/etc/ssl/certs/4395242.pem --> /etc/ssl/certs/4395242.pem (1708 bytes)
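.minikube/files acts as a rooted overlay: a file stored at files/etc/ssl/certs/4395242.pem is shipped to /etc/ssl/certs/4395242.pem on the node, as the scp line above shows. A sketch of the scan (localAssets is hypothetical):

package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
	"strings"
)

// localAssets maps each file under root to its in-node destination by
// stripping the root prefix, the rooted-overlay rule visible in the log.
func localAssets(root string) (map[string]string, error) {
	assets := map[string]string{}
	err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
		if err != nil || d.IsDir() {
			return err
		}
		assets[path] = "/" + strings.TrimPrefix(path, root+string(filepath.Separator))
		return nil
	})
	return assets, err
}

func main() {
	m, err := localAssets("/home/jenkins/minikube-integration/21978-435860/.minikube/files")
	fmt.Println(m, err)
}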
I1124 09:04:51.668051 695520 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21978-435860/.minikube/machines/old-k8s-version-128377/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1124 09:04:51.701732 695520 cli_runner.go:164] Run: docker container inspect old-k8s-version-128377 --format={{.State.Status}}
I1124 09:04:51.724111 695520 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1124 09:04:51.724139 695520 kic_runner.go:114] Args: [docker exec --privileged old-k8s-version-128377 chown docker:docker /home/docker/.ssh/authorized_keys]
I1124 09:04:51.779671 695520 cli_runner.go:164] Run: docker container inspect old-k8s-version-128377 --format={{.State.Status}}
I1124 09:04:51.808240 695520 machine.go:94] provisionDockerMachine start ...
I1124 09:04:51.808514 695520 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-128377
I1124 09:04:51.833533 695520 main.go:143] libmachine: Using SSH client type: native
I1124 09:04:51.833868 695520 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33068 <nil> <nil>}
I1124 09:04:51.833890 695520 main.go:143] libmachine: About to run SSH command:
hostname
I1124 09:04:51.988683 695520 main.go:143] libmachine: SSH cmd err, output: <nil>: old-k8s-version-128377
I1124 09:04:51.988712 695520 ubuntu.go:182] provisioning hostname "old-k8s-version-128377"
I1124 09:04:51.988769 695520 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-128377
I1124 09:04:52.008953 695520 main.go:143] libmachine: Using SSH client type: native
I1124 09:04:52.009275 695520 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33068 <nil> <nil>}
I1124 09:04:52.009299 695520 main.go:143] libmachine: About to run SSH command:
sudo hostname old-k8s-version-128377 && echo "old-k8s-version-128377" | sudo tee /etc/hostname
I1124 09:04:52.164712 695520 main.go:143] libmachine: SSH cmd err, output: <nil>: old-k8s-version-128377
I1124 09:04:52.164811 695520 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-128377
I1124 09:04:52.184388 695520 main.go:143] libmachine: Using SSH client type: native
I1124 09:04:52.184674 695520 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33068 <nil> <nil>}
I1124 09:04:52.184701 695520 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sold-k8s-version-128377' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 old-k8s-version-128377/g' /etc/hosts;
else
echo '127.0.1.1 old-k8s-version-128377' | sudo tee -a /etc/hosts;
fi
fi
I1124 09:04:52.328284 695520 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1124 09:04:52.328315 695520 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21978-435860/.minikube CaCertPath:/home/jenkins/minikube-integration/21978-435860/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21978-435860/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21978-435860/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21978-435860/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21978-435860/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21978-435860/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21978-435860/.minikube}
I1124 09:04:52.328349 695520 ubuntu.go:190] setting up certificates
I1124 09:04:52.328371 695520 provision.go:84] configureAuth start
I1124 09:04:52.328437 695520 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-128377
I1124 09:04:52.347382 695520 provision.go:143] copyHostCerts
I1124 09:04:52.347441 695520 exec_runner.go:144] found /home/jenkins/minikube-integration/21978-435860/.minikube/ca.pem, removing ...
I1124 09:04:52.347449 695520 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21978-435860/.minikube/ca.pem
I1124 09:04:52.347530 695520 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21978-435860/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21978-435860/.minikube/ca.pem (1082 bytes)
I1124 09:04:52.347615 695520 exec_runner.go:144] found /home/jenkins/minikube-integration/21978-435860/.minikube/cert.pem, removing ...
I1124 09:04:52.347624 695520 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21978-435860/.minikube/cert.pem
I1124 09:04:52.347646 695520 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21978-435860/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21978-435860/.minikube/cert.pem (1123 bytes)
I1124 09:04:52.347699 695520 exec_runner.go:144] found /home/jenkins/minikube-integration/21978-435860/.minikube/key.pem, removing ...
I1124 09:04:52.347707 695520 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21978-435860/.minikube/key.pem
I1124 09:04:52.347724 695520 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21978-435860/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21978-435860/.minikube/key.pem (1675 bytes)
I1124 09:04:52.347767 695520 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21978-435860/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21978-435860/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21978-435860/.minikube/certs/ca-key.pem org=jenkins.old-k8s-version-128377 san=[127.0.0.1 192.168.103.2 localhost minikube old-k8s-version-128377]
I1124 09:04:52.449836 695520 provision.go:177] copyRemoteCerts
I1124 09:04:52.449907 695520 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1124 09:04:52.449955 695520 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-128377
I1124 09:04:52.467389 695520 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33068 SSHKeyPath:/home/jenkins/minikube-integration/21978-435860/.minikube/machines/old-k8s-version-128377/id_rsa Username:docker}
I1124 09:04:52.568756 695520 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1124 09:04:52.590911 695520 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/machines/server.pem --> /etc/docker/server.pem (1233 bytes)
I1124 09:04:52.608291 695520 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1124 09:04:52.625476 695520 provision.go:87] duration metric: took 297.076146ms to configureAuth
I1124 09:04:52.625501 695520 ubuntu.go:206] setting minikube options for container-runtime
I1124 09:04:52.625684 695520 config.go:182] Loaded profile config "old-k8s-version-128377": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1124 09:04:52.625697 695520 machine.go:97] duration metric: took 817.329123ms to provisionDockerMachine
I1124 09:04:52.625703 695520 client.go:176] duration metric: took 5.811878386s to LocalClient.Create
I1124 09:04:52.625724 695520 start.go:167] duration metric: took 5.811947677s to libmachine.API.Create "old-k8s-version-128377"
I1124 09:04:52.625737 695520 start.go:293] postStartSetup for "old-k8s-version-128377" (driver="docker")
I1124 09:04:52.625751 695520 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1124 09:04:52.625805 695520 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1124 09:04:52.625861 695520 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-128377
I1124 09:04:52.643125 695520 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33068 SSHKeyPath:/home/jenkins/minikube-integration/21978-435860/.minikube/machines/old-k8s-version-128377/id_rsa Username:docker}
I1124 09:04:52.746507 695520 ssh_runner.go:195] Run: cat /etc/os-release
I1124 09:04:52.750419 695520 main.go:143] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1124 09:04:52.750446 695520 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1124 09:04:52.750471 695520 filesync.go:126] Scanning /home/jenkins/minikube-integration/21978-435860/.minikube/addons for local assets ...
I1124 09:04:52.750527 695520 filesync.go:126] Scanning /home/jenkins/minikube-integration/21978-435860/.minikube/files for local assets ...
I1124 09:04:52.750621 695520 filesync.go:149] local asset: /home/jenkins/minikube-integration/21978-435860/.minikube/files/etc/ssl/certs/4395242.pem -> 4395242.pem in /etc/ssl/certs
I1124 09:04:52.750735 695520 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1124 09:04:52.759275 695520 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/files/etc/ssl/certs/4395242.pem --> /etc/ssl/certs/4395242.pem (1708 bytes)
I1124 09:04:52.779524 695520 start.go:296] duration metric: took 153.769147ms for postStartSetup
I1124 09:04:52.779876 695520 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-128377
I1124 09:04:52.797331 695520 profile.go:143] Saving config to /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/config.json ...
I1124 09:04:52.797607 695520 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1124 09:04:52.797652 695520 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-128377
I1124 09:04:52.814633 695520 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33068 SSHKeyPath:/home/jenkins/minikube-integration/21978-435860/.minikube/machines/old-k8s-version-128377/id_rsa Username:docker}
I1124 09:04:52.914421 695520 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1124 09:04:52.919231 695520 start.go:128] duration metric: took 6.107446039s to createHost
I1124 09:04:52.919259 695520 start.go:83] releasing machines lock for "old-k8s-version-128377", held for 6.10762389s
I1124 09:04:52.919326 695520 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-128377
I1124 09:04:52.937920 695520 ssh_runner.go:195] Run: cat /version.json
I1124 09:04:52.937964 695520 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-128377
I1124 09:04:52.937993 695520 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1124 09:04:52.938073 695520 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-128377
I1124 09:04:52.957005 695520 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33068 SSHKeyPath:/home/jenkins/minikube-integration/21978-435860/.minikube/machines/old-k8s-version-128377/id_rsa Username:docker}
I1124 09:04:52.957162 695520 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33068 SSHKeyPath:/home/jenkins/minikube-integration/21978-435860/.minikube/machines/old-k8s-version-128377/id_rsa Username:docker}
I1124 09:04:53.162492 695520 ssh_runner.go:195] Run: systemctl --version
I1124 09:04:53.168749 695520 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1124 09:04:53.173128 695520 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1124 09:04:53.173198 695520 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1124 09:04:53.196703 695520 cni.go:262] disabled [/etc/cni/net.d/10-crio-bridge.conflist.disabled, /etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
I1124 09:04:53.196732 695520 start.go:496] detecting cgroup driver to use...
I1124 09:04:53.196770 695520 detect.go:190] detected "systemd" cgroup driver on host os
I1124 09:04:53.196824 695520 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1124 09:04:53.212821 695520 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1124 09:04:53.226105 695520 docker.go:218] disabling cri-docker service (if available) ...
I1124 09:04:53.226149 695520 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1124 09:04:53.245323 695520 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1124 09:04:53.261892 695520 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1124 09:04:53.346225 695520 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1124 09:04:53.440817 695520 docker.go:234] disabling docker service ...
I1124 09:04:53.440886 695520 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1124 09:04:53.466043 695520 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1124 09:04:53.478621 695520 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1124 09:04:53.566248 695520 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1124 09:04:53.652228 695520 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1124 09:04:53.665204 695520 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1124 09:04:53.679300 695520 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.9"|' /etc/containerd/config.toml"
I1124 09:04:53.689354 695520 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1124 09:04:53.697996 695520 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I1124 09:04:53.698043 695520 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I1124 09:04:53.706349 695520 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1124 09:04:53.715138 695520 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1124 09:04:53.724198 695520 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1124 09:04:53.732594 695520 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1124 09:04:53.740362 695520 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1124 09:04:53.748766 695520 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1124 09:04:53.757048 695520 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1124 09:04:53.765265 695520 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1124 09:04:53.772343 695520 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1124 09:04:53.779254 695520 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1124 09:04:53.856087 695520 ssh_runner.go:195] Run: sudo systemctl restart containerd
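The sed edits above align containerd with the systemd cgroup driver detected on the host: SystemdCgroup is forced to true, legacy io.containerd.runtime.v1.linux and runc.v1 names are rewritten to io.containerd.runc.v2, and enable_unprivileged_ports is re-inserted, all before daemon-reload and restart. A sketch of the SystemdCgroup rewrite (forceSystemdCgroup is hypothetical):

package main

import (
	"fmt"
	"regexp"
)

// forceSystemdCgroup performs the same edit as the log's sed: any existing
// `SystemdCgroup = ...` line is rewritten to `true`, preserving indentation.
func forceSystemdCgroup(configTOML string) string {
	re := regexp.MustCompile(`(?m)^( *)SystemdCgroup = .*$`)
	return re.ReplaceAllString(configTOML, "${1}SystemdCgroup = true")
}

func main() {
	in := `[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
  SystemdCgroup = false`
	fmt.Println(forceSystemdCgroup(in))
}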
I1124 09:04:53.959050 695520 start.go:543] Will wait 60s for socket path /run/containerd/containerd.sock
I1124 09:04:53.959110 695520 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I1124 09:04:53.963133 695520 start.go:564] Will wait 60s for crictl version
I1124 09:04:53.963185 695520 ssh_runner.go:195] Run: which crictl
I1124 09:04:53.966895 695520 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1124 09:04:53.994878 695520 start.go:580] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: v2.1.5
RuntimeApiVersion: v1
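Both waits are simple polls with a 60s budget: stat the containerd socket, then invoke crictl. A sketch of the socket wait (waitForSocket is hypothetical, the 500ms interval is an assumption, and the stat runs locally here where the log runs it over ssh):

package main

import (
	"fmt"
	"os"
	"time"
)

// waitForSocket polls with stat until the containerd socket appears,
// the "Will wait 60s for socket path" step in the log.
func waitForSocket(path string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if _, err := os.Stat(path); err == nil {
			return nil
		}
		time.Sleep(500 * time.Millisecond)
	}
	return fmt.Errorf("%s did not appear within %s", path, timeout)
}

func main() {
	fmt.Println(waitForSocket("/run/containerd/containerd.sock", 60*time.Second))
}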
I1124 09:04:53.994934 695520 ssh_runner.go:195] Run: containerd --version
I1124 09:04:54.021265 695520 ssh_runner.go:195] Run: containerd --version
I1124 09:04:54.045827 695520 out.go:179] * Preparing Kubernetes v1.28.0 on containerd 2.1.5 ...
I1124 09:04:52.701569 696018 start.go:296] duration metric: took 151.731915ms for postStartSetup
I1124 09:04:52.701858 696018 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" no-preload-820576
I1124 09:04:52.719203 696018 profile.go:143] Saving config to /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/config.json ...
I1124 09:04:52.719424 696018 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1124 09:04:52.719488 696018 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-820576
I1124 09:04:52.736084 696018 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21978-435860/.minikube/machines/no-preload-820576/id_rsa Username:docker}
I1124 09:04:52.835481 696018 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1124 09:04:52.840061 696018 start.go:128] duration metric: took 4.94947332s to createHost
I1124 09:04:52.840083 696018 start.go:83] releasing machines lock for "no-preload-820576", held for 4.94964132s
I1124 09:04:52.840148 696018 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" no-preload-820576
I1124 09:04:52.858132 696018 ssh_runner.go:195] Run: cat /version.json
I1124 09:04:52.858160 696018 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1124 09:04:52.858222 696018 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-820576
I1124 09:04:52.858246 696018 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-820576
I1124 09:04:52.877130 696018 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21978-435860/.minikube/machines/no-preload-820576/id_rsa Username:docker}
I1124 09:04:52.877482 696018 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21978-435860/.minikube/machines/no-preload-820576/id_rsa Username:docker}
I1124 09:04:52.975607 696018 ssh_runner.go:195] Run: systemctl --version
I1124 09:04:53.031452 696018 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1124 09:04:53.036065 696018 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1124 09:04:53.036130 696018 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1124 09:04:53.059999 696018 cni.go:262] disabled [/etc/cni/net.d/10-crio-bridge.conflist.disabled, /etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
I1124 09:04:53.060024 696018 start.go:496] detecting cgroup driver to use...
I1124 09:04:53.060062 696018 detect.go:190] detected "systemd" cgroup driver on host os
I1124 09:04:53.060105 696018 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1124 09:04:53.074505 696018 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1124 09:04:53.086089 696018 docker.go:218] disabling cri-docker service (if available) ...
I1124 09:04:53.086143 696018 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1124 09:04:53.101555 696018 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1124 09:04:53.118093 696018 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1124 09:04:53.204201 696018 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1124 09:04:53.300933 696018 docker.go:234] disabling docker service ...
I1124 09:04:53.301034 696018 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1124 09:04:53.320036 696018 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1124 09:04:53.331959 696018 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1124 09:04:53.420508 696018 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1124 09:04:53.513830 696018 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1124 09:04:53.526253 696018 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1124 09:04:53.540562 696018 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubeadm?checksum=file:https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubeadm.sha256
I1124 09:04:53.865082 696018 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I1124 09:04:53.876277 696018 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1124 09:04:53.885584 696018 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I1124 09:04:53.885655 696018 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I1124 09:04:53.895158 696018 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1124 09:04:53.904766 696018 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1124 09:04:53.913841 696018 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1124 09:04:53.922747 696018 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1124 09:04:53.932360 696018 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1124 09:04:53.943272 696018 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1124 09:04:53.952416 696018 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1124 09:04:53.961850 696018 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1124 09:04:53.969795 696018 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1124 09:04:53.977270 696018 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1124 09:04:54.067216 696018 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1124 09:04:54.151776 696018 start.go:543] Will wait 60s for socket path /run/containerd/containerd.sock
I1124 09:04:54.151849 696018 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I1124 09:04:54.156309 696018 start.go:564] Will wait 60s for crictl version
I1124 09:04:54.156367 696018 ssh_runner.go:195] Run: which crictl
I1124 09:04:54.160683 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1124 09:04:54.187130 696018 start.go:580] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: v2.1.5
RuntimeApiVersion: v1
I1124 09:04:54.187193 696018 ssh_runner.go:195] Run: containerd --version
I1124 09:04:54.208524 696018 ssh_runner.go:195] Run: containerd --version
I1124 09:04:54.233294 696018 out.go:179] * Preparing Kubernetes v1.35.0-beta.0 on containerd 2.1.5 ...
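The "Will wait 60s for socket path" step above is a plain poll: stat the containerd socket until it exists or the deadline passes, then move on to the crictl version probe. A sketch of the pattern (assumed shape, not minikube's actual implementation):

    package main

    import (
    	"fmt"
    	"os"
    	"time"
    )

    // waitForSocket polls for path until it exists or timeout elapses.
    func waitForSocket(path string, timeout time.Duration) error {
    	deadline := time.Now().Add(timeout)
    	for time.Now().Before(deadline) {
    		if _, err := os.Stat(path); err == nil {
    			return nil // socket file is present
    		}
    		time.Sleep(500 * time.Millisecond)
    	}
    	return fmt.Errorf("timed out after %s waiting for %s", timeout, path)
    }

    func main() {
    	if err := waitForSocket("/run/containerd/containerd.sock", 60*time.Second); err != nil {
    		fmt.Fprintln(os.Stderr, err)
    		os.Exit(1)
    	}
    }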
I1124 09:04:49.920675 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:04:49.921171 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:04:50.420805 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:04:50.421212 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:04:50.920534 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:04:54.046841 695520 cli_runner.go:164] Run: docker network inspect old-k8s-version-128377 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
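The --format argument above is a Go template that shapes docker network inspect output into a single JSON object. With hypothetical values it renders roughly the line below; note the template emits a trailing comma after every ContainerIPs element, so whatever parses this has to tolerate slightly loose JSON:

    {"Name": "old-k8s-version-128377","Driver": "bridge","Subnet": "192.168.103.0/24","Gateway": "192.168.103.1","MTU": 0, "ContainerIPs": ["192.168.103.2/24",]}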
I1124 09:04:54.064168 695520 ssh_runner.go:195] Run: grep 192.168.103.1 host.minikube.internal$ /etc/hosts
I1124 09:04:54.068915 695520 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.103.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1124 09:04:54.079411 695520 kubeadm.go:884] updating cluster {Name:old-k8s-version-128377 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-128377 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.103.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1124 09:04:54.079584 695520 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1124 09:04:54.079651 695520 ssh_runner.go:195] Run: sudo crictl images --output json
I1124 09:04:54.105064 695520 containerd.go:627] all images are preloaded for containerd runtime.
I1124 09:04:54.105092 695520 containerd.go:534] Images already preloaded, skipping extraction
I1124 09:04:54.105153 695520 ssh_runner.go:195] Run: sudo crictl images --output json
I1124 09:04:54.131723 695520 containerd.go:627] all images are preloaded for containerd runtime.
I1124 09:04:54.131746 695520 cache_images.go:86] Images are preloaded, skipping loading
I1124 09:04:54.131756 695520 kubeadm.go:935] updating node { 192.168.103.2 8443 v1.28.0 containerd true true} ...
I1124 09:04:54.131858 695520 kubeadm.go:947] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.28.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=old-k8s-version-128377 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.103.2
[Install]
config:
{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-128377 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1124 09:04:54.131921 695520 ssh_runner.go:195] Run: sudo crictl info
I1124 09:04:54.160918 695520 cni.go:84] Creating CNI manager for ""
I1124 09:04:54.160940 695520 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1124 09:04:54.160955 695520 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1124 09:04:54.160976 695520 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.103.2 APIServerPort:8443 KubernetesVersion:v1.28.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:old-k8s-version-128377 NodeName:old-k8s-version-128377 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.103.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.103.2 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1124 09:04:54.161123 695520 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.103.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///run/containerd/containerd.sock
name: "old-k8s-version-128377"
kubeletExtraArgs:
node-ip: 192.168.103.2
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.103.2"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.28.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
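For completeness: a rendered config like the one above can be validated without mutating the node by running kubeadm init --config /var/tmp/minikube/kubeadm.yaml --dry-run (a hypothetical invocation for illustration; this log proceeds straight to the real init below).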
I1124 09:04:54.161190 695520 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.28.0
I1124 09:04:54.169102 695520 binaries.go:51] Found k8s binaries, skipping transfer
I1124 09:04:54.169150 695520 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1124 09:04:54.176962 695520 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (327 bytes)
I1124 09:04:54.191252 695520 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1124 09:04:54.206931 695520 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2178 bytes)
I1124 09:04:54.220958 695520 ssh_runner.go:195] Run: grep 192.168.103.2 control-plane.minikube.internal$ /etc/hosts
I1124 09:04:54.225158 695520 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.103.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
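Both /etc/hosts rewrites in this section use the same idempotent idiom: filter out any stale line for the hostname, append the fresh mapping, and copy the result back over /etc/hosts. The same logic in Go (hypothetical helper, for illustration only):

    package main

    import (
    	"fmt"
    	"os"
    	"strings"
    )

    // upsertHost rewrites an /etc/hosts-style file so that exactly one line
    // maps hostname to ip: drop any stale entry, then append the fresh one.
    // Sketch of the grep -v + echo + cp idiom in the log above.
    func upsertHost(path, ip, hostname string) error {
    	data, err := os.ReadFile(path)
    	if err != nil {
    		return err
    	}
    	var kept []string
    	for _, line := range strings.Split(string(data), "\n") {
    		if line != "" && !strings.HasSuffix(line, "\t"+hostname) {
    			kept = append(kept, line)
    		}
    	}
    	kept = append(kept, fmt.Sprintf("%s\t%s", ip, hostname))
    	return os.WriteFile(path, []byte(strings.Join(kept, "\n")+"\n"), 0644)
    }

    func main() {
    	if err := upsertHost("hosts.txt", "192.168.103.2", "control-plane.minikube.internal"); err != nil {
    		fmt.Fprintln(os.Stderr, err)
    	}
    }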
I1124 09:04:54.236116 695520 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1124 09:04:54.319599 695520 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1124 09:04:54.342135 695520 certs.go:69] Setting up /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377 for IP: 192.168.103.2
I1124 09:04:54.342157 695520 certs.go:195] generating shared ca certs ...
I1124 09:04:54.342176 695520 certs.go:227] acquiring lock for ca certs: {Name:mk977567029a87925dffc7f909bfa5f74bf239fc Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 09:04:54.342355 695520 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21978-435860/.minikube/ca.key
I1124 09:04:54.342406 695520 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21978-435860/.minikube/proxy-client-ca.key
I1124 09:04:54.342416 695520 certs.go:257] generating profile certs ...
I1124 09:04:54.342497 695520 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/client.key
I1124 09:04:54.342513 695520 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/client.crt with IP's: []
I1124 09:04:54.488402 695520 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/client.crt ...
I1124 09:04:54.488432 695520 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/client.crt: {Name:mk87cd521056210340bc5798f0387b3f36dc4635 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 09:04:54.488613 695520 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/client.key ...
I1124 09:04:54.488628 695520 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/client.key: {Name:mk03c81f6da2f2b54dfd9fa0e30866e3372921ee Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 09:04:54.488712 695520 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/apiserver.key.f2d0a0c1
I1124 09:04:54.488729 695520 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/apiserver.crt.f2d0a0c1 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.103.2]
I1124 09:04:54.543616 695520 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/apiserver.crt.f2d0a0c1 ...
I1124 09:04:54.543654 695520 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/apiserver.crt.f2d0a0c1: {Name:mk2f5faeeb1a8cba2153625fbd7d3a7e54f95aaf Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 09:04:54.543851 695520 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/apiserver.key.f2d0a0c1 ...
I1124 09:04:54.543873 695520 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/apiserver.key.f2d0a0c1: {Name:mk7ed4cadcafdc2e1a661255372b702ae6719654 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 09:04:54.543964 695520 certs.go:382] copying /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/apiserver.crt.f2d0a0c1 -> /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/apiserver.crt
I1124 09:04:54.544040 695520 certs.go:386] copying /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/apiserver.key.f2d0a0c1 -> /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/apiserver.key
I1124 09:04:54.544132 695520 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/proxy-client.key
I1124 09:04:54.544150 695520 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/proxy-client.crt with IP's: []
I1124 09:04:54.594781 695520 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/proxy-client.crt ...
I1124 09:04:54.594837 695520 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/proxy-client.crt: {Name:mk33ff647329a0bdf714fd27ddf109ec15b6d483 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 09:04:54.595015 695520 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/proxy-client.key ...
I1124 09:04:54.595034 695520 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/proxy-client.key: {Name:mk9bf52d92c35c053f63b6073f2a38e1ff2182d9 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
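The certs.go/crypto.go steps above reduce to issuing x509 certificates whose SANs carry the service and node IPs shown in the "with IP's:" lines. A minimal self-signed sketch using Go's crypto/x509 (illustrative only; minikube signs these against its minikubeCA rather than self-signing):

    package main

    import (
    	"crypto/ecdsa"
    	"crypto/elliptic"
    	"crypto/rand"
    	"crypto/x509"
    	"crypto/x509/pkix"
    	"encoding/pem"
    	"math/big"
    	"net"
    	"os"
    	"time"
    )

    func main() {
    	key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
    	tmpl := &x509.Certificate{
    		SerialNumber: big.NewInt(1),
    		Subject:      pkix.Name{CommonName: "minikube"},
    		NotBefore:    time.Now(),
    		NotAfter:     time.Now().Add(26280 * time.Hour), // matches CertExpiration above
    		// IP SANs, mirroring the "with IP's: [...]" lines in the log.
    		IPAddresses: []net.IP{
    			net.ParseIP("10.96.0.1"),
    			net.ParseIP("127.0.0.1"),
    			net.ParseIP("192.168.103.2"),
    		},
    		KeyUsage:    x509.KeyUsageDigitalSignature,
    		ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
    	}
    	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
    	if err != nil {
    		panic(err)
    	}
    	pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: der})
    }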
I1124 09:04:54.595287 695520 certs.go:484] found cert: /home/jenkins/minikube-integration/21978-435860/.minikube/certs/439524.pem (1338 bytes)
W1124 09:04:54.595344 695520 certs.go:480] ignoring /home/jenkins/minikube-integration/21978-435860/.minikube/certs/439524_empty.pem, impossibly tiny 0 bytes
I1124 09:04:54.595359 695520 certs.go:484] found cert: /home/jenkins/minikube-integration/21978-435860/.minikube/certs/ca-key.pem (1675 bytes)
I1124 09:04:54.595395 695520 certs.go:484] found cert: /home/jenkins/minikube-integration/21978-435860/.minikube/certs/ca.pem (1082 bytes)
I1124 09:04:54.595433 695520 certs.go:484] found cert: /home/jenkins/minikube-integration/21978-435860/.minikube/certs/cert.pem (1123 bytes)
I1124 09:04:54.595484 695520 certs.go:484] found cert: /home/jenkins/minikube-integration/21978-435860/.minikube/certs/key.pem (1675 bytes)
I1124 09:04:54.595553 695520 certs.go:484] found cert: /home/jenkins/minikube-integration/21978-435860/.minikube/files/etc/ssl/certs/4395242.pem (1708 bytes)
I1124 09:04:54.596350 695520 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1124 09:04:54.616384 695520 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1124 09:04:54.633998 695520 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1124 09:04:54.651552 695520 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I1124 09:04:54.669737 695520 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1432 bytes)
I1124 09:04:54.686876 695520 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I1124 09:04:54.703726 695520 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1124 09:04:54.720840 695520 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/old-k8s-version-128377/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I1124 09:04:54.737534 695520 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1124 09:04:54.757717 695520 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/certs/439524.pem --> /usr/share/ca-certificates/439524.pem (1338 bytes)
I1124 09:04:54.774715 695520 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/files/etc/ssl/certs/4395242.pem --> /usr/share/ca-certificates/4395242.pem (1708 bytes)
I1124 09:04:54.791052 695520 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1124 09:04:54.802968 695520 ssh_runner.go:195] Run: openssl version
I1124 09:04:54.808893 695520 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1124 09:04:54.816748 695520 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1124 09:04:54.820220 695520 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 24 08:30 /usr/share/ca-certificates/minikubeCA.pem
I1124 09:04:54.820260 695520 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1124 09:04:54.854133 695520 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1124 09:04:54.862216 695520 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/439524.pem && ln -fs /usr/share/ca-certificates/439524.pem /etc/ssl/certs/439524.pem"
I1124 09:04:54.870277 695520 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/439524.pem
I1124 09:04:54.873860 695520 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 24 08:38 /usr/share/ca-certificates/439524.pem
I1124 09:04:54.873906 695520 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/439524.pem
I1124 09:04:54.910146 695520 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/439524.pem /etc/ssl/certs/51391683.0"
I1124 09:04:54.919148 695520 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/4395242.pem && ln -fs /usr/share/ca-certificates/4395242.pem /etc/ssl/certs/4395242.pem"
I1124 09:04:54.927753 695520 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/4395242.pem
I1124 09:04:54.931870 695520 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 24 08:38 /usr/share/ca-certificates/4395242.pem
I1124 09:04:54.931921 695520 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/4395242.pem
I1124 09:04:54.972285 695520 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/4395242.pem /etc/ssl/certs/3ec20f2e.0"
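Those openssl x509 -hash / ln -fs pairs implement OpenSSL's hashed trust-store layout: the symlink name is the certificate's subject hash plus a .0 suffix (b5213941.0 above), which is how OpenSSL locates a CA at verification time. The same dance sketched in Go (hypothetical helper; it shells out to openssl exactly as the log does):

    package main

    import (
    	"fmt"
    	"os"
    	"os/exec"
    	"path/filepath"
    	"strings"
    )

    // linkCertByHash symlinks pemPath into certDir under its OpenSSL subject
    // hash, mirroring the `openssl x509 -hash` + `ln -fs` steps above.
    func linkCertByHash(pemPath, certDir string) error {
    	out, err := exec.Command("openssl", "x509", "-hash", "-noout", "-in", pemPath).Output()
    	if err != nil {
    		return err
    	}
    	link := filepath.Join(certDir, strings.TrimSpace(string(out))+".0")
    	os.Remove(link) // replace any stale link (the log uses ln -fs)
    	return os.Symlink(pemPath, link)
    }

    func main() {
    	if err := linkCertByHash("/usr/share/ca-certificates/minikubeCA.pem", "/etc/ssl/certs"); err != nil {
    		fmt.Fprintln(os.Stderr, err)
    		os.Exit(1)
    	}
    }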
I1124 09:04:54.981223 695520 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1124 09:04:54.984999 695520 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1124 09:04:54.985067 695520 kubeadm.go:401] StartCluster: {Name:old-k8s-version-128377 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-128377 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.103.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1124 09:04:54.985165 695520 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I1124 09:04:54.985213 695520 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I1124 09:04:55.012874 695520 cri.go:89] found id: ""
I1124 09:04:55.012940 695520 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1124 09:04:55.020831 695520 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1124 09:04:55.029069 695520 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I1124 09:04:55.029111 695520 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1124 09:04:55.036334 695520 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1124 09:04:55.036348 695520 kubeadm.go:158] found existing configuration files:
I1124 09:04:55.036384 695520 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1124 09:04:55.044532 695520 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1124 09:04:55.044579 695520 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1124 09:04:55.051885 695520 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1124 09:04:55.059335 695520 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1124 09:04:55.059381 695520 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1124 09:04:55.066924 695520 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1124 09:04:55.075157 695520 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1124 09:04:55.075202 695520 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1124 09:04:55.082536 695520 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1124 09:04:55.090276 695520 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1124 09:04:55.090333 695520 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1124 09:04:55.097848 695520 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.28.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1124 09:04:55.141844 695520 kubeadm.go:319] [init] Using Kubernetes version: v1.28.0
I1124 09:04:55.142222 695520 kubeadm.go:319] [preflight] Running pre-flight checks
I1124 09:04:55.176293 695520 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I1124 09:04:55.176360 695520 kubeadm.go:319] KERNEL_VERSION: 6.8.0-1044-gcp
I1124 09:04:55.176399 695520 kubeadm.go:319] OS: Linux
I1124 09:04:55.176522 695520 kubeadm.go:319] CGROUPS_CPU: enabled
I1124 09:04:55.176607 695520 kubeadm.go:319] CGROUPS_CPUSET: enabled
I1124 09:04:55.176692 695520 kubeadm.go:319] CGROUPS_DEVICES: enabled
I1124 09:04:55.176788 695520 kubeadm.go:319] CGROUPS_FREEZER: enabled
I1124 09:04:55.176861 695520 kubeadm.go:319] CGROUPS_MEMORY: enabled
I1124 09:04:55.176926 695520 kubeadm.go:319] CGROUPS_PIDS: enabled
I1124 09:04:55.177000 695520 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I1124 09:04:55.177072 695520 kubeadm.go:319] CGROUPS_IO: enabled
I1124 09:04:55.267260 695520 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1124 09:04:55.267430 695520 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1124 09:04:55.267573 695520 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I1124 09:04:55.406819 695520 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1124 09:04:55.408942 695520 out.go:252] - Generating certificates and keys ...
I1124 09:04:55.409040 695520 kubeadm.go:319] [certs] Using existing ca certificate authority
I1124 09:04:55.409154 695520 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1124 09:04:55.535942 695520 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I1124 09:04:55.747446 695520 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I1124 09:04:56.231180 695520 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1124 09:04:56.348617 695520 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1124 09:04:56.564540 695520 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1124 09:04:56.564771 695520 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [localhost old-k8s-version-128377] and IPs [192.168.103.2 127.0.0.1 ::1]
I1124 09:04:54.234417 696018 cli_runner.go:164] Run: docker network inspect no-preload-820576 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1124 09:04:54.252265 696018 ssh_runner.go:195] Run: grep 192.168.85.1 host.minikube.internal$ /etc/hosts
I1124 09:04:54.256402 696018 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.85.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1124 09:04:54.271173 696018 kubeadm.go:884] updating cluster {Name:no-preload-820576 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.35.0-beta.0 ClusterName:no-preload-820576 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.35.0-beta.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1124 09:04:54.271376 696018 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubeadm?checksum=file:https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubeadm.sha256
I1124 09:04:54.585565 696018 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubeadm?checksum=file:https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubeadm.sha256
I1124 09:04:54.895614 696018 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubeadm?checksum=file:https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubeadm.sha256
I1124 09:04:55.213448 696018 preload.go:188] Checking if preload exists for k8s version v1.35.0-beta.0 and runtime containerd
I1124 09:04:55.213537 696018 ssh_runner.go:195] Run: sudo crictl images --output json
I1124 09:04:55.248674 696018 containerd.go:623] couldn't find preloaded image for "registry.k8s.io/kube-apiserver:v1.35.0-beta.0"; assuming images are not preloaded.
I1124 09:04:55.248704 696018 cache_images.go:90] LoadCachedImages start: [registry.k8s.io/kube-apiserver:v1.35.0-beta.0 registry.k8s.io/kube-controller-manager:v1.35.0-beta.0 registry.k8s.io/kube-scheduler:v1.35.0-beta.0 registry.k8s.io/kube-proxy:v1.35.0-beta.0 registry.k8s.io/pause:3.10.1 registry.k8s.io/etcd:3.5.24-0 registry.k8s.io/coredns/coredns:v1.13.1 gcr.io/k8s-minikube/storage-provisioner:v5]
I1124 09:04:55.248761 696018 image.go:138] retrieving image: gcr.io/k8s-minikube/storage-provisioner:v5
I1124 09:04:55.248818 696018 image.go:138] retrieving image: registry.k8s.io/kube-proxy:v1.35.0-beta.0
I1124 09:04:55.248841 696018 image.go:138] retrieving image: registry.k8s.io/coredns/coredns:v1.13.1
I1124 09:04:55.248860 696018 image.go:138] retrieving image: registry.k8s.io/pause:3.10.1
I1124 09:04:55.248864 696018 image.go:138] retrieving image: registry.k8s.io/kube-apiserver:v1.35.0-beta.0
I1124 09:04:55.248833 696018 image.go:138] retrieving image: registry.k8s.io/kube-controller-manager:v1.35.0-beta.0
I1124 09:04:55.248841 696018 image.go:138] retrieving image: registry.k8s.io/etcd:3.5.24-0
I1124 09:04:55.249034 696018 image.go:138] retrieving image: registry.k8s.io/kube-scheduler:v1.35.0-beta.0
I1124 09:04:55.250186 696018 image.go:181] daemon lookup for registry.k8s.io/coredns/coredns:v1.13.1: Error response from daemon: No such image: registry.k8s.io/coredns/coredns:v1.13.1
I1124 09:04:55.250215 696018 image.go:181] daemon lookup for gcr.io/k8s-minikube/storage-provisioner:v5: Error response from daemon: No such image: gcr.io/k8s-minikube/storage-provisioner:v5
I1124 09:04:55.250182 696018 image.go:181] daemon lookup for registry.k8s.io/etcd:3.5.24-0: Error response from daemon: No such image: registry.k8s.io/etcd:3.5.24-0
I1124 09:04:55.250186 696018 image.go:181] daemon lookup for registry.k8s.io/pause:3.10.1: Error response from daemon: No such image: registry.k8s.io/pause:3.10.1
I1124 09:04:55.250253 696018 image.go:181] daemon lookup for registry.k8s.io/kube-apiserver:v1.35.0-beta.0: Error response from daemon: No such image: registry.k8s.io/kube-apiserver:v1.35.0-beta.0
I1124 09:04:55.250254 696018 image.go:181] daemon lookup for registry.k8s.io/kube-controller-manager:v1.35.0-beta.0: Error response from daemon: No such image: registry.k8s.io/kube-controller-manager:v1.35.0-beta.0
I1124 09:04:55.250188 696018 image.go:181] daemon lookup for registry.k8s.io/kube-proxy:v1.35.0-beta.0: Error response from daemon: No such image: registry.k8s.io/kube-proxy:v1.35.0-beta.0
I1124 09:04:55.250648 696018 image.go:181] daemon lookup for registry.k8s.io/kube-scheduler:v1.35.0-beta.0: Error response from daemon: No such image: registry.k8s.io/kube-scheduler:v1.35.0-beta.0
I1124 09:04:55.411211 696018 containerd.go:267] Checking existence of image with name "registry.k8s.io/coredns/coredns:v1.13.1" and sha "aa5e3ebc0dfed0566805186b9e47110d8f9122291d8bad1497e78873ad291139"
I1124 09:04:55.411274 696018 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/coredns/coredns:v1.13.1
I1124 09:04:55.432666 696018 cache_images.go:118] "registry.k8s.io/coredns/coredns:v1.13.1" needs transfer: "registry.k8s.io/coredns/coredns:v1.13.1" does not exist at hash "aa5e3ebc0dfed0566805186b9e47110d8f9122291d8bad1497e78873ad291139" in container runtime
I1124 09:04:55.432717 696018 cri.go:218] Removing image: registry.k8s.io/coredns/coredns:v1.13.1
I1124 09:04:55.432775 696018 ssh_runner.go:195] Run: which crictl
I1124 09:04:55.436380 696018 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-proxy:v1.35.0-beta.0" and sha "8a4ded35a3eb1a80eb49c1a887194460a56b413eed7eb69e59605daf4ec23810"
I1124 09:04:55.436448 696018 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-proxy:v1.35.0-beta.0
I1124 09:04:55.436570 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.13.1
I1124 09:04:55.438317 696018 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-apiserver:v1.35.0-beta.0" and sha "aa9d02839d8def718798bd410c88aba69248b26a8f0e3af2c728b512b67cb52b"
I1124 09:04:55.438376 696018 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-apiserver:v1.35.0-beta.0
I1124 09:04:55.445544 696018 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-controller-manager:v1.35.0-beta.0" and sha "45f3cc72d235f1cfda3de70fe9b2b9d3b356091e491b915f9efd6f0d6e5253bc"
I1124 09:04:55.445608 696018 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-controller-manager:v1.35.0-beta.0
I1124 09:04:55.462611 696018 cache_images.go:118] "registry.k8s.io/kube-proxy:v1.35.0-beta.0" needs transfer: "registry.k8s.io/kube-proxy:v1.35.0-beta.0" does not exist at hash "8a4ded35a3eb1a80eb49c1a887194460a56b413eed7eb69e59605daf4ec23810" in container runtime
I1124 09:04:55.462672 696018 cri.go:218] Removing image: registry.k8s.io/kube-proxy:v1.35.0-beta.0
I1124 09:04:55.462735 696018 ssh_runner.go:195] Run: which crictl
I1124 09:04:55.466873 696018 containerd.go:267] Checking existence of image with name "registry.k8s.io/pause:3.10.1" and sha "cd073f4c5f6a8e9dc6f3125ba00cf60819cae95c1ec84a1f146ee4a9cf9e803f"
I1124 09:04:55.466944 696018 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/pause:3.10.1
I1124 09:04:55.469707 696018 cache_images.go:118] "registry.k8s.io/kube-apiserver:v1.35.0-beta.0" needs transfer: "registry.k8s.io/kube-apiserver:v1.35.0-beta.0" does not exist at hash "aa9d02839d8def718798bd410c88aba69248b26a8f0e3af2c728b512b67cb52b" in container runtime
I1124 09:04:55.469760 696018 cri.go:218] Removing image: registry.k8s.io/kube-apiserver:v1.35.0-beta.0
I1124 09:04:55.469761 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.13.1
I1124 09:04:55.469806 696018 ssh_runner.go:195] Run: which crictl
I1124 09:04:55.476188 696018 containerd.go:267] Checking existence of image with name "registry.k8s.io/etcd:3.5.24-0" and sha "8cb12dd0c3e42c6d0175d09a060358cbb68a3ecc2ba4dbb00327c7d760e1425d"
I1124 09:04:55.476260 696018 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/etcd:3.5.24-0
I1124 09:04:55.476601 696018 cache_images.go:118] "registry.k8s.io/kube-controller-manager:v1.35.0-beta.0" needs transfer: "registry.k8s.io/kube-controller-manager:v1.35.0-beta.0" does not exist at hash "45f3cc72d235f1cfda3de70fe9b2b9d3b356091e491b915f9efd6f0d6e5253bc" in container runtime
I1124 09:04:55.476645 696018 cri.go:218] Removing image: registry.k8s.io/kube-controller-manager:v1.35.0-beta.0
I1124 09:04:55.476700 696018 ssh_runner.go:195] Run: which crictl
I1124 09:04:55.476760 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.35.0-beta.0
I1124 09:04:55.483510 696018 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-scheduler:v1.35.0-beta.0" and sha "7bb6219ddab95bdabbef83f051bee4fdd14b6f791aaa3121080cb2c58ada2e46"
I1124 09:04:55.483571 696018 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-scheduler:v1.35.0-beta.0
I1124 09:04:55.493634 696018 cache_images.go:118] "registry.k8s.io/pause:3.10.1" needs transfer: "registry.k8s.io/pause:3.10.1" does not exist at hash "cd073f4c5f6a8e9dc6f3125ba00cf60819cae95c1ec84a1f146ee4a9cf9e803f" in container runtime
I1124 09:04:55.493674 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.35.0-beta.0
I1124 09:04:55.493687 696018 cri.go:218] Removing image: registry.k8s.io/pause:3.10.1
I1124 09:04:55.493730 696018 ssh_runner.go:195] Run: which crictl
I1124 09:04:55.504559 696018 cache_images.go:118] "registry.k8s.io/etcd:3.5.24-0" needs transfer: "registry.k8s.io/etcd:3.5.24-0" does not exist at hash "8cb12dd0c3e42c6d0175d09a060358cbb68a3ecc2ba4dbb00327c7d760e1425d" in container runtime
I1124 09:04:55.504599 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.13.1
I1124 09:04:55.504606 696018 cri.go:218] Removing image: registry.k8s.io/etcd:3.5.24-0
I1124 09:04:55.504646 696018 ssh_runner.go:195] Run: which crictl
I1124 09:04:55.512866 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.35.0-beta.0
I1124 09:04:55.512892 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.35.0-beta.0
I1124 09:04:55.512910 696018 cache_images.go:118] "registry.k8s.io/kube-scheduler:v1.35.0-beta.0" needs transfer: "registry.k8s.io/kube-scheduler:v1.35.0-beta.0" does not exist at hash "7bb6219ddab95bdabbef83f051bee4fdd14b6f791aaa3121080cb2c58ada2e46" in container runtime
I1124 09:04:55.512950 696018 cri.go:218] Removing image: registry.k8s.io/kube-scheduler:v1.35.0-beta.0
I1124 09:04:55.512990 696018 ssh_runner.go:195] Run: which crictl
I1124 09:04:55.526695 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1124 09:04:55.526717 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.35.0-beta.0
I1124 09:04:55.526785 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.5.24-0
I1124 09:04:55.539513 696018 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.13.1
I1124 09:04:55.539663 696018 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/coredns_v1.13.1
I1124 09:04:55.546674 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.35.0-beta.0
I1124 09:04:55.546750 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.35.0-beta.0
I1124 09:04:55.546715 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.35.0-beta.0
I1124 09:04:55.564076 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.5.24-0
I1124 09:04:55.567023 696018 ssh_runner.go:352] existence check for /var/lib/minikube/images/coredns_v1.13.1: stat -c "%s %y" /var/lib/minikube/images/coredns_v1.13.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/coredns_v1.13.1': No such file or directory
I1124 09:04:55.567049 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.35.0-beta.0
I1124 09:04:55.567061 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.13.1 --> /var/lib/minikube/images/coredns_v1.13.1 (23562752 bytes)
I1124 09:04:55.567151 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1124 09:04:55.598524 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.35.0-beta.0
I1124 09:04:55.598552 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.35.0-beta.0
I1124 09:04:55.598652 696018 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.35.0-beta.0
I1124 09:04:55.598735 696018 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-proxy_v1.35.0-beta.0
I1124 09:04:55.614879 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.5.24-0
I1124 09:04:55.624975 696018 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.35.0-beta.0
I1124 09:04:55.625072 696018 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-apiserver_v1.35.0-beta.0
I1124 09:04:55.679323 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1124 09:04:55.684055 696018 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.35.0-beta.0
I1124 09:04:55.684090 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.35.0-beta.0
I1124 09:04:55.684124 696018 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-proxy_v1.35.0-beta.0: stat -c "%s %y" /var/lib/minikube/images/kube-proxy_v1.35.0-beta.0: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-proxy_v1.35.0-beta.0': No such file or directory
I1124 09:04:55.684140 696018 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-controller-manager_v1.35.0-beta.0
I1124 09:04:55.684150 696018 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.5.24-0
I1124 09:04:55.684159 696018 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-apiserver_v1.35.0-beta.0: stat -c "%s %y" /var/lib/minikube/images/kube-apiserver_v1.35.0-beta.0: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-apiserver_v1.35.0-beta.0': No such file or directory
I1124 09:04:55.684160 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.35.0-beta.0 --> /var/lib/minikube/images/kube-proxy_v1.35.0-beta.0 (25788928 bytes)
I1124 09:04:55.684171 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.35.0-beta.0 --> /var/lib/minikube/images/kube-apiserver_v1.35.0-beta.0 (27682304 bytes)
I1124 09:04:55.684244 696018 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/etcd_3.5.24-0
I1124 09:04:55.736024 696018 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1
I1124 09:04:55.736135 696018 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/pause_3.10.1
I1124 09:04:55.746073 696018 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-controller-manager_v1.35.0-beta.0: stat -c "%s %y" /var/lib/minikube/images/kube-controller-manager_v1.35.0-beta.0: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-controller-manager_v1.35.0-beta.0': No such file or directory
I1124 09:04:55.746108 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.35.0-beta.0 --> /var/lib/minikube/images/kube-controller-manager_v1.35.0-beta.0 (23131648 bytes)
I1124 09:04:55.746157 696018 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.35.0-beta.0
I1124 09:04:55.746175 696018 ssh_runner.go:352] existence check for /var/lib/minikube/images/etcd_3.5.24-0: stat -c "%s %y" /var/lib/minikube/images/etcd_3.5.24-0: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/etcd_3.5.24-0': No such file or directory
I1124 09:04:55.746191 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.5.24-0 --> /var/lib/minikube/images/etcd_3.5.24-0 (23728640 bytes)
I1124 09:04:55.746248 696018 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-scheduler_v1.35.0-beta.0
I1124 09:04:55.801010 696018 ssh_runner.go:352] existence check for /var/lib/minikube/images/pause_3.10.1: stat -c "%s %y" /var/lib/minikube/images/pause_3.10.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/pause_3.10.1': No such file or directory
I1124 09:04:55.801049 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 --> /var/lib/minikube/images/pause_3.10.1 (321024 bytes)
I1124 09:04:55.808405 696018 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-scheduler_v1.35.0-beta.0: stat -c "%s %y" /var/lib/minikube/images/kube-scheduler_v1.35.0-beta.0: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-scheduler_v1.35.0-beta.0': No such file or directory
I1124 09:04:55.808441 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.35.0-beta.0 --> /var/lib/minikube/images/kube-scheduler_v1.35.0-beta.0 (17239040 bytes)
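Every image above follows the same three-step cache protocol: stat the tarball on the node, scp it over only when the stat fails (the "No such file or directory" existence checks), then import it into containerd's k8s.io namespace. A sketch of that loop body (assumed shape, not minikube's code; "node" stands in for the ssh target):

    package main

    import (
    	"fmt"
    	"os/exec"
    )

    // ensureImage pushes a cached image tarball to the node and imports it
    // into containerd, but only when it is not already there. Hypothetical
    // sketch of the stat -> scp -> ctr import sequence in the log above.
    func ensureImage(node, localTar, remoteTar string) error {
    	// 1. Existence check; a non-zero exit mirrors the "Process exited
    	//    with status 1 ... No such file or directory" lines above.
    	if err := exec.Command("ssh", node, "stat", remoteTar).Run(); err == nil {
    		return nil // already transferred
    	}
    	// 2. Transfer the tarball (the ssh_runner.go:362 "scp ... -->" lines).
    	if err := exec.Command("scp", localTar, node+":"+remoteTar).Run(); err != nil {
    		return fmt.Errorf("scp %s: %w", localTar, err)
    	}
    	// 3. Load it into containerd's k8s.io namespace.
    	return exec.Command("ssh", node, "sudo", "ctr", "-n=k8s.io", "images", "import", remoteTar).Run()
    }

    func main() {
    	err := ensureImage("node", "/cache/pause_3.10.1", "/var/lib/minikube/images/pause_3.10.1")
    	fmt.Println(err)
    }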
I1124 09:04:55.880897 696018 containerd.go:285] Loading image: /var/lib/minikube/images/pause_3.10.1
I1124 09:04:55.880969 696018 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/pause_3.10.1
I1124 09:04:56.015999 696018 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 from cache
I1124 09:04:56.068815 696018 containerd.go:285] Loading image: /var/lib/minikube/images/etcd_3.5.24-0
I1124 09:04:56.068912 696018 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/etcd_3.5.24-0
I1124 09:04:56.453297 696018 containerd.go:267] Checking existence of image with name "gcr.io/k8s-minikube/storage-provisioner:v5" and sha "6e38f40d628db3002f5617342c8872c935de530d867d0f709a2fbda1a302a562"
I1124 09:04:56.453371 696018 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==gcr.io/k8s-minikube/storage-provisioner:v5
I1124 09:04:57.304727 696018 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/etcd_3.5.24-0: (1.235782073s)
I1124 09:04:57.304763 696018 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.5.24-0 from cache
I1124 09:04:57.304794 696018 containerd.go:285] Loading image: /var/lib/minikube/images/kube-apiserver_v1.35.0-beta.0
I1124 09:04:57.304806 696018 cache_images.go:118] "gcr.io/k8s-minikube/storage-provisioner:v5" needs transfer: "gcr.io/k8s-minikube/storage-provisioner:v5" does not exist at hash "6e38f40d628db3002f5617342c8872c935de530d867d0f709a2fbda1a302a562" in container runtime
I1124 09:04:57.304847 696018 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-apiserver_v1.35.0-beta.0
I1124 09:04:57.304858 696018 cri.go:218] Removing image: gcr.io/k8s-minikube/storage-provisioner:v5
I1124 09:04:57.304920 696018 ssh_runner.go:195] Run: which crictl
I1124 09:04:56.768431 695520 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1124 09:04:56.768677 695520 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [localhost old-k8s-version-128377] and IPs [192.168.103.2 127.0.0.1 ::1]
I1124 09:04:57.042517 695520 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1124 09:04:57.135211 695520 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1124 09:04:57.487492 695520 kubeadm.go:319] [certs] Generating "sa" key and public key
I1124 09:04:57.487607 695520 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1124 09:04:57.647815 695520 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1124 09:04:57.788032 695520 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1124 09:04:58.007063 695520 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1124 09:04:58.262043 695520 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1124 09:04:58.262616 695520 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1124 09:04:58.265868 695520 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1124 09:04:55.921561 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1124 09:04:55.921607 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:04:58.266858 695520 out.go:252] - Booting up control plane ...
I1124 09:04:58.266989 695520 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1124 09:04:58.267065 695520 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1124 09:04:58.267746 695520 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1124 09:04:58.282824 695520 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1124 09:04:58.283699 695520 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1124 09:04:58.283773 695520 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1124 09:04:58.419897 695520 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
I1124 09:04:58.797650 696018 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-apiserver_v1.35.0-beta.0: (1.492766226s)
I1124 09:04:58.797672 696018 ssh_runner.go:235] Completed: which crictl: (1.492732478s)
I1124 09:04:58.797693 696018 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.35.0-beta.0 from cache
I1124 09:04:58.797722 696018 containerd.go:285] Loading image: /var/lib/minikube/images/kube-scheduler_v1.35.0-beta.0
I1124 09:04:58.797742 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1124 09:04:58.797763 696018 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-scheduler_v1.35.0-beta.0
I1124 09:04:59.494097 696018 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.35.0-beta.0 from cache
I1124 09:04:59.494141 696018 containerd.go:285] Loading image: /var/lib/minikube/images/coredns_v1.13.1
I1124 09:04:59.494193 696018 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/coredns_v1.13.1
I1124 09:04:59.494314 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1124 09:05:00.636087 696018 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/coredns_v1.13.1: (1.141861944s)
I1124 09:05:00.636150 696018 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.13.1 from cache
I1124 09:05:00.636183 696018 containerd.go:285] Loading image: /var/lib/minikube/images/kube-controller-manager_v1.35.0-beta.0
I1124 09:05:00.636184 696018 ssh_runner.go:235] Completed: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5: (1.141835433s)
I1124 09:05:00.636272 696018 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-controller-manager_v1.35.0-beta.0
I1124 09:05:00.636277 696018 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1124 09:05:01.829551 696018 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-controller-manager_v1.35.0-beta.0: (1.193240306s)
I1124 09:05:01.829586 696018 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.35.0-beta.0 from cache
I1124 09:05:01.829561 696018 ssh_runner.go:235] Completed: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5: (1.193259021s)
I1124 09:05:01.829618 696018 containerd.go:285] Loading image: /var/lib/minikube/images/kube-proxy_v1.35.0-beta.0
I1124 09:05:01.829656 696018 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-proxy_v1.35.0-beta.0
I1124 09:05:01.829661 696018 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5
I1124 09:05:01.829741 696018 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v5
I1124 09:05:02.922442 695520 kubeadm.go:319] [apiclient] All control plane components are healthy after 4.502768 seconds
I1124 09:05:02.922650 695520 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1124 09:05:02.938003 695520 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1124 09:05:03.487168 695520 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1124 09:05:03.487569 695520 kubeadm.go:319] [mark-control-plane] Marking the node old-k8s-version-128377 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1124 09:05:03.997647 695520 kubeadm.go:319] [bootstrap-token] Using token: jnao2u.ovlrxqviyhx4po41
I1124 09:05:03.999063 695520 out.go:252] - Configuring RBAC rules ...
I1124 09:05:03.999223 695520 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1124 09:05:04.003823 695520 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1124 09:05:04.010298 695520 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1124 09:05:04.012923 695520 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1124 09:05:04.015535 695520 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1124 09:05:04.019043 695520 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1124 09:05:04.029389 695520 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1124 09:05:04.209549 695520 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1124 09:05:04.407855 695520 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1124 09:05:04.408750 695520 kubeadm.go:319]
I1124 09:05:04.408814 695520 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1124 09:05:04.408821 695520 kubeadm.go:319]
I1124 09:05:04.408930 695520 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1124 09:05:04.408949 695520 kubeadm.go:319]
I1124 09:05:04.408983 695520 kubeadm.go:319] mkdir -p $HOME/.kube
I1124 09:05:04.409060 695520 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1124 09:05:04.409107 695520 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1124 09:05:04.409122 695520 kubeadm.go:319]
I1124 09:05:04.409207 695520 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1124 09:05:04.409227 695520 kubeadm.go:319]
I1124 09:05:04.409283 695520 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1124 09:05:04.409289 695520 kubeadm.go:319]
I1124 09:05:04.409340 695520 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1124 09:05:04.409401 695520 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1124 09:05:04.409519 695520 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1124 09:05:04.409531 695520 kubeadm.go:319]
I1124 09:05:04.409633 695520 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1124 09:05:04.409739 695520 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1124 09:05:04.409748 695520 kubeadm.go:319]
I1124 09:05:04.409856 695520 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token jnao2u.ovlrxqviyhx4po41 \
I1124 09:05:04.409989 695520 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:058f105135414f3c09406a88ceaaa8a4946b8fa5ee02b1189df823d65cc738be \
I1124 09:05:04.410028 695520 kubeadm.go:319] --control-plane
I1124 09:05:04.410043 695520 kubeadm.go:319]
I1124 09:05:04.410157 695520 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1124 09:05:04.410168 695520 kubeadm.go:319]
I1124 09:05:04.410253 695520 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token jnao2u.ovlrxqviyhx4po41 \
I1124 09:05:04.410416 695520 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:058f105135414f3c09406a88ceaaa8a4946b8fa5ee02b1189df823d65cc738be
I1124 09:05:04.412734 695520 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1044-gcp\n", err: exit status 1
I1124 09:05:04.412863 695520 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1124 09:05:04.412887 695520 cni.go:84] Creating CNI manager for ""
I1124 09:05:04.412895 695520 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1124 09:05:04.414780 695520 out.go:179] * Configuring CNI (Container Networking Interface) ...
I1124 09:05:00.922661 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1124 09:05:00.922710 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:04.415630 695520 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I1124 09:05:04.420099 695520 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.28.0/kubectl ...
I1124 09:05:04.420115 695520 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I1124 09:05:04.433073 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
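
The CNI step above is self-contained: minikube stats /opt/cni/bin/portmap, ships the rendered kindnet manifest to /var/tmp/minikube/cni.yaml, and applies it with the version-pinned kubectl. A minimal Go sketch of the same sequence follows; paths are taken from the log, and this is illustrative, not minikube's actual helper code.

package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	// The log first checks `stat /opt/cni/bin/portmap` before applying the manifest.
	if _, err := os.Stat("/opt/cni/bin/portmap"); err != nil {
		fmt.Println("portmap plugin missing:", err)
		return
	}
	// Apply the already-rendered CNI manifest with the cluster-pinned kubectl.
	out, err := exec.Command("sudo",
		"/var/lib/minikube/binaries/v1.28.0/kubectl", "apply",
		"--kubeconfig=/var/lib/minikube/kubeconfig",
		"-f", "/var/tmp/minikube/cni.yaml").CombinedOutput()
	if err != nil {
		fmt.Printf("apply CNI: %v: %s\n", err, out)
		return
	}
	fmt.Printf("%s", out)
}
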
I1124 09:05:05.091722 695520 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1124 09:05:05.091870 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes old-k8s-version-128377 minikube.k8s.io/updated_at=2025_11_24T09_05_05_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=393ee3e0b845623107dce6cda4f48ffd5c3d1811 minikube.k8s.io/name=old-k8s-version-128377 minikube.k8s.io/primary=true
I1124 09:05:05.092348 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:05.102498 695520 ops.go:34] apiserver oom_adj: -16
I1124 09:05:05.174868 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:05.675283 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:06.175310 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:02.915588 696018 ssh_runner.go:235] Completed: stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v5: (1.085815853s)
I1124 09:05:02.915634 696018 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-proxy_v1.35.0-beta.0: (1.085954166s)
I1124 09:05:02.915671 696018 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.35.0-beta.0 from cache
I1124 09:05:02.915639 696018 ssh_runner.go:352] existence check for /var/lib/minikube/images/storage-provisioner_v5: stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v5: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/storage-provisioner_v5': No such file or directory
I1124 09:05:02.915716 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 --> /var/lib/minikube/images/storage-provisioner_v5 (9060352 bytes)
I1124 09:05:02.976753 696018 containerd.go:285] Loading image: /var/lib/minikube/images/storage-provisioner_v5
I1124 09:05:02.976825 696018 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/storage-provisioner_v5
I1124 09:05:03.348632 696018 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21978-435860/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 from cache
I1124 09:05:03.348678 696018 cache_images.go:125] Successfully loaded all cached images
I1124 09:05:03.348686 696018 cache_images.go:94] duration metric: took 8.099965824s to LoadCachedImages
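
The image-load phase that just finished boils down to three shell commands visible above: `ctr -n=k8s.io images ls name==<ref>` to test for presence, `crictl rmi <ref>` to drop a stale tag, and `ctr -n=k8s.io images import <tarball>` to load the cached layers. A self-contained Go sketch of that flow, with illustrative names rather than minikube's real cache_images helpers:

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// imageLoaded reports whether containerd's k8s.io namespace already holds ref,
// by checking if `ctr images ls name==<ref>` prints a matching row.
func imageLoaded(ref string) (bool, error) {
	out, err := exec.Command("sudo", "ctr", "-n=k8s.io", "images", "ls", "name=="+ref).Output()
	if err != nil {
		return false, err
	}
	return strings.Contains(string(out), ref), nil
}

// loadImage imports a cached image tarball, mirroring the logged command.
func loadImage(tarball string) error {
	out, err := exec.Command("sudo", "ctr", "-n=k8s.io", "images", "import", tarball).CombinedOutput()
	if err != nil {
		return fmt.Errorf("import %s: %v: %s", tarball, err, out)
	}
	return nil
}

func main() {
	ref := "gcr.io/k8s-minikube/storage-provisioner:v5"
	ok, err := imageLoaded(ref)
	if err != nil {
		fmt.Println("ls failed:", err)
		return
	}
	if !ok {
		// Remove any stale tag first, as the log does with `crictl rmi`.
		exec.Command("sudo", "crictl", "rmi", ref).Run()
		if err := loadImage("/var/lib/minikube/images/storage-provisioner_v5"); err != nil {
			fmt.Println(err)
		}
	}
}
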
I1124 09:05:03.348703 696018 kubeadm.go:935] updating node { 192.168.85.2 8443 v1.35.0-beta.0 containerd true true} ...
I1124 09:05:03.348825 696018 kubeadm.go:947] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.35.0-beta.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=no-preload-820576 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.85.2
[Install]
config:
{KubernetesVersion:v1.35.0-beta.0 ClusterName:no-preload-820576 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1124 09:05:03.348894 696018 ssh_runner.go:195] Run: sudo crictl info
I1124 09:05:03.376137 696018 cni.go:84] Creating CNI manager for ""
I1124 09:05:03.376168 696018 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1124 09:05:03.376188 696018 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1124 09:05:03.376210 696018 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.85.2 APIServerPort:8443 KubernetesVersion:v1.35.0-beta.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:no-preload-820576 NodeName:no-preload-820576 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.85.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.85.2 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1124 09:05:03.376350 696018 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.85.2
  bindPort: 8443
bootstrapTokens:
  - groups:
      - system:bootstrappers:kubeadm:default-node-token
    ttl: 24h0m0s
    usages:
      - signing
      - authentication
nodeRegistration:
  criSocket: unix:///run/containerd/containerd.sock
  name: "no-preload-820576"
  kubeletExtraArgs:
    - name: "node-ip"
      value: "192.168.85.2"
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
  certSANs: ["127.0.0.1", "localhost", "192.168.85.2"]
  extraArgs:
    - name: "enable-admission-plugins"
      value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
  extraArgs:
    - name: "allocate-node-cidrs"
      value: "true"
    - name: "leader-elect"
      value: "false"
scheduler:
  extraArgs:
    - name: "leader-elect"
      value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.35.0-beta.0
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  x509:
    clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
  maxPerCore: 0
  # Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
  tcpEstablishedTimeout: 0s
  # Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
  tcpCloseWaitTimeout: 0s
I1124 09:05:03.376422 696018 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.35.0-beta.0
I1124 09:05:03.385368 696018 binaries.go:54] Didn't find k8s binaries: sudo ls /var/lib/minikube/binaries/v1.35.0-beta.0: Process exited with status 2
stdout:
stderr:
ls: cannot access '/var/lib/minikube/binaries/v1.35.0-beta.0': No such file or directory
Initiating transfer...
I1124 09:05:03.385424 696018 ssh_runner.go:195] Run: sudo mkdir -p /var/lib/minikube/binaries/v1.35.0-beta.0
I1124 09:05:03.394095 696018 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubectl?checksum=file:https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubectl.sha256
I1124 09:05:03.394128 696018 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubeadm?checksum=file:https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubeadm.sha256
I1124 09:05:03.394180 696018 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubelet?checksum=file:https://dl.k8s.io/release/v1.35.0-beta.0/bin/linux/amd64/kubelet.sha256
I1124 09:05:03.394191 696018 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl
I1124 09:05:03.394205 696018 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.35.0-beta.0/kubeadm
I1124 09:05:03.394225 696018 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1124 09:05:03.399712 696018 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.35.0-beta.0/kubeadm: stat -c "%s %y" /var/lib/minikube/binaries/v1.35.0-beta.0/kubeadm: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.35.0-beta.0/kubeadm': No such file or directory
I1124 09:05:03.399743 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/cache/linux/amd64/v1.35.0-beta.0/kubeadm --> /var/lib/minikube/binaries/v1.35.0-beta.0/kubeadm (72364216 bytes)
I1124 09:05:03.399797 696018 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl: stat -c "%s %y" /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.35.0-beta.0/kubectl': No such file or directory
I1124 09:05:03.399839 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/cache/linux/amd64/v1.35.0-beta.0/kubectl --> /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl (58589368 bytes)
I1124 09:05:03.414063 696018 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.35.0-beta.0/kubelet
I1124 09:05:03.448582 696018 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.35.0-beta.0/kubelet: stat -c "%s %y" /var/lib/minikube/binaries/v1.35.0-beta.0/kubelet: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.35.0-beta.0/kubelet': No such file or directory
I1124 09:05:03.448623 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/cache/linux/amd64/v1.35.0-beta.0/kubelet --> /var/lib/minikube/binaries/v1.35.0-beta.0/kubelet (58106148 bytes)
I1124 09:05:03.941988 696018 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1124 09:05:03.950659 696018 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (328 bytes)
I1124 09:05:03.964545 696018 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (359 bytes)
I1124 09:05:03.980698 696018 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2236 bytes)
I1124 09:05:03.994370 696018 ssh_runner.go:195] Run: grep 192.168.85.2 control-plane.minikube.internal$ /etc/hosts
I1124 09:05:03.999682 696018 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.85.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1124 09:05:04.011951 696018 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1124 09:05:04.105068 696018 ssh_runner.go:195] Run: sudo systemctl start kubelet
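
The three `scp memory --> ...` lines followed by `systemctl daemon-reload` and `systemctl start kubelet` amount to: write generated payloads straight to their target paths, then restart the service. A minimal local sketch, assuming in-memory bytes stand in for the real generated drop-in and kubeadm config (file contents below are placeholders):

package main

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
)

// stage writes in-memory bytes to a target path, creating parent directories,
// which is what the "scp memory --> <path>" log lines boil down to.
func stage(path string, data []byte, mode os.FileMode) error {
	if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
		return err
	}
	return os.WriteFile(path, data, mode)
}

func main() {
	files := map[string][]byte{
		"/etc/systemd/system/kubelet.service.d/10-kubeadm.conf": []byte("# drop-in goes here\n"),
		"/var/tmp/minikube/kubeadm.yaml.new":                    []byte("# kubeadm config goes here\n"),
	}
	for path, data := range files {
		if err := stage(path, data, 0o644); err != nil {
			fmt.Println("stage:", err)
			return
		}
	}
	// Reload unit files, then start the kubelet, as in the log.
	for _, args := range [][]string{
		{"systemctl", "daemon-reload"},
		{"systemctl", "start", "kubelet"},
	} {
		if out, err := exec.Command("sudo", args...).CombinedOutput(); err != nil {
			fmt.Printf("%v: %v: %s\n", args, err, out)
			return
		}
	}
}
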
I1124 09:05:04.129581 696018 certs.go:69] Setting up /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576 for IP: 192.168.85.2
I1124 09:05:04.129609 696018 certs.go:195] generating shared ca certs ...
I1124 09:05:04.129631 696018 certs.go:227] acquiring lock for ca certs: {Name:mk977567029a87925dffc7f909bfa5f74bf239fc Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 09:05:04.129796 696018 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21978-435860/.minikube/ca.key
I1124 09:05:04.129861 696018 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21978-435860/.minikube/proxy-client-ca.key
I1124 09:05:04.129876 696018 certs.go:257] generating profile certs ...
I1124 09:05:04.129944 696018 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/client.key
I1124 09:05:04.129964 696018 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/client.crt with IP's: []
I1124 09:05:04.178331 696018 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/client.crt ...
I1124 09:05:04.178368 696018 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/client.crt: {Name:mk7a6d48f62cb24db3b80fa6902658a2fab15360 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 09:05:04.178586 696018 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/client.key ...
I1124 09:05:04.178605 696018 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/client.key: {Name:mke761c4ec29e36beccc716dc800bc8fd841e3c6 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 09:05:04.178724 696018 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/apiserver.key.402ae632
I1124 09:05:04.178748 696018 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/apiserver.crt.402ae632 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.85.2]
I1124 09:05:04.417670 696018 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/apiserver.crt.402ae632 ...
I1124 09:05:04.417694 696018 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/apiserver.crt.402ae632: {Name:mk59a2d57d772e51aeeeb2a9a4dca760203e6d09 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 09:05:04.417874 696018 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/apiserver.key.402ae632 ...
I1124 09:05:04.417897 696018 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/apiserver.key.402ae632: {Name:mkdb0be38fd80ef77438b49aa69b9308c6d28ca3 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 09:05:04.418023 696018 certs.go:382] copying /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/apiserver.crt.402ae632 -> /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/apiserver.crt
I1124 09:05:04.418147 696018 certs.go:386] copying /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/apiserver.key.402ae632 -> /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/apiserver.key
I1124 09:05:04.418202 696018 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/proxy-client.key
I1124 09:05:04.418217 696018 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/proxy-client.crt with IP's: []
I1124 09:05:04.604435 696018 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/proxy-client.crt ...
I1124 09:05:04.604497 696018 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/proxy-client.crt: {Name:mk5719f2112f16d39272baf4588ce9b65d33d2a3 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 09:05:04.604728 696018 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/proxy-client.key ...
I1124 09:05:04.604746 696018 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/proxy-client.key: {Name:mk56d8ccc21a879d6506ee3380097e85fb4b4f95 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 09:05:04.605022 696018 certs.go:484] found cert: /home/jenkins/minikube-integration/21978-435860/.minikube/certs/439524.pem (1338 bytes)
W1124 09:05:04.605073 696018 certs.go:480] ignoring /home/jenkins/minikube-integration/21978-435860/.minikube/certs/439524_empty.pem, impossibly tiny 0 bytes
I1124 09:05:04.605084 696018 certs.go:484] found cert: /home/jenkins/minikube-integration/21978-435860/.minikube/certs/ca-key.pem (1675 bytes)
I1124 09:05:04.605120 696018 certs.go:484] found cert: /home/jenkins/minikube-integration/21978-435860/.minikube/certs/ca.pem (1082 bytes)
I1124 09:05:04.605160 696018 certs.go:484] found cert: /home/jenkins/minikube-integration/21978-435860/.minikube/certs/cert.pem (1123 bytes)
I1124 09:05:04.605195 696018 certs.go:484] found cert: /home/jenkins/minikube-integration/21978-435860/.minikube/certs/key.pem (1675 bytes)
I1124 09:05:04.605369 696018 certs.go:484] found cert: /home/jenkins/minikube-integration/21978-435860/.minikube/files/etc/ssl/certs/4395242.pem (1708 bytes)
I1124 09:05:04.606568 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1124 09:05:04.626964 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1124 09:05:04.644973 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1124 09:05:04.663649 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I1124 09:05:04.681360 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
I1124 09:05:04.699027 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I1124 09:05:04.716381 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1124 09:05:04.734298 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/profiles/no-preload-820576/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I1124 09:05:04.752033 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/files/etc/ssl/certs/4395242.pem --> /usr/share/ca-certificates/4395242.pem (1708 bytes)
I1124 09:05:04.771861 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1124 09:05:04.789824 696018 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21978-435860/.minikube/certs/439524.pem --> /usr/share/ca-certificates/439524.pem (1338 bytes)
I1124 09:05:04.808313 696018 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1124 09:05:04.826085 696018 ssh_runner.go:195] Run: openssl version
I1124 09:05:04.834356 696018 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1124 09:05:04.843772 696018 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1124 09:05:04.848660 696018 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 24 08:30 /usr/share/ca-certificates/minikubeCA.pem
I1124 09:05:04.848725 696018 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1124 09:05:04.887168 696018 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1124 09:05:04.897113 696018 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/439524.pem && ln -fs /usr/share/ca-certificates/439524.pem /etc/ssl/certs/439524.pem"
I1124 09:05:04.907480 696018 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/439524.pem
I1124 09:05:04.911694 696018 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 24 08:38 /usr/share/ca-certificates/439524.pem
I1124 09:05:04.911746 696018 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/439524.pem
I1124 09:05:04.951326 696018 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/439524.pem /etc/ssl/certs/51391683.0"
I1124 09:05:04.961765 696018 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/4395242.pem && ln -fs /usr/share/ca-certificates/4395242.pem /etc/ssl/certs/4395242.pem"
I1124 09:05:04.972056 696018 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/4395242.pem
I1124 09:05:04.976497 696018 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 24 08:38 /usr/share/ca-certificates/4395242.pem
I1124 09:05:04.976554 696018 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/4395242.pem
I1124 09:05:05.017003 696018 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/4395242.pem /etc/ssl/certs/3ec20f2e.0"
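
Each certificate above gets the same treatment: install the PEM under /usr/share/ca-certificates, hash it with `openssl x509 -hash -noout`, then symlink /etc/ssl/certs/<hash>.0 at it so OpenSSL-style lookups can find the CA. A hedged Go equivalent of one round of that dance (paths from the log; needs root; error handling trimmed for brevity):

package main

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
)

// trustCert links an installed CA PEM into /etc/ssl/certs under its OpenSSL
// subject hash, mirroring the `openssl x509 -hash` + `ln -fs` pair above.
func trustCert(pem string) error {
	out, err := exec.Command("openssl", "x509", "-hash", "-noout", "-in", pem).Output()
	if err != nil {
		return fmt.Errorf("hash %s: %w", pem, err)
	}
	link := "/etc/ssl/certs/" + strings.TrimSpace(string(out)) + ".0"
	// ln -fs semantics: drop any stale link before creating the new one.
	_ = os.Remove(link)
	return os.Symlink(pem, link)
}

func main() {
	if err := trustCert("/usr/share/ca-certificates/minikubeCA.pem"); err != nil {
		fmt.Println(err)
	}
}
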
I1124 09:05:05.027292 696018 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1124 09:05:05.031547 696018 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1124 09:05:05.031616 696018 kubeadm.go:401] StartCluster: {Name:no-preload-820576 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.35.0-beta.0 ClusterName:no-preload-820576 Namespace:default APIServerHAVIP: APIServerName:minikubeCA
APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.35.0-beta.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false
CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1124 09:05:05.031711 696018 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I1124 09:05:05.031765 696018 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I1124 09:05:05.062044 696018 cri.go:89] found id: ""
I1124 09:05:05.062126 696018 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1124 09:05:05.071887 696018 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1124 09:05:05.082157 696018 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I1124 09:05:05.082217 696018 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1124 09:05:05.091225 696018 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1124 09:05:05.091248 696018 kubeadm.go:158] found existing configuration files:
I1124 09:05:05.091296 696018 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1124 09:05:05.100600 696018 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1124 09:05:05.100657 696018 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1124 09:05:05.110555 696018 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1124 09:05:05.119216 696018 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1124 09:05:05.119288 696018 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1124 09:05:05.127876 696018 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1124 09:05:05.136154 696018 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1124 09:05:05.136205 696018 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1124 09:05:05.145077 696018 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1124 09:05:05.154290 696018 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1124 09:05:05.154338 696018 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
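
The grep-and-rm loop above is stale-config cleanup: each kubeconfig under /etc/kubernetes is kept only if it already points at https://control-plane.minikube.internal:8443, and anything else is removed so `kubeadm init` regenerates it. The same logic in plain Go (a sketch, not minikube's actual kubeadm.go code):

package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	const endpoint = "https://control-plane.minikube.internal:8443"
	for _, conf := range []string{
		"/etc/kubernetes/admin.conf",
		"/etc/kubernetes/kubelet.conf",
		"/etc/kubernetes/controller-manager.conf",
		"/etc/kubernetes/scheduler.conf",
	} {
		data, err := os.ReadFile(conf)
		if err != nil || !strings.Contains(string(data), endpoint) {
			// Missing or pointing elsewhere: remove it, like the `rm -f` above
			// (os.Remove on a missing file just returns an error we ignore).
			_ = os.Remove(conf)
			fmt.Println("cleaned", conf)
		}
	}
}
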
I1124 09:05:05.162702 696018 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0-beta.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1124 09:05:05.200662 696018 kubeadm.go:319] [init] Using Kubernetes version: v1.35.0-beta.0
I1124 09:05:05.200757 696018 kubeadm.go:319] [preflight] Running pre-flight checks
I1124 09:05:05.269623 696018 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I1124 09:05:05.269714 696018 kubeadm.go:319] KERNEL_VERSION: 6.8.0-1044-gcp
I1124 09:05:05.269770 696018 kubeadm.go:319] OS: Linux
I1124 09:05:05.269842 696018 kubeadm.go:319] CGROUPS_CPU: enabled
I1124 09:05:05.269920 696018 kubeadm.go:319] CGROUPS_CPUSET: enabled
I1124 09:05:05.270003 696018 kubeadm.go:319] CGROUPS_DEVICES: enabled
I1124 09:05:05.270084 696018 kubeadm.go:319] CGROUPS_FREEZER: enabled
I1124 09:05:05.270155 696018 kubeadm.go:319] CGROUPS_MEMORY: enabled
I1124 09:05:05.270223 696018 kubeadm.go:319] CGROUPS_PIDS: enabled
I1124 09:05:05.270303 696018 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I1124 09:05:05.270377 696018 kubeadm.go:319] CGROUPS_IO: enabled
I1124 09:05:05.332844 696018 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1124 09:05:05.332992 696018 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1124 09:05:05.333150 696018 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I1124 09:05:06.734694 696018 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1124 09:05:06.738817 696018 out.go:252] - Generating certificates and keys ...
I1124 09:05:06.738929 696018 kubeadm.go:319] [certs] Using existing ca certificate authority
I1124 09:05:06.739072 696018 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1124 09:05:06.832143 696018 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I1124 09:05:06.955015 696018 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I1124 09:05:07.027143 696018 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1124 09:05:07.115762 696018 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1124 09:05:07.265716 696018 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1124 09:05:07.265857 696018 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [localhost no-preload-820576] and IPs [192.168.85.2 127.0.0.1 ::1]
I1124 09:05:07.364684 696018 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1124 09:05:07.364865 696018 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [localhost no-preload-820576] and IPs [192.168.85.2 127.0.0.1 ::1]
I1124 09:05:07.523315 696018 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1124 09:05:07.590589 696018 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1124 09:05:07.746307 696018 kubeadm.go:319] [certs] Generating "sa" key and public key
I1124 09:05:07.746426 696018 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1124 09:05:07.869677 696018 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1124 09:05:07.978931 696018 kubeadm.go:319] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I1124 09:05:08.053720 696018 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1124 09:05:08.085227 696018 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1124 09:05:08.160011 696018 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1124 09:05:08.160849 696018 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1124 09:05:08.165435 696018 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1124 09:05:05.923694 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1124 09:05:05.923742 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:06.675415 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:07.175277 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:07.676031 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:08.174962 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:08.675088 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:09.175102 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:09.675096 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:10.175027 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:10.675655 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:11.175703 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:08.166975 696018 out.go:252] - Booting up control plane ...
I1124 09:05:08.167117 696018 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1124 09:05:08.167189 696018 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1124 09:05:08.167816 696018 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1124 09:05:08.183769 696018 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1124 09:05:08.183936 696018 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I1124 09:05:08.191856 696018 kubeadm.go:319] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I1124 09:05:08.191990 696018 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1124 09:05:08.192031 696018 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1124 09:05:08.308076 696018 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I1124 09:05:08.308205 696018 kubeadm.go:319] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I1124 09:05:09.309901 696018 kubeadm.go:319] [kubelet-check] The kubelet is healthy after 1.001908715s
I1124 09:05:09.316051 696018 kubeadm.go:319] [control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
I1124 09:05:09.316157 696018 kubeadm.go:319] [control-plane-check] Checking kube-apiserver at https://192.168.85.2:8443/livez
I1124 09:05:09.316247 696018 kubeadm.go:319] [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
I1124 09:05:09.316315 696018 kubeadm.go:319] [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
I1124 09:05:10.320869 696018 kubeadm.go:319] [control-plane-check] kube-controller-manager is healthy after 1.004644301s
I1124 09:05:10.832866 696018 kubeadm.go:319] [control-plane-check] kube-scheduler is healthy after 1.516703459s
I1124 09:05:12.317179 696018 kubeadm.go:319] [control-plane-check] kube-apiserver is healthy after 3.001080604s
I1124 09:05:12.331544 696018 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1124 09:05:12.339378 696018 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1124 09:05:12.347526 696018 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1124 09:05:12.347705 696018 kubeadm.go:319] [mark-control-plane] Marking the node no-preload-820576 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1124 09:05:12.354657 696018 kubeadm.go:319] [bootstrap-token] Using token: awoygq.wealvtzys3befsou
I1124 09:05:12.355757 696018 out.go:252] - Configuring RBAC rules ...
I1124 09:05:12.355888 696018 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1124 09:05:12.359613 696018 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1124 09:05:12.364202 696018 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1124 09:05:12.366491 696018 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1124 09:05:12.369449 696018 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1124 09:05:12.371508 696018 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1124 09:05:12.722783 696018 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1124 09:05:13.137535 696018 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1124 09:05:13.723038 696018 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1124 09:05:13.724197 696018 kubeadm.go:319]
I1124 09:05:13.724302 696018 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1124 09:05:13.724317 696018 kubeadm.go:319]
I1124 09:05:13.724412 696018 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1124 09:05:13.724424 696018 kubeadm.go:319]
I1124 09:05:13.724520 696018 kubeadm.go:319] mkdir -p $HOME/.kube
I1124 09:05:13.724630 696018 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1124 09:05:13.724716 696018 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1124 09:05:13.724730 696018 kubeadm.go:319]
I1124 09:05:13.724818 696018 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1124 09:05:13.724831 696018 kubeadm.go:319]
I1124 09:05:13.724897 696018 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1124 09:05:13.724906 696018 kubeadm.go:319]
I1124 09:05:13.724990 696018 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1124 09:05:13.725105 696018 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1124 09:05:13.725212 696018 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1124 09:05:13.725221 696018 kubeadm.go:319]
I1124 09:05:13.725338 696018 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1124 09:05:13.725493 696018 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1124 09:05:13.725510 696018 kubeadm.go:319]
I1124 09:05:13.725601 696018 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token awoygq.wealvtzys3befsou \
I1124 09:05:13.725765 696018 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:058f105135414f3c09406a88ceaaa8a4946b8fa5ee02b1189df823d65cc738be \
I1124 09:05:13.725804 696018 kubeadm.go:319] --control-plane
I1124 09:05:13.725816 696018 kubeadm.go:319]
I1124 09:05:13.725934 696018 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1124 09:05:13.725944 696018 kubeadm.go:319]
I1124 09:05:13.726041 696018 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token awoygq.wealvtzys3befsou \
I1124 09:05:13.726243 696018 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:058f105135414f3c09406a88ceaaa8a4946b8fa5ee02b1189df823d65cc738be
I1124 09:05:13.728504 696018 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1044-gcp\n", err: exit status 1
I1124 09:05:13.728661 696018 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1124 09:05:13.728704 696018 cni.go:84] Creating CNI manager for ""
I1124 09:05:13.728716 696018 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1124 09:05:13.730529 696018 out.go:179] * Configuring CNI (Container Networking Interface) ...
I1124 09:05:10.924882 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1124 09:05:10.924923 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:11.109506 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": read tcp 192.168.76.1:47578->192.168.76.2:8443: read: connection reset by peer
I1124 09:05:11.421112 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:11.421646 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:11.920950 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:11.921496 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:12.421219 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:12.421692 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:12.921430 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:12.921911 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:13.420431 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:13.420926 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:13.920542 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:13.921060 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:14.420434 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:14.420859 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
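
Meanwhile, process 685562 keeps probing its apiserver's /healthz with a short per-request timeout, treating timeouts and connection-refused as "not up yet" and retrying on a roughly half-second cadence. A self-contained version of that polling loop (endpoint and timings mirror the log; InsecureSkipVerify is for this sketch only, a real probe should pin the cluster CA):

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{
		Timeout: 2 * time.Second, // bounded per-probe wait, like Client.Timeout in the log
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // sketch only
		},
	}
	deadline := time.Now().Add(4 * time.Minute)
	for time.Now().Before(deadline) {
		resp, err := client.Get("https://192.168.76.2:8443/healthz")
		if err == nil {
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				fmt.Println("apiserver healthy")
				return
			}
		}
		// Timeouts and connection-refused both land here; just retry.
		time.Sleep(500 * time.Millisecond)
	}
	fmt.Println("timed out waiting for /healthz")
}
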
I1124 09:05:11.675776 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:12.175192 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:12.675267 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:13.175941 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:13.675281 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:14.175267 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:14.675185 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:15.175391 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:15.675966 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:16.175887 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:16.675144 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:17.175281 695520 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:17.260591 695520 kubeadm.go:1114] duration metric: took 12.168846115s to wait for elevateKubeSystemPrivileges
I1124 09:05:17.260625 695520 kubeadm.go:403] duration metric: took 22.275566194s to StartCluster
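[annotation] The half-second "kubectl get sa default" loop that just finished is the elevateKubeSystemPrivileges wait: the default ServiceAccount only exists once kube-controller-manager has bootstrapped the namespace, so its appearance is used as the "cluster is usable" signal. A hypothetical condensation (paths are the ones from the log):

```go
package main

import (
	"os/exec"
	"time"
)

func waitForDefaultSA(kubectl string) {
	for {
		cmd := exec.Command("sudo", kubectl, "get", "sa", "default",
			"--kubeconfig=/var/lib/minikube/kubeconfig")
		if cmd.Run() == nil {
			return // ServiceAccount exists; bootstrap is done
		}
		time.Sleep(500 * time.Millisecond)
	}
}

func main() {
	waitForDefaultSA("/var/lib/minikube/binaries/v1.28.0/kubectl")
}
```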
I1124 09:05:17.260655 695520 settings.go:142] acquiring lock: {Name:mk02cbf979fc883a7cfa89d50f2f1c6cf88236e4 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 09:05:17.260738 695520 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21978-435860/kubeconfig
I1124 09:05:17.261860 695520 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21978-435860/kubeconfig: {Name:mk42183bd63f8b44881819ac352384aa0ef5afa7 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 09:05:17.262121 695520 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1124 09:05:17.262124 695520 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.103.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1124 09:05:17.262197 695520 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1124 09:05:17.262308 695520 addons.go:70] Setting storage-provisioner=true in profile "old-k8s-version-128377"
I1124 09:05:17.262334 695520 addons.go:239] Setting addon storage-provisioner=true in "old-k8s-version-128377"
I1124 09:05:17.262358 695520 config.go:182] Loaded profile config "old-k8s-version-128377": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1124 09:05:17.262376 695520 host.go:66] Checking if "old-k8s-version-128377" exists ...
I1124 09:05:17.262351 695520 addons.go:70] Setting default-storageclass=true in profile "old-k8s-version-128377"
I1124 09:05:17.262443 695520 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "old-k8s-version-128377"
I1124 09:05:17.262844 695520 cli_runner.go:164] Run: docker container inspect old-k8s-version-128377 --format={{.State.Status}}
I1124 09:05:17.263075 695520 cli_runner.go:164] Run: docker container inspect old-k8s-version-128377 --format={{.State.Status}}
I1124 09:05:17.263365 695520 out.go:179] * Verifying Kubernetes components...
I1124 09:05:17.264408 695520 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1124 09:05:17.287510 695520 addons.go:239] Setting addon default-storageclass=true in "old-k8s-version-128377"
I1124 09:05:17.287559 695520 host.go:66] Checking if "old-k8s-version-128377" exists ...
I1124 09:05:17.287978 695520 cli_runner.go:164] Run: docker container inspect old-k8s-version-128377 --format={{.State.Status}}
I1124 09:05:17.288769 695520 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1124 09:05:13.732137 696018 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I1124 09:05:13.737711 696018 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl ...
I1124 09:05:13.737726 696018 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I1124 09:05:13.752118 696018 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I1124 09:05:13.951744 696018 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1124 09:05:13.951795 696018 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:13.951847 696018 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes no-preload-820576 minikube.k8s.io/updated_at=2025_11_24T09_05_13_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=393ee3e0b845623107dce6cda4f48ffd5c3d1811 minikube.k8s.io/name=no-preload-820576 minikube.k8s.io/primary=true
I1124 09:05:13.962047 696018 ops.go:34] apiserver oom_adj: -16
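[annotation] The ops.go line above verifies the apiserver is shielded from the OOM killer by reading its legacy oom_adj knob (the log shows -16; the modern equivalent file is /proc/&lt;pid&gt;/oom_score_adj). A sketch of that check, mirroring the exact shell pipeline in the log:

```go
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	out, err := exec.Command("/bin/bash", "-c",
		"cat /proc/$(pgrep kube-apiserver)/oom_adj").Output()
	if err != nil {
		fmt.Println("apiserver not running:", err)
		return
	}
	fmt.Println("apiserver oom_adj:", strings.TrimSpace(string(out)))
}
```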
I1124 09:05:14.022754 696018 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:14.523671 696018 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:15.023231 696018 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:15.523083 696018 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:16.023230 696018 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:16.523666 696018 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:17.022940 696018 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:17.523444 696018 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:17.290230 695520 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1124 09:05:17.290253 695520 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1124 09:05:17.290314 695520 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-128377
I1124 09:05:17.317679 695520 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1124 09:05:17.317704 695520 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1124 09:05:17.317768 695520 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-128377
I1124 09:05:17.319048 695520 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33068 SSHKeyPath:/home/jenkins/minikube-integration/21978-435860/.minikube/machines/old-k8s-version-128377/id_rsa Username:docker}
I1124 09:05:17.343853 695520 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33068 SSHKeyPath:/home/jenkins/minikube-integration/21978-435860/.minikube/machines/old-k8s-version-128377/id_rsa Username:docker}
I1124 09:05:17.366525 695520 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.103.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I1124 09:05:17.411998 695520 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1124 09:05:17.447003 695520 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1124 09:05:17.463082 695520 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1124 09:05:17.632983 695520 start.go:977] {"host.minikube.internal": 192.168.103.1} host record injected into CoreDNS's ConfigMap
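[annotation] The long sed pipeline above is what "host record injected into CoreDNS's ConfigMap" refers to: it splices a hosts stanza in front of the forward plugin so pods can resolve host.minikube.internal to the host-side gateway. A sketch of the same transform in Go, reconstructed from the sed expressions rather than taken from minikube's code; the sample Corefile is a stock default:

```go
package main

import (
	"fmt"
	"strings"
)

func injectHostRecord(corefile, gatewayIP string) string {
	hosts := fmt.Sprintf("    hosts {\n       %s host.minikube.internal\n       fallthrough\n    }\n", gatewayIP)
	return strings.Replace(corefile,
		"    forward . /etc/resolv.conf",
		hosts+"    forward . /etc/resolv.conf", 1)
}

func main() {
	corefile := ".:53 {\n    errors\n    forward . /etc/resolv.conf {\n       max_concurrent 1000\n    }\n}\n"
	fmt.Println(injectHostRecord(corefile, "192.168.103.1"))
}
```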
I1124 09:05:17.634312 695520 node_ready.go:35] waiting up to 6m0s for node "old-k8s-version-128377" to be "Ready" ...
I1124 09:05:17.888856 695520 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
I1124 09:05:18.022851 696018 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:18.523601 696018 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 09:05:18.589169 696018 kubeadm.go:1114] duration metric: took 4.637423043s to wait for elevateKubeSystemPrivileges
I1124 09:05:18.589209 696018 kubeadm.go:403] duration metric: took 13.557597169s to StartCluster
I1124 09:05:18.589237 696018 settings.go:142] acquiring lock: {Name:mk02cbf979fc883a7cfa89d50f2f1c6cf88236e4 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 09:05:18.589321 696018 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21978-435860/kubeconfig
I1124 09:05:18.590747 696018 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21978-435860/kubeconfig: {Name:mk42183bd63f8b44881819ac352384aa0ef5afa7 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 09:05:18.590988 696018 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1124 09:05:18.591000 696018 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.35.0-beta.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1124 09:05:18.591095 696018 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1124 09:05:18.591206 696018 addons.go:70] Setting storage-provisioner=true in profile "no-preload-820576"
I1124 09:05:18.591219 696018 config.go:182] Loaded profile config "no-preload-820576": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.35.0-beta.0
I1124 09:05:18.591236 696018 addons.go:239] Setting addon storage-provisioner=true in "no-preload-820576"
I1124 09:05:18.591251 696018 addons.go:70] Setting default-storageclass=true in profile "no-preload-820576"
I1124 09:05:18.591275 696018 host.go:66] Checking if "no-preload-820576" exists ...
I1124 09:05:18.591283 696018 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "no-preload-820576"
I1124 09:05:18.591664 696018 cli_runner.go:164] Run: docker container inspect no-preload-820576 --format={{.State.Status}}
I1124 09:05:18.591855 696018 cli_runner.go:164] Run: docker container inspect no-preload-820576 --format={{.State.Status}}
I1124 09:05:18.592299 696018 out.go:179] * Verifying Kubernetes components...
I1124 09:05:18.593599 696018 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1124 09:05:18.615163 696018 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1124 09:05:18.615451 696018 addons.go:239] Setting addon default-storageclass=true in "no-preload-820576"
I1124 09:05:18.615530 696018 host.go:66] Checking if "no-preload-820576" exists ...
I1124 09:05:18.615851 696018 cli_runner.go:164] Run: docker container inspect no-preload-820576 --format={{.State.Status}}
I1124 09:05:18.616223 696018 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1124 09:05:18.616245 696018 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1124 09:05:18.616301 696018 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-820576
I1124 09:05:18.646443 696018 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21978-435860/.minikube/machines/no-preload-820576/id_rsa Username:docker}
I1124 09:05:18.647885 696018 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1124 09:05:18.647963 696018 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1124 09:05:18.648059 696018 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-820576
I1124 09:05:18.675529 696018 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21978-435860/.minikube/machines/no-preload-820576/id_rsa Username:docker}
I1124 09:05:18.685797 696018 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.85.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I1124 09:05:18.752704 696018 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1124 09:05:18.775922 696018 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1124 09:05:18.800792 696018 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1124 09:05:18.878758 696018 start.go:977] {"host.minikube.internal": 192.168.85.1} host record injected into CoreDNS's ConfigMap
I1124 09:05:18.880873 696018 node_ready.go:35] waiting up to 6m0s for node "no-preload-820576" to be "Ready" ...
I1124 09:05:19.096304 696018 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
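[annotation] Both clusters above enable addons the same way: stage the manifest under /etc/kubernetes/addons (scp'd over SSH in minikube; written locally in this sketch) and apply it with the version-pinned kubectl, passing KUBECONFIG through sudo exactly as the logged command lines show. A hypothetical condensation, not the addons.go implementation:

```go
package main

import (
	"os"
	"os/exec"
	"path/filepath"
)

func applyAddon(kubectl, name string, manifest []byte) error {
	path := filepath.Join("/etc/kubernetes/addons", name)
	if err := os.WriteFile(path, manifest, 0o644); err != nil {
		return err
	}
	// `sudo VAR=value cmd` sets the variable inside the sudo'd environment,
	// matching the log's "sudo KUBECONFIG=... kubectl apply -f ..."
	return exec.Command("sudo", "KUBECONFIG=/var/lib/minikube/kubeconfig",
		kubectl, "apply", "-f", path).Run()
}

func main() {
	manifest := []byte("# storage-provisioner.yaml contents go here\n")
	_ = applyAddon("/var/lib/minikube/binaries/v1.28.0/kubectl",
		"storage-provisioner.yaml", manifest)
}
```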
I1124 09:05:14.921188 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:14.921633 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:15.421327 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:15.421818 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:15.920573 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:15.921034 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:16.421282 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:16.421841 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:16.921386 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:16.921942 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:17.420551 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:17.421007 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:17.920666 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:17.921181 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:18.420539 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:18.421011 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:18.920611 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:18.921079 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:19.420539 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:19.421004 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:17.889849 695520 addons.go:530] duration metric: took 627.656763ms for enable addons: enabled=[storage-provisioner default-storageclass]
I1124 09:05:18.137738 695520 kapi.go:214] "coredns" deployment in "kube-system" namespace and "old-k8s-version-128377" context rescaled to 1 replicas
W1124 09:05:19.637948 695520 node_ready.go:57] node "old-k8s-version-128377" has "Ready":"False" status (will retry)
I1124 09:05:19.097398 696018 addons.go:530] duration metric: took 506.310963ms for enable addons: enabled=[storage-provisioner default-storageclass]
I1124 09:05:19.383938 696018 kapi.go:214] "coredns" deployment in "kube-system" namespace and "no-preload-820576" context rescaled to 1 replicas
W1124 09:05:20.884989 696018 node_ready.go:57] node "no-preload-820576" has "Ready":"False" status (will retry)
I1124 09:05:19.920806 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:19.921207 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:20.420831 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:20.421312 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:20.920613 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:20.921185 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:21.420832 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:21.421240 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:21.920531 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 09:05:21.921019 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 09:05:22.420552 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
W1124 09:05:21.638057 695520 node_ready.go:57] node "old-k8s-version-128377" has "Ready":"False" status (will retry)
W1124 09:05:23.638668 695520 node_ready.go:57] node "old-k8s-version-128377" has "Ready":"False" status (will retry)
W1124 09:05:26.137883 695520 node_ready.go:57] node "old-k8s-version-128377" has "Ready":"False" status (will retry)
W1124 09:05:23.383937 696018 node_ready.go:57] node "no-preload-820576" has "Ready":"False" status (will retry)
W1124 09:05:25.384443 696018 node_ready.go:57] node "no-preload-820576" has "Ready":"False" status (will retry)
I1124 09:05:27.421276 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1124 09:05:27.421318 685562 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
W1124 09:05:28.138098 695520 node_ready.go:57] node "old-k8s-version-128377" has "Ready":"False" status (will retry)
W1124 09:05:30.638120 695520 node_ready.go:57] node "old-k8s-version-128377" has "Ready":"False" status (will retry)
W1124 09:05:27.884284 696018 node_ready.go:57] node "no-preload-820576" has "Ready":"False" status (will retry)
W1124 09:05:29.884474 696018 node_ready.go:57] node "no-preload-820576" has "Ready":"False" status (will retry)
W1124 09:05:32.384199 696018 node_ready.go:57] node "no-preload-820576" has "Ready":"False" status (will retry)
I1124 09:05:31.637332 695520 node_ready.go:49] node "old-k8s-version-128377" is "Ready"
I1124 09:05:31.637368 695520 node_ready.go:38] duration metric: took 14.003009675s for node "old-k8s-version-128377" to be "Ready" ...
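[annotation] The node_ready.go wait that just completed (14s of "Ready":"False" retries, then "Ready") is a poll on the node's Ready condition. A sketch of the equivalent check done directly with client-go, whereas minikube shells out; the kubeconfig path and node name are from the log:

```go
package main

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func nodeReady(cs *kubernetes.Clientset, name string) (bool, error) {
	node, err := cs.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return false, err
	}
	for _, c := range node.Status.Conditions {
		if c.Type == corev1.NodeReady {
			return c.Status == corev1.ConditionTrue, nil
		}
	}
	return false, nil
}

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("",
		"/home/jenkins/minikube-integration/21978-435860/kubeconfig")
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	for deadline := time.Now().Add(6 * time.Minute); time.Now().Before(deadline); time.Sleep(2 * time.Second) {
		if ok, _ := nodeReady(cs, "old-k8s-version-128377"); ok {
			fmt.Println(`node "old-k8s-version-128377" is "Ready"`)
			return
		}
	}
	fmt.Println("timed out waiting for node to be Ready")
}
```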
I1124 09:05:31.637385 695520 api_server.go:52] waiting for apiserver process to appear ...
I1124 09:05:31.637443 695520 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1124 09:05:31.650126 695520 api_server.go:72] duration metric: took 14.387953281s to wait for apiserver process to appear ...
I1124 09:05:31.650156 695520 api_server.go:88] waiting for apiserver healthz status ...
I1124 09:05:31.650179 695520 api_server.go:253] Checking apiserver healthz at https://192.168.103.2:8443/healthz ...
I1124 09:05:31.654078 695520 api_server.go:279] https://192.168.103.2:8443/healthz returned 200:
ok
I1124 09:05:31.655253 695520 api_server.go:141] control plane version: v1.28.0
I1124 09:05:31.655280 695520 api_server.go:131] duration metric: took 5.117021ms to wait for apiserver health ...
I1124 09:05:31.655289 695520 system_pods.go:43] waiting for kube-system pods to appear ...
I1124 09:05:31.658830 695520 system_pods.go:59] 8 kube-system pods found
I1124 09:05:31.658868 695520 system_pods.go:61] "coredns-5dd5756b68-vxxnm" [b84bae0f-9f75-4d1c-b2ed-da0c10a141cf] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 09:05:31.658877 695520 system_pods.go:61] "etcd-old-k8s-version-128377" [57d9a965-4f1a-455f-beec-16601bd921e2] Running
I1124 09:05:31.658889 695520 system_pods.go:61] "kindnet-gbp66" [49954742-ea7f-466f-80d8-7d6ac88ce36c] Running
I1124 09:05:31.658895 695520 system_pods.go:61] "kube-apiserver-old-k8s-version-128377" [08c8bb94-e597-4293-80f1-0981f51b22a4] Running
I1124 09:05:31.658906 695520 system_pods.go:61] "kube-controller-manager-old-k8s-version-128377" [1f721a4b-e1c3-4e18-92b4-13673dc37600] Running
I1124 09:05:31.658910 695520 system_pods.go:61] "kube-proxy-fpbs2" [52128126-550d-4795-9fa1-e1d3d9510dd3] Running
I1124 09:05:31.658916 695520 system_pods.go:61] "kube-scheduler-old-k8s-version-128377" [399dcc23-9970-4146-82b3-c72d3e5f621b] Running
I1124 09:05:31.658921 695520 system_pods.go:61] "storage-provisioner" [7e4f56c0-0b49-47cd-9278-129ad898b781] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 09:05:31.658927 695520 system_pods.go:74] duration metric: took 3.632262ms to wait for pod list to return data ...
I1124 09:05:31.658936 695520 default_sa.go:34] waiting for default service account to be created ...
I1124 09:05:31.660923 695520 default_sa.go:45] found service account: "default"
I1124 09:05:31.660942 695520 default_sa.go:55] duration metric: took 2.000088ms for default service account to be created ...
I1124 09:05:31.660950 695520 system_pods.go:116] waiting for k8s-apps to be running ...
I1124 09:05:31.664223 695520 system_pods.go:86] 8 kube-system pods found
I1124 09:05:31.664263 695520 system_pods.go:89] "coredns-5dd5756b68-vxxnm" [b84bae0f-9f75-4d1c-b2ed-da0c10a141cf] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 09:05:31.664272 695520 system_pods.go:89] "etcd-old-k8s-version-128377" [57d9a965-4f1a-455f-beec-16601bd921e2] Running
I1124 09:05:31.664280 695520 system_pods.go:89] "kindnet-gbp66" [49954742-ea7f-466f-80d8-7d6ac88ce36c] Running
I1124 09:05:31.664284 695520 system_pods.go:89] "kube-apiserver-old-k8s-version-128377" [08c8bb94-e597-4293-80f1-0981f51b22a4] Running
I1124 09:05:31.664287 695520 system_pods.go:89] "kube-controller-manager-old-k8s-version-128377" [1f721a4b-e1c3-4e18-92b4-13673dc37600] Running
I1124 09:05:31.664291 695520 system_pods.go:89] "kube-proxy-fpbs2" [52128126-550d-4795-9fa1-e1d3d9510dd3] Running
I1124 09:05:31.664294 695520 system_pods.go:89] "kube-scheduler-old-k8s-version-128377" [399dcc23-9970-4146-82b3-c72d3e5f621b] Running
I1124 09:05:31.664300 695520 system_pods.go:89] "storage-provisioner" [7e4f56c0-0b49-47cd-9278-129ad898b781] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 09:05:31.664333 695520 retry.go:31] will retry after 195.108791ms: missing components: kube-dns
I1124 09:05:31.863438 695520 system_pods.go:86] 8 kube-system pods found
I1124 09:05:31.863494 695520 system_pods.go:89] "coredns-5dd5756b68-vxxnm" [b84bae0f-9f75-4d1c-b2ed-da0c10a141cf] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 09:05:31.863505 695520 system_pods.go:89] "etcd-old-k8s-version-128377" [57d9a965-4f1a-455f-beec-16601bd921e2] Running
I1124 09:05:31.863515 695520 system_pods.go:89] "kindnet-gbp66" [49954742-ea7f-466f-80d8-7d6ac88ce36c] Running
I1124 09:05:31.863520 695520 system_pods.go:89] "kube-apiserver-old-k8s-version-128377" [08c8bb94-e597-4293-80f1-0981f51b22a4] Running
I1124 09:05:31.863525 695520 system_pods.go:89] "kube-controller-manager-old-k8s-version-128377" [1f721a4b-e1c3-4e18-92b4-13673dc37600] Running
I1124 09:05:31.863528 695520 system_pods.go:89] "kube-proxy-fpbs2" [52128126-550d-4795-9fa1-e1d3d9510dd3] Running
I1124 09:05:31.863540 695520 system_pods.go:89] "kube-scheduler-old-k8s-version-128377" [399dcc23-9970-4146-82b3-c72d3e5f621b] Running
I1124 09:05:31.863557 695520 system_pods.go:89] "storage-provisioner" [7e4f56c0-0b49-47cd-9278-129ad898b781] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 09:05:31.863579 695520 retry.go:31] will retry after 244.252087ms: missing components: kube-dns
I1124 09:05:32.111547 695520 system_pods.go:86] 8 kube-system pods found
I1124 09:05:32.111586 695520 system_pods.go:89] "coredns-5dd5756b68-vxxnm" [b84bae0f-9f75-4d1c-b2ed-da0c10a141cf] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 09:05:32.111595 695520 system_pods.go:89] "etcd-old-k8s-version-128377" [57d9a965-4f1a-455f-beec-16601bd921e2] Running
I1124 09:05:32.111603 695520 system_pods.go:89] "kindnet-gbp66" [49954742-ea7f-466f-80d8-7d6ac88ce36c] Running
I1124 09:05:32.111608 695520 system_pods.go:89] "kube-apiserver-old-k8s-version-128377" [08c8bb94-e597-4293-80f1-0981f51b22a4] Running
I1124 09:05:32.111614 695520 system_pods.go:89] "kube-controller-manager-old-k8s-version-128377" [1f721a4b-e1c3-4e18-92b4-13673dc37600] Running
I1124 09:05:32.111628 695520 system_pods.go:89] "kube-proxy-fpbs2" [52128126-550d-4795-9fa1-e1d3d9510dd3] Running
I1124 09:05:32.111634 695520 system_pods.go:89] "kube-scheduler-old-k8s-version-128377" [399dcc23-9970-4146-82b3-c72d3e5f621b] Running
I1124 09:05:32.111641 695520 system_pods.go:89] "storage-provisioner" [7e4f56c0-0b49-47cd-9278-129ad898b781] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 09:05:32.111660 695520 retry.go:31] will retry after 471.342676ms: missing components: kube-dns
I1124 09:05:32.587354 695520 system_pods.go:86] 8 kube-system pods found
I1124 09:05:32.587384 695520 system_pods.go:89] "coredns-5dd5756b68-vxxnm" [b84bae0f-9f75-4d1c-b2ed-da0c10a141cf] Running
I1124 09:05:32.587389 695520 system_pods.go:89] "etcd-old-k8s-version-128377" [57d9a965-4f1a-455f-beec-16601bd921e2] Running
I1124 09:05:32.587393 695520 system_pods.go:89] "kindnet-gbp66" [49954742-ea7f-466f-80d8-7d6ac88ce36c] Running
I1124 09:05:32.587397 695520 system_pods.go:89] "kube-apiserver-old-k8s-version-128377" [08c8bb94-e597-4293-80f1-0981f51b22a4] Running
I1124 09:05:32.587402 695520 system_pods.go:89] "kube-controller-manager-old-k8s-version-128377" [1f721a4b-e1c3-4e18-92b4-13673dc37600] Running
I1124 09:05:32.587405 695520 system_pods.go:89] "kube-proxy-fpbs2" [52128126-550d-4795-9fa1-e1d3d9510dd3] Running
I1124 09:05:32.587408 695520 system_pods.go:89] "kube-scheduler-old-k8s-version-128377" [399dcc23-9970-4146-82b3-c72d3e5f621b] Running
I1124 09:05:32.587411 695520 system_pods.go:89] "storage-provisioner" [7e4f56c0-0b49-47cd-9278-129ad898b781] Running
I1124 09:05:32.587420 695520 system_pods.go:126] duration metric: took 926.463548ms to wait for k8s-apps to be running ...
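[annotation] The "retry.go:31] will retry after ..." lines above (195ms, 244ms, 471ms) show a jittered, roughly doubling backoff while kube-dns comes up. A hypothetical equivalent; minikube's actual base interval, growth factor, and jitter range may differ:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func waitForComponents(running func() bool) error {
	base := 200 * time.Millisecond
	for deadline := time.Now().Add(2 * time.Minute); time.Now().Before(deadline); {
		if running() {
			return nil
		}
		// sleep somewhere in [base/2, base), then grow the base
		d := base/2 + time.Duration(rand.Int63n(int64(base/2)))
		fmt.Printf("will retry after %v: missing components: kube-dns\n", d)
		time.Sleep(d)
		base *= 2
	}
	return fmt.Errorf("kube-dns never became Running")
}

func main() {
	attempts := 0
	_ = waitForComponents(func() bool { attempts++; return attempts > 3 })
}
```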
I1124 09:05:32.587428 695520 system_svc.go:44] waiting for kubelet service to be running ....
I1124 09:05:32.587503 695520 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1124 09:05:32.602305 695520 system_svc.go:56] duration metric: took 14.864147ms WaitForService to wait for kubelet
I1124 09:05:32.602336 695520 kubeadm.go:587] duration metric: took 15.340181249s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1124 09:05:32.602385 695520 node_conditions.go:102] verifying NodePressure condition ...
I1124 09:05:32.605212 695520 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I1124 09:05:32.605242 695520 node_conditions.go:123] node cpu capacity is 8
I1124 09:05:32.605271 695520 node_conditions.go:105] duration metric: took 2.87532ms to run NodePressure ...
I1124 09:05:32.605293 695520 start.go:242] waiting for startup goroutines ...
I1124 09:05:32.605308 695520 start.go:247] waiting for cluster config update ...
I1124 09:05:32.605327 695520 start.go:256] writing updated cluster config ...
I1124 09:05:32.605690 695520 ssh_runner.go:195] Run: rm -f paused
I1124 09:05:32.610319 695520 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1124 09:05:32.614557 695520 pod_ready.go:83] waiting for pod "coredns-5dd5756b68-vxxnm" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:32.619322 695520 pod_ready.go:94] pod "coredns-5dd5756b68-vxxnm" is "Ready"
I1124 09:05:32.619349 695520 pod_ready.go:86] duration metric: took 4.765973ms for pod "coredns-5dd5756b68-vxxnm" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:32.622417 695520 pod_ready.go:83] waiting for pod "etcd-old-k8s-version-128377" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:32.626873 695520 pod_ready.go:94] pod "etcd-old-k8s-version-128377" is "Ready"
I1124 09:05:32.626900 695520 pod_ready.go:86] duration metric: took 4.45394ms for pod "etcd-old-k8s-version-128377" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:32.629800 695520 pod_ready.go:83] waiting for pod "kube-apiserver-old-k8s-version-128377" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:32.634310 695520 pod_ready.go:94] pod "kube-apiserver-old-k8s-version-128377" is "Ready"
I1124 09:05:32.634338 695520 pod_ready.go:86] duration metric: took 4.514426ms for pod "kube-apiserver-old-k8s-version-128377" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:32.637382 695520 pod_ready.go:83] waiting for pod "kube-controller-manager-old-k8s-version-128377" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:33.015375 695520 pod_ready.go:94] pod "kube-controller-manager-old-k8s-version-128377" is "Ready"
I1124 09:05:33.015406 695520 pod_ready.go:86] duration metric: took 378.000797ms for pod "kube-controller-manager-old-k8s-version-128377" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:33.215146 695520 pod_ready.go:83] waiting for pod "kube-proxy-fpbs2" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:33.614362 695520 pod_ready.go:94] pod "kube-proxy-fpbs2" is "Ready"
I1124 09:05:33.614392 695520 pod_ready.go:86] duration metric: took 399.215049ms for pod "kube-proxy-fpbs2" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:33.815166 695520 pod_ready.go:83] waiting for pod "kube-scheduler-old-k8s-version-128377" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:34.214969 695520 pod_ready.go:94] pod "kube-scheduler-old-k8s-version-128377" is "Ready"
I1124 09:05:34.214999 695520 pod_ready.go:86] duration metric: took 399.806564ms for pod "kube-scheduler-old-k8s-version-128377" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:34.215011 695520 pod_ready.go:40] duration metric: took 1.604660669s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1124 09:05:34.261989 695520 start.go:625] kubectl: 1.34.2, cluster: 1.28.0 (minor skew: 6)
I1124 09:05:34.263612 695520 out.go:203]
W1124 09:05:34.264723 695520 out.go:285] ! /usr/local/bin/kubectl is version 1.34.2, which may have incompatibilities with Kubernetes 1.28.0.
I1124 09:05:34.265770 695520 out.go:179] - Want kubectl v1.28.0? Try 'minikube kubectl -- get pods -A'
I1124 09:05:34.267170 695520 out.go:179] * Done! kubectl is now configured to use "old-k8s-version-128377" cluster and "default" namespace by default
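[annotation] The warning above comes from the skew check at start.go:625: kubectl is only supported within one minor version of the server, so the host's 1.34.2 against a 1.28.0 cluster (skew 6) warns, while the other cluster's 1.35.0-beta.0 (skew 1) will not. A sketch of that comparison; the parsing is simplified and not minikube's code:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func minor(v string) int {
	parts := strings.Split(strings.TrimPrefix(v, "v"), ".")
	m, _ := strconv.Atoi(parts[1])
	return m
}

func main() {
	client, cluster := "1.34.2", "1.28.0"
	skew := minor(client) - minor(cluster)
	if skew < 0 {
		skew = -skew
	}
	if skew > 1 {
		fmt.Printf("! kubectl %s may have incompatibilities with Kubernetes %s\n", client, cluster)
	}
}
```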
I1124 09:05:32.422898 685562 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1124 09:05:32.423021 685562 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1124 09:05:32.423106 685562 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1124 09:05:32.453902 685562 cri.go:89] found id: "1c3ac7689834f46a67038f7d9d8749dd11964dbb2214dc5f58152210452bc365"
I1124 09:05:32.453922 685562 cri.go:89] found id: "4d75c0e16a149ca1a7ec4e96d68718e51659aa9619085a44b28b38f4a7716680"
I1124 09:05:32.453927 685562 cri.go:89] found id: "7359853367f0edc54ad7b43f974b25c5e084487a9f1f0e85d38c8ad9736fcd00"
I1124 09:05:32.453929 685562 cri.go:89] found id: ""
I1124 09:05:32.453937 685562 logs.go:282] 3 containers: [1c3ac7689834f46a67038f7d9d8749dd11964dbb2214dc5f58152210452bc365 4d75c0e16a149ca1a7ec4e96d68718e51659aa9619085a44b28b38f4a7716680 7359853367f0edc54ad7b43f974b25c5e084487a9f1f0e85d38c8ad9736fcd00]
I1124 09:05:32.454000 685562 ssh_runner.go:195] Run: which crictl
I1124 09:05:32.458469 685562 ssh_runner.go:195] Run: which crictl
I1124 09:05:32.462439 685562 ssh_runner.go:195] Run: which crictl
I1124 09:05:32.466262 685562 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1124 09:05:32.466335 685562 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1124 09:05:32.496086 685562 cri.go:89] found id: "b0f5e195a2427e1475b232369ca31232e850412d5ccf99c87ab9d6ef0d230ec2"
I1124 09:05:32.496112 685562 cri.go:89] found id: ""
I1124 09:05:32.496122 685562 logs.go:282] 1 containers: [b0f5e195a2427e1475b232369ca31232e850412d5ccf99c87ab9d6ef0d230ec2]
I1124 09:05:32.496186 685562 ssh_runner.go:195] Run: which crictl
I1124 09:05:32.500443 685562 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1124 09:05:32.500532 685562 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1124 09:05:32.528567 685562 cri.go:89] found id: ""
I1124 09:05:32.528602 685562 logs.go:282] 0 containers: []
W1124 09:05:32.528610 685562 logs.go:284] No container was found matching "coredns"
I1124 09:05:32.528617 685562 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1124 09:05:32.528677 685562 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1124 09:05:32.557355 685562 cri.go:89] found id: "b018c37b5155a45849bf7701c25cfd1ff2e5d08a4a174fd7447b3d1e5014bc17"
I1124 09:05:32.557375 685562 cri.go:89] found id: "beba2c039cf143777ad7314b49e8a78d52025ed5525530635c9debdb1ab66ce9"
I1124 09:05:32.557379 685562 cri.go:89] found id: ""
I1124 09:05:32.557388 685562 logs.go:282] 2 containers: [b018c37b5155a45849bf7701c25cfd1ff2e5d08a4a174fd7447b3d1e5014bc17 beba2c039cf143777ad7314b49e8a78d52025ed5525530635c9debdb1ab66ce9]
I1124 09:05:32.557445 685562 ssh_runner.go:195] Run: which crictl
I1124 09:05:32.561666 685562 ssh_runner.go:195] Run: which crictl
I1124 09:05:32.565691 685562 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1124 09:05:32.565776 685562 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1124 09:05:32.594818 685562 cri.go:89] found id: ""
I1124 09:05:32.594841 685562 logs.go:282] 0 containers: []
W1124 09:05:32.594848 685562 logs.go:284] No container was found matching "kube-proxy"
I1124 09:05:32.594855 685562 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1124 09:05:32.594900 685562 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1124 09:05:32.625049 685562 cri.go:89] found id: "4fe764a0d4480b2b9c1a7e51dc63c845a71b6a2a78a4861dbbf794ad3bd3079d"
I1124 09:05:32.625068 685562 cri.go:89] found id: "87fb36f1d5c6bc7114bcd8099f1af4b27cea41c648c6e97f4789f111172ccbb0"
I1124 09:05:32.625073 685562 cri.go:89] found id: "c70fdaa8d0b65a6cc40d923809782c40bad08a66e1cd7ef35c3bd63e2344a7d0"
I1124 09:05:32.625078 685562 cri.go:89] found id: ""
I1124 09:05:32.625087 685562 logs.go:282] 3 containers: [4fe764a0d4480b2b9c1a7e51dc63c845a71b6a2a78a4861dbbf794ad3bd3079d 87fb36f1d5c6bc7114bcd8099f1af4b27cea41c648c6e97f4789f111172ccbb0 c70fdaa8d0b65a6cc40d923809782c40bad08a66e1cd7ef35c3bd63e2344a7d0]
I1124 09:05:32.625142 685562 ssh_runner.go:195] Run: which crictl
I1124 09:05:32.630042 685562 ssh_runner.go:195] Run: which crictl
I1124 09:05:32.634965 685562 ssh_runner.go:195] Run: which crictl
I1124 09:05:32.639315 685562 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1124 09:05:32.639376 685562 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1124 09:05:32.669355 685562 cri.go:89] found id: ""
I1124 09:05:32.669384 685562 logs.go:282] 0 containers: []
W1124 09:05:32.669392 685562 logs.go:284] No container was found matching "kindnet"
I1124 09:05:32.669398 685562 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1124 09:05:32.669449 685562 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1124 09:05:32.697559 685562 cri.go:89] found id: ""
I1124 09:05:32.697586 685562 logs.go:282] 0 containers: []
W1124 09:05:32.697596 685562 logs.go:284] No container was found matching "storage-provisioner"
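[annotation] With the apiserver down, process 685562 has switched to its log-gathering fallback: the cri.go loop above asks crictl for every container ID matching each control-plane component, and an empty result is only a warning, since a crashed cluster may legitimately have nothing running for proxy, DNS, or the provisioner. A sketch of that discovery pass:

```go
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// findContainers returns all container IDs (running or exited) whose
// name matches the given component; crictl prints one ID per line.
func findContainers(name string) []string {
	out, err := exec.Command("sudo", "crictl", "ps", "-a",
		"--quiet", "--name="+name).Output()
	if err != nil {
		return nil
	}
	return strings.Fields(string(out))
}

func main() {
	for _, c := range []string{"kube-apiserver", "etcd", "coredns",
		"kube-scheduler", "kube-proxy", "kube-controller-manager",
		"kindnet", "storage-provisioner"} {
		ids := findContainers(c)
		if len(ids) == 0 {
			fmt.Printf("W: no container found matching %q\n", c)
			continue
		}
		fmt.Printf("%d containers: %v\n", len(ids), ids)
	}
}
```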
I1124 09:05:32.697610 685562 logs.go:123] Gathering logs for containerd ...
I1124 09:05:32.697645 685562 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1124 09:05:32.736120 685562 logs.go:123] Gathering logs for container status ...
I1124 09:05:32.736153 685562 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1124 09:05:32.768484 685562 logs.go:123] Gathering logs for kubelet ...
I1124 09:05:32.768526 685562 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1124 09:05:32.836058 685562 logs.go:123] Gathering logs for dmesg ...
I1124 09:05:32.836100 685562 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1124 09:05:32.853541 685562 logs.go:123] Gathering logs for describe nodes ...
I1124 09:05:32.853613 685562 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
I1124 09:05:33.384739 696018 node_ready.go:49] node "no-preload-820576" is "Ready"
I1124 09:05:33.384778 696018 node_ready.go:38] duration metric: took 14.503869435s for node "no-preload-820576" to be "Ready" ...
I1124 09:05:33.384797 696018 api_server.go:52] waiting for apiserver process to appear ...
I1124 09:05:33.384861 696018 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1124 09:05:33.401268 696018 api_server.go:72] duration metric: took 14.81022929s to wait for apiserver process to appear ...
I1124 09:05:33.401299 696018 api_server.go:88] waiting for apiserver healthz status ...
I1124 09:05:33.401324 696018 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8443/healthz ...
I1124 09:05:33.406015 696018 api_server.go:279] https://192.168.85.2:8443/healthz returned 200:
ok
I1124 09:05:33.407175 696018 api_server.go:141] control plane version: v1.35.0-beta.0
I1124 09:05:33.407215 696018 api_server.go:131] duration metric: took 5.908148ms to wait for apiserver health ...
I1124 09:05:33.407226 696018 system_pods.go:43] waiting for kube-system pods to appear ...
I1124 09:05:33.410293 696018 system_pods.go:59] 8 kube-system pods found
I1124 09:05:33.410331 696018 system_pods.go:61] "coredns-7d764666f9-b6dpn" [c84a0b09-07a2-4e6a-928a-b9aca9e3b1a1] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 09:05:33.410338 696018 system_pods.go:61] "etcd-no-preload-820576" [39f892d7-184f-4858-be8f-174718ac6aaf] Running
I1124 09:05:33.410346 696018 system_pods.go:61] "kindnet-kvm52" [967c23e8-7e42-4034-b5a2-e4cd65bc4d94] Running
I1124 09:05:33.410352 696018 system_pods.go:61] "kube-apiserver-no-preload-820576" [d5294a7a-2337-4ef4-82a2-20d85daf8739] Running
I1124 09:05:33.410360 696018 system_pods.go:61] "kube-controller-manager-no-preload-820576" [e6320a0d-f5cf-4a17-af3d-6fa87f1e02ad] Running
I1124 09:05:33.410365 696018 system_pods.go:61] "kube-proxy-vz24l" [4a64a474-1e1b-411d-aea6-9d12e1d9f84e] Running
I1124 09:05:33.410369 696018 system_pods.go:61] "kube-scheduler-no-preload-820576" [9fd536e3-1a01-4c16-bf46-75db8f38b3f4] Running
I1124 09:05:33.410382 696018 system_pods.go:61] "storage-provisioner" [144d237b-4f80-441d-867b-0ee26edd8590] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 09:05:33.410391 696018 system_pods.go:74] duration metric: took 3.156993ms to wait for pod list to return data ...
I1124 09:05:33.410403 696018 default_sa.go:34] waiting for default service account to be created ...
I1124 09:05:33.413158 696018 default_sa.go:45] found service account: "default"
I1124 09:05:33.413182 696018 default_sa.go:55] duration metric: took 2.772178ms for default service account to be created ...
I1124 09:05:33.413192 696018 system_pods.go:116] waiting for k8s-apps to be running ...
I1124 09:05:33.416818 696018 system_pods.go:86] 8 kube-system pods found
I1124 09:05:33.416849 696018 system_pods.go:89] "coredns-7d764666f9-b6dpn" [c84a0b09-07a2-4e6a-928a-b9aca9e3b1a1] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 09:05:33.416856 696018 system_pods.go:89] "etcd-no-preload-820576" [39f892d7-184f-4858-be8f-174718ac6aaf] Running
I1124 09:05:33.416863 696018 system_pods.go:89] "kindnet-kvm52" [967c23e8-7e42-4034-b5a2-e4cd65bc4d94] Running
I1124 09:05:33.416868 696018 system_pods.go:89] "kube-apiserver-no-preload-820576" [d5294a7a-2337-4ef4-82a2-20d85daf8739] Running
I1124 09:05:33.416874 696018 system_pods.go:89] "kube-controller-manager-no-preload-820576" [e6320a0d-f5cf-4a17-af3d-6fa87f1e02ad] Running
I1124 09:05:33.416879 696018 system_pods.go:89] "kube-proxy-vz24l" [4a64a474-1e1b-411d-aea6-9d12e1d9f84e] Running
I1124 09:05:33.416884 696018 system_pods.go:89] "kube-scheduler-no-preload-820576" [9fd536e3-1a01-4c16-bf46-75db8f38b3f4] Running
I1124 09:05:33.416891 696018 system_pods.go:89] "storage-provisioner" [144d237b-4f80-441d-867b-0ee26edd8590] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 09:05:33.416935 696018 retry.go:31] will retry after 275.944352ms: missing components: kube-dns
I1124 09:05:33.697203 696018 system_pods.go:86] 8 kube-system pods found
I1124 09:05:33.697247 696018 system_pods.go:89] "coredns-7d764666f9-b6dpn" [c84a0b09-07a2-4e6a-928a-b9aca9e3b1a1] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 09:05:33.697259 696018 system_pods.go:89] "etcd-no-preload-820576" [39f892d7-184f-4858-be8f-174718ac6aaf] Running
I1124 09:05:33.697269 696018 system_pods.go:89] "kindnet-kvm52" [967c23e8-7e42-4034-b5a2-e4cd65bc4d94] Running
I1124 09:05:33.697274 696018 system_pods.go:89] "kube-apiserver-no-preload-820576" [d5294a7a-2337-4ef4-82a2-20d85daf8739] Running
I1124 09:05:33.697285 696018 system_pods.go:89] "kube-controller-manager-no-preload-820576" [e6320a0d-f5cf-4a17-af3d-6fa87f1e02ad] Running
I1124 09:05:33.697290 696018 system_pods.go:89] "kube-proxy-vz24l" [4a64a474-1e1b-411d-aea6-9d12e1d9f84e] Running
I1124 09:05:33.697297 696018 system_pods.go:89] "kube-scheduler-no-preload-820576" [9fd536e3-1a01-4c16-bf46-75db8f38b3f4] Running
I1124 09:05:33.697304 696018 system_pods.go:89] "storage-provisioner" [144d237b-4f80-441d-867b-0ee26edd8590] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 09:05:33.697327 696018 retry.go:31] will retry after 278.68714ms: missing components: kube-dns
I1124 09:05:33.979933 696018 system_pods.go:86] 8 kube-system pods found
I1124 09:05:33.979971 696018 system_pods.go:89] "coredns-7d764666f9-b6dpn" [c84a0b09-07a2-4e6a-928a-b9aca9e3b1a1] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 09:05:33.979977 696018 system_pods.go:89] "etcd-no-preload-820576" [39f892d7-184f-4858-be8f-174718ac6aaf] Running
I1124 09:05:33.979984 696018 system_pods.go:89] "kindnet-kvm52" [967c23e8-7e42-4034-b5a2-e4cd65bc4d94] Running
I1124 09:05:33.979987 696018 system_pods.go:89] "kube-apiserver-no-preload-820576" [d5294a7a-2337-4ef4-82a2-20d85daf8739] Running
I1124 09:05:33.979991 696018 system_pods.go:89] "kube-controller-manager-no-preload-820576" [e6320a0d-f5cf-4a17-af3d-6fa87f1e02ad] Running
I1124 09:05:33.979994 696018 system_pods.go:89] "kube-proxy-vz24l" [4a64a474-1e1b-411d-aea6-9d12e1d9f84e] Running
I1124 09:05:33.979998 696018 system_pods.go:89] "kube-scheduler-no-preload-820576" [9fd536e3-1a01-4c16-bf46-75db8f38b3f4] Running
I1124 09:05:33.980003 696018 system_pods.go:89] "storage-provisioner" [144d237b-4f80-441d-867b-0ee26edd8590] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 09:05:33.980020 696018 retry.go:31] will retry after 448.083964ms: missing components: kube-dns
I1124 09:05:34.432301 696018 system_pods.go:86] 8 kube-system pods found
I1124 09:05:34.432341 696018 system_pods.go:89] "coredns-7d764666f9-b6dpn" [c84a0b09-07a2-4e6a-928a-b9aca9e3b1a1] Running
I1124 09:05:34.432350 696018 system_pods.go:89] "etcd-no-preload-820576" [39f892d7-184f-4858-be8f-174718ac6aaf] Running
I1124 09:05:34.432355 696018 system_pods.go:89] "kindnet-kvm52" [967c23e8-7e42-4034-b5a2-e4cd65bc4d94] Running
I1124 09:05:34.432362 696018 system_pods.go:89] "kube-apiserver-no-preload-820576" [d5294a7a-2337-4ef4-82a2-20d85daf8739] Running
I1124 09:05:34.432369 696018 system_pods.go:89] "kube-controller-manager-no-preload-820576" [e6320a0d-f5cf-4a17-af3d-6fa87f1e02ad] Running
I1124 09:05:34.432374 696018 system_pods.go:89] "kube-proxy-vz24l" [4a64a474-1e1b-411d-aea6-9d12e1d9f84e] Running
I1124 09:05:34.432379 696018 system_pods.go:89] "kube-scheduler-no-preload-820576" [9fd536e3-1a01-4c16-bf46-75db8f38b3f4] Running
I1124 09:05:34.432384 696018 system_pods.go:89] "storage-provisioner" [144d237b-4f80-441d-867b-0ee26edd8590] Running
I1124 09:05:34.432395 696018 system_pods.go:126] duration metric: took 1.019195458s to wait for k8s-apps to be running ...
I1124 09:05:34.432410 696018 system_svc.go:44] waiting for kubelet service to be running ....
I1124 09:05:34.432534 696018 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1124 09:05:34.451401 696018 system_svc.go:56] duration metric: took 18.978773ms WaitForService to wait for kubelet
I1124 09:05:34.451444 696018 kubeadm.go:587] duration metric: took 15.860405681s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1124 09:05:34.451483 696018 node_conditions.go:102] verifying NodePressure condition ...
I1124 09:05:34.454386 696018 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I1124 09:05:34.454410 696018 node_conditions.go:123] node cpu capacity is 8
I1124 09:05:34.454427 696018 node_conditions.go:105] duration metric: took 2.938205ms to run NodePressure ...
I1124 09:05:34.454440 696018 start.go:242] waiting for startup goroutines ...
I1124 09:05:34.454450 696018 start.go:247] waiting for cluster config update ...
I1124 09:05:34.454478 696018 start.go:256] writing updated cluster config ...
I1124 09:05:34.454771 696018 ssh_runner.go:195] Run: rm -f paused
I1124 09:05:34.459160 696018 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1124 09:05:34.462567 696018 pod_ready.go:83] waiting for pod "coredns-7d764666f9-b6dpn" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:34.466303 696018 pod_ready.go:94] pod "coredns-7d764666f9-b6dpn" is "Ready"
I1124 09:05:34.466324 696018 pod_ready.go:86] duration metric: took 3.738029ms for pod "coredns-7d764666f9-b6dpn" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:34.468156 696018 pod_ready.go:83] waiting for pod "etcd-no-preload-820576" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:34.471750 696018 pod_ready.go:94] pod "etcd-no-preload-820576" is "Ready"
I1124 09:05:34.471775 696018 pod_ready.go:86] duration metric: took 3.597676ms for pod "etcd-no-preload-820576" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:34.473507 696018 pod_ready.go:83] waiting for pod "kube-apiserver-no-preload-820576" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:34.477092 696018 pod_ready.go:94] pod "kube-apiserver-no-preload-820576" is "Ready"
I1124 09:05:34.477115 696018 pod_ready.go:86] duration metric: took 3.588223ms for pod "kube-apiserver-no-preload-820576" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:34.478724 696018 pod_ready.go:83] waiting for pod "kube-controller-manager-no-preload-820576" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:34.862953 696018 pod_ready.go:94] pod "kube-controller-manager-no-preload-820576" is "Ready"
I1124 09:05:34.862977 696018 pod_ready.go:86] duration metric: took 384.235741ms for pod "kube-controller-manager-no-preload-820576" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:35.063039 696018 pod_ready.go:83] waiting for pod "kube-proxy-vz24l" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:35.463183 696018 pod_ready.go:94] pod "kube-proxy-vz24l" is "Ready"
I1124 09:05:35.463217 696018 pod_ready.go:86] duration metric: took 400.149042ms for pod "kube-proxy-vz24l" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:35.664151 696018 pod_ready.go:83] waiting for pod "kube-scheduler-no-preload-820576" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:36.063590 696018 pod_ready.go:94] pod "kube-scheduler-no-preload-820576" is "Ready"
I1124 09:05:36.063619 696018 pod_ready.go:86] duration metric: took 399.441074ms for pod "kube-scheduler-no-preload-820576" in "kube-system" namespace to be "Ready" or be gone ...
I1124 09:05:36.063632 696018 pod_ready.go:40] duration metric: took 1.604443296s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1124 09:05:36.110852 696018 start.go:625] kubectl: 1.34.2, cluster: 1.35.0-beta.0 (minor skew: 1)
I1124 09:05:36.112796 696018 out.go:179] * Done! kubectl is now configured to use "no-preload-820576" cluster and "default" namespace by default
I1124 09:05:43.195573 685562 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": (10.341935277s)
W1124 09:05:43.195644 685562 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0-beta.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
Get "https://localhost:8443/api/v1/nodes?limit=500": dial tcp [::1]:8443: connect: connection refused - error from a previous attempt: read tcp [::1]:44544->[::1]:8443: read: connection reset by peer
output:
** stderr **
Get "https://localhost:8443/api/v1/nodes?limit=500": dial tcp [::1]:8443: connect: connection refused - error from a previous attempt: read tcp [::1]:44544->[::1]:8443: read: connection reset by peer
** /stderr **
I1124 09:05:43.195660 685562 logs.go:123] Gathering logs for kube-apiserver [1c3ac7689834f46a67038f7d9d8749dd11964dbb2214dc5f58152210452bc365] ...
I1124 09:05:43.195679 685562 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1c3ac7689834f46a67038f7d9d8749dd11964dbb2214dc5f58152210452bc365"
I1124 09:05:43.229092 685562 logs.go:123] Gathering logs for kube-apiserver [4d75c0e16a149ca1a7ec4e96d68718e51659aa9619085a44b28b38f4a7716680] ...
I1124 09:05:43.229122 685562 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4d75c0e16a149ca1a7ec4e96d68718e51659aa9619085a44b28b38f4a7716680"
W1124 09:05:43.256709 685562 logs.go:130] failed kube-apiserver [4d75c0e16a149ca1a7ec4e96d68718e51659aa9619085a44b28b38f4a7716680]: command: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4d75c0e16a149ca1a7ec4e96d68718e51659aa9619085a44b28b38f4a7716680" /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4d75c0e16a149ca1a7ec4e96d68718e51659aa9619085a44b28b38f4a7716680": Process exited with status 1
stdout:
stderr:
E1124 09:05:43.254237 2218 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = an error occurred when try to find container \"4d75c0e16a149ca1a7ec4e96d68718e51659aa9619085a44b28b38f4a7716680\": not found" containerID="4d75c0e16a149ca1a7ec4e96d68718e51659aa9619085a44b28b38f4a7716680"
time="2025-11-24T09:05:43Z" level=fatal msg="rpc error: code = NotFound desc = an error occurred when try to find container \"4d75c0e16a149ca1a7ec4e96d68718e51659aa9619085a44b28b38f4a7716680\": not found"
output:
** stderr **
E1124 09:05:43.254237 2218 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = an error occurred when try to find container \"4d75c0e16a149ca1a7ec4e96d68718e51659aa9619085a44b28b38f4a7716680\": not found" containerID="4d75c0e16a149ca1a7ec4e96d68718e51659aa9619085a44b28b38f4a7716680"
time="2025-11-24T09:05:43Z" level=fatal msg="rpc error: code = NotFound desc = an error occurred when try to find container \"4d75c0e16a149ca1a7ec4e96d68718e51659aa9619085a44b28b38f4a7716680\": not found"
** /stderr **
I1124 09:05:43.256732 685562 logs.go:123] Gathering logs for kube-apiserver [7359853367f0edc54ad7b43f974b25c5e084487a9f1f0e85d38c8ad9736fcd00] ...
I1124 09:05:43.256745 685562 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 7359853367f0edc54ad7b43f974b25c5e084487a9f1f0e85d38c8ad9736fcd00"
I1124 09:05:43.296899 685562 logs.go:123] Gathering logs for kube-scheduler [b018c37b5155a45849bf7701c25cfd1ff2e5d08a4a174fd7447b3d1e5014bc17] ...
I1124 09:05:43.296933 685562 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 b018c37b5155a45849bf7701c25cfd1ff2e5d08a4a174fd7447b3d1e5014bc17"
I1124 09:05:43.327780 685562 logs.go:123] Gathering logs for kube-scheduler [beba2c039cf143777ad7314b49e8a78d52025ed5525530635c9debdb1ab66ce9] ...
I1124 09:05:43.327805 685562 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 beba2c039cf143777ad7314b49e8a78d52025ed5525530635c9debdb1ab66ce9"
I1124 09:05:43.363107 685562 logs.go:123] Gathering logs for etcd [b0f5e195a2427e1475b232369ca31232e850412d5ccf99c87ab9d6ef0d230ec2] ...
I1124 09:05:43.363150 685562 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 b0f5e195a2427e1475b232369ca31232e850412d5ccf99c87ab9d6ef0d230ec2"
I1124 09:05:43.395896 685562 logs.go:123] Gathering logs for kube-controller-manager [4fe764a0d4480b2b9c1a7e51dc63c845a71b6a2a78a4861dbbf794ad3bd3079d] ...
I1124 09:05:43.395929 685562 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4fe764a0d4480b2b9c1a7e51dc63c845a71b6a2a78a4861dbbf794ad3bd3079d"
I1124 09:05:43.423650 685562 logs.go:123] Gathering logs for kube-controller-manager [87fb36f1d5c6bc7114bcd8099f1af4b27cea41c648c6e97f4789f111172ccbb0] ...
I1124 09:05:43.423680 685562 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 87fb36f1d5c6bc7114bcd8099f1af4b27cea41c648c6e97f4789f111172ccbb0"
I1124 09:05:43.453581 685562 logs.go:123] Gathering logs for kube-controller-manager [c70fdaa8d0b65a6cc40d923809782c40bad08a66e1cd7ef35c3bd63e2344a7d0] ...
I1124 09:05:43.453608 685562 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 c70fdaa8d0b65a6cc40d923809782c40bad08a66e1cd7ef35c3bd63e2344a7d0"
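[editor's note] The log-gathering loop above runs `crictl logs --tail 400 <id>` per container and, as the failed 4d75c0e1... read shows, has to tolerate containers that were garbage-collected between listing and reading (crictl exits non-zero with a NotFound error). A minimal local sketch of that pattern follows; it assumes crictl at /usr/local/bin/crictl on the current host, whereas minikube actually runs these commands over SSH via its ssh_runner, and gatherLogs is a hypothetical helper, not minikube's logs.go.

// crictl_logs_sketch.go — hypothetical local sketch of the log-gathering loop.
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// gatherLogs runs `crictl logs --tail 400 <id>` for each container ID and
// keeps going when a container has already disappeared from the runtime.
func gatherLogs(ids []string) map[string]string {
	out := make(map[string]string, len(ids))
	for _, id := range ids {
		cmd := exec.Command("sudo", "/usr/local/bin/crictl", "logs", "--tail", "400", id)
		b, err := cmd.CombinedOutput() // stderr carries the NotFound message
		if err != nil {
			if strings.Contains(string(b), "NotFound") {
				continue // container rotated away between listing and reading
			}
			fmt.Printf("failed %s: %v\n", id, err)
			continue
		}
		out[id] = string(b)
	}
	return out
}

func main() {
	logs := gatherLogs([]string{"1c3ac7689834f46a67038f7d9d8749dd11964dbb2214dc5f58152210452bc365"})
	fmt.Printf("collected logs for %d container(s)\n", len(logs))
}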
==> container status <==
CONTAINER       IMAGE           CREATED          STATE     NAME                      ATTEMPT   POD ID          POD                                              NAMESPACE
92908e44718b7   56cc512116c8f   9 seconds ago    Running   busybox                   0         1ee15af433557   busybox                                          default
a7a841ea7303a   ead0a4a53df89   14 seconds ago   Running   coredns                   0         5cd1e9dd6b4b4   coredns-5dd5756b68-vxxnm                         kube-system
a9a5857553e67   6e38f40d628db   14 seconds ago   Running   storage-provisioner       0         6128b1854bc49   storage-provisioner                              kube-system
818537e08c060   409467f978b4a   25 seconds ago   Running   kindnet-cni               0         cd819a24f784f   kindnet-gbp66                                    kube-system
370631aaaf577   ea1030da44aa1   28 seconds ago   Running   kube-proxy                0         17a629fbc9de7   kube-proxy-fpbs2                                 kube-system
f5eddecfb179f   f6f496300a2ae   47 seconds ago   Running   kube-scheduler            0         d4658a7b318ec   kube-scheduler-old-k8s-version-128377            kube-system
5d9ec22e03b8b   4be79c38a4bab   47 seconds ago   Running   kube-controller-manager   0         f3a2eced02a3b   kube-controller-manager-old-k8s-version-128377   kube-system
842bd9db2d84b   bb5e0dde9054c   47 seconds ago   Running   kube-apiserver            0         879c975eb1a53   kube-apiserver-old-k8s-version-128377            kube-system
8df3112d99751   73deb9a3f7025   47 seconds ago   Running   etcd                      0         78f7483f85b14   etcd-old-k8s-version-128377                      kube-system
==> containerd <==
Nov 24 09:05:32 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:32.013913791Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-5dd5756b68-vxxnm,Uid:b84bae0f-9f75-4d1c-b2ed-da0c10a141cf,Namespace:kube-system,Attempt:0,} returns sandbox id \"5cd1e9dd6b4b4d2ac225fd496f6fac6cfc490bdb385b217119ffd695f763abf3\""
Nov 24 09:05:32 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:32.016899714Z" level=info msg="CreateContainer within sandbox \"5cd1e9dd6b4b4d2ac225fd496f6fac6cfc490bdb385b217119ffd695f763abf3\" for container &ContainerMetadata{Name:coredns,Attempt:0,}"
Nov 24 09:05:32 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:32.024116931Z" level=info msg="Container a7a841ea7303a40b7b557fbe769c57a1562346d875b1853a8a729ad668090cb5: CDI devices from CRI Config.CDIDevices: []"
Nov 24 09:05:32 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:32.030290587Z" level=info msg="CreateContainer within sandbox \"5cd1e9dd6b4b4d2ac225fd496f6fac6cfc490bdb385b217119ffd695f763abf3\" for &ContainerMetadata{Name:coredns,Attempt:0,} returns container id \"a7a841ea7303a40b7b557fbe769c57a1562346d875b1853a8a729ad668090cb5\""
Nov 24 09:05:32 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:32.030773995Z" level=info msg="StartContainer for \"a7a841ea7303a40b7b557fbe769c57a1562346d875b1853a8a729ad668090cb5\""
Nov 24 09:05:32 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:32.031567693Z" level=info msg="connecting to shim a7a841ea7303a40b7b557fbe769c57a1562346d875b1853a8a729ad668090cb5" address="unix:///run/containerd/s/7e80e31b141e93e01901781df29b4edcac7d62ec3fd02a2cc1cde1ffde438980" protocol=ttrpc version=3
Nov 24 09:05:32 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:32.070950416Z" level=info msg="StartContainer for \"a9a5857553e67019e47641c1970bb0d5555afd6b608c94a94501dd485efac0c4\" returns successfully"
Nov 24 09:05:32 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:32.075707267Z" level=info msg="StartContainer for \"a7a841ea7303a40b7b557fbe769c57a1562346d875b1853a8a729ad668090cb5\" returns successfully"
Nov 24 09:05:34 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:34.747845169Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:bfaec734-d874-4dcb-b31f-feb87adccfca,Namespace:default,Attempt:0,}"
Nov 24 09:05:34 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:34.786693345Z" level=info msg="connecting to shim 1ee15af4335571d5c2c1f8cf460b21232bfc82973349a4c00a86f5a2545492a2" address="unix:///run/containerd/s/b51cd8663d01a7c675d7f65aecc44f4b6281e3382088734fe56170e879775890" namespace=k8s.io protocol=ttrpc version=3
Nov 24 09:05:34 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:34.851781414Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:bfaec734-d874-4dcb-b31f-feb87adccfca,Namespace:default,Attempt:0,} returns sandbox id \"1ee15af4335571d5c2c1f8cf460b21232bfc82973349a4c00a86f5a2545492a2\""
Nov 24 09:05:34 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:34.853515051Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\""
Nov 24 09:05:37 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:37.357982384Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 24 09:05:37 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:37.358604580Z" level=info msg="stop pulling image gcr.io/k8s-minikube/busybox:1.28.4-glibc: active requests=0, bytes read=2396643"
Nov 24 09:05:37 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:37.359790616Z" level=info msg="ImageCreate event name:\"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 24 09:05:37 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:37.361443799Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 24 09:05:37 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:37.361898949Z" level=info msg="Pulled image \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" with image id \"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\", repo tag \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\", repo digest \"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\", size \"2395207\" in 2.508337162s"
Nov 24 09:05:37 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:37.361934177Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" returns image reference \"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\""
Nov 24 09:05:37 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:37.363533599Z" level=info msg="CreateContainer within sandbox \"1ee15af4335571d5c2c1f8cf460b21232bfc82973349a4c00a86f5a2545492a2\" for container &ContainerMetadata{Name:busybox,Attempt:0,}"
Nov 24 09:05:37 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:37.369396201Z" level=info msg="Container 92908e44718b76213a4fd87e310efd757d73940a581879283782328fd7a0dfa9: CDI devices from CRI Config.CDIDevices: []"
Nov 24 09:05:37 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:37.374660363Z" level=info msg="CreateContainer within sandbox \"1ee15af4335571d5c2c1f8cf460b21232bfc82973349a4c00a86f5a2545492a2\" for &ContainerMetadata{Name:busybox,Attempt:0,} returns container id \"92908e44718b76213a4fd87e310efd757d73940a581879283782328fd7a0dfa9\""
Nov 24 09:05:37 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:37.375182989Z" level=info msg="StartContainer for \"92908e44718b76213a4fd87e310efd757d73940a581879283782328fd7a0dfa9\""
Nov 24 09:05:37 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:37.376051696Z" level=info msg="connecting to shim 92908e44718b76213a4fd87e310efd757d73940a581879283782328fd7a0dfa9" address="unix:///run/containerd/s/b51cd8663d01a7c675d7f65aecc44f4b6281e3382088734fe56170e879775890" protocol=ttrpc version=3
Nov 24 09:05:37 old-k8s-version-128377 containerd[661]: time="2025-11-24T09:05:37.425776823Z" level=info msg="StartContainer for \"92908e44718b76213a4fd87e310efd757d73940a581879283782328fd7a0dfa9\" returns successfully"
Nov 24 09:05:43 old-k8s-version-128377 containerd[661]: E1124 09:05:43.526421 661 websocket.go:100] "Unhandled Error" err="unable to upgrade websocket connection: websocket server finished before becoming ready" logger="UnhandledError"
==> coredns [a7a841ea7303a40b7b557fbe769c57a1562346d875b1853a8a729ad668090cb5] <==
.:53
[INFO] plugin/reload: Running configuration SHA512 = 25cf5af2951e282c4b0e961a02fb5d3e57c974501832fee92eec17b5135b9ec9d9e87d2ac94e6d117a5ed3dd54e8800aa7b4479706eb54497145ccdb80397d1b
CoreDNS-1.10.1
linux/amd64, go1.20, 055b2c3
[INFO] 127.0.0.1:54326 - 65005 "HINFO IN 6565264189616162908.3935264129304859187. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.029224592s
==> describe nodes <==
Name:               old-k8s-version-128377
Roles:              control-plane
Labels:             beta.kubernetes.io/arch=amd64
                    beta.kubernetes.io/os=linux
                    kubernetes.io/arch=amd64
                    kubernetes.io/hostname=old-k8s-version-128377
                    kubernetes.io/os=linux
                    minikube.k8s.io/commit=393ee3e0b845623107dce6cda4f48ffd5c3d1811
                    minikube.k8s.io/name=old-k8s-version-128377
                    minikube.k8s.io/primary=true
                    minikube.k8s.io/updated_at=2025_11_24T09_05_05_0700
                    minikube.k8s.io/version=v1.37.0
                    node-role.kubernetes.io/control-plane=
                    node.kubernetes.io/exclude-from-external-load-balancers=
Annotations:        kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock
                    node.alpha.kubernetes.io/ttl: 0
                    volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp:  Mon, 24 Nov 2025 09:05:01 +0000
Taints:             <none>
Unschedulable:      false
Lease:
  HolderIdentity:  old-k8s-version-128377
  AcquireTime:     <unset>
  RenewTime:       Mon, 24 Nov 2025 09:05:44 +0000
Conditions:
  Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
  ----             ------  -----------------                 ------------------                ------                       -------
  MemoryPressure   False   Mon, 24 Nov 2025 09:05:35 +0000   Mon, 24 Nov 2025 09:05:00 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
  DiskPressure     False   Mon, 24 Nov 2025 09:05:35 +0000   Mon, 24 Nov 2025 09:05:00 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
  PIDPressure      False   Mon, 24 Nov 2025 09:05:35 +0000   Mon, 24 Nov 2025 09:05:00 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
  Ready            True    Mon, 24 Nov 2025 09:05:35 +0000   Mon, 24 Nov 2025 09:05:31 +0000   KubeletReady                 kubelet is posting ready status
Addresses:
  InternalIP:  192.168.103.2
  Hostname:    old-k8s-version-128377
Capacity:
  cpu:                8
  ephemeral-storage:  304681132Ki
  hugepages-1Gi:      0
  hugepages-2Mi:      0
  memory:             32863356Ki
  pods:               110
Allocatable:
  cpu:                8
  ephemeral-storage:  304681132Ki
  hugepages-1Gi:      0
  hugepages-2Mi:      0
  memory:             32863356Ki
  pods:               110
System Info:
  Machine ID:                 9629f1d5bc1ed524a56ce23c69214c09
  System UUID:                220a6d4b-4a36-435b-ad8f-2d418f4618a1
  Boot ID:                    f052cd47-57de-4521-b5fb-139979fdced9
  Kernel Version:             6.8.0-1044-gcp
  OS Image:                   Debian GNU/Linux 12 (bookworm)
  Operating System:           linux
  Architecture:               amd64
  Container Runtime Version:  containerd://2.1.5
  Kubelet Version:            v1.28.0
  Kube-Proxy Version:         v1.28.0
PodCIDR:                      10.244.0.0/24
PodCIDRs:                     10.244.0.0/24
Non-terminated Pods:          (9 in total)
  Namespace    Name                                             CPU Requests  CPU Limits  Memory Requests  Memory Limits  Age
  ---------    ----                                             ------------  ----------  ---------------  -------------  ---
  default      busybox                                          0 (0%)        0 (0%)      0 (0%)           0 (0%)         12s
  kube-system  coredns-5dd5756b68-vxxnm                         100m (1%)     0 (0%)      70Mi (0%)        170Mi (0%)     29s
  kube-system  etcd-old-k8s-version-128377                      100m (1%)     0 (0%)      100Mi (0%)       0 (0%)         42s
  kube-system  kindnet-gbp66                                    100m (1%)     100m (1%)   50Mi (0%)        50Mi (0%)      29s
  kube-system  kube-apiserver-old-k8s-version-128377            250m (3%)     0 (0%)      0 (0%)           0 (0%)         42s
  kube-system  kube-controller-manager-old-k8s-version-128377   200m (2%)     0 (0%)      0 (0%)           0 (0%)         42s
  kube-system  kube-proxy-fpbs2                                 0 (0%)        0 (0%)      0 (0%)           0 (0%)         29s
  kube-system  kube-scheduler-old-k8s-version-128377            100m (1%)     0 (0%)      0 (0%)           0 (0%)         42s
  kube-system  storage-provisioner                              0 (0%)        0 (0%)      0 (0%)           0 (0%)         29s
Allocated resources:
  (Total limits may be over 100 percent, i.e., overcommitted.)
  Resource           Requests    Limits
  --------           --------    ------
  cpu                850m (10%)  100m (1%)
  memory             220Mi (0%)  220Mi (0%)
  ephemeral-storage  0 (0%)      0 (0%)
  hugepages-1Gi      0 (0%)      0 (0%)
  hugepages-2Mi      0 (0%)      0 (0%)
Events:
  Type    Reason                   Age                From             Message
  ----    ------                   ----               ----             -------
  Normal  Starting                 28s                kube-proxy
  Normal  Starting                 48s                kubelet          Starting kubelet.
  Normal  NodeHasSufficientMemory  47s (x8 over 47s)  kubelet          Node old-k8s-version-128377 status is now: NodeHasSufficientMemory
  Normal  NodeHasNoDiskPressure    47s (x8 over 47s)  kubelet          Node old-k8s-version-128377 status is now: NodeHasNoDiskPressure
  Normal  NodeHasSufficientPID     47s (x7 over 47s)  kubelet          Node old-k8s-version-128377 status is now: NodeHasSufficientPID
  Normal  NodeAllocatableEnforced  47s                kubelet          Updated Node Allocatable limit across pods
  Normal  Starting                 42s                kubelet          Starting kubelet.
  Normal  NodeAllocatableEnforced  42s                kubelet          Updated Node Allocatable limit across pods
  Normal  NodeHasSufficientMemory  42s                kubelet          Node old-k8s-version-128377 status is now: NodeHasSufficientMemory
  Normal  NodeHasNoDiskPressure    42s                kubelet          Node old-k8s-version-128377 status is now: NodeHasNoDiskPressure
  Normal  NodeHasSufficientPID     42s                kubelet          Node old-k8s-version-128377 status is now: NodeHasSufficientPID
  Normal  RegisteredNode           30s                node-controller  Node old-k8s-version-128377 event: Registered Node old-k8s-version-128377 in Controller
  Normal  NodeReady                15s                kubelet          Node old-k8s-version-128377 status is now: NodeReady
==> dmesg <==
==> etcd [8df3112d99751cf0ed66add055e0df50e3c944dbb66b787e2e3ae37efbec7d4e] <==
{"level":"info","ts":"2025-11-24T09:05:00.107581Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2025-11-24T09:05:00.107626Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"3336683c081d149d","local-member-id":"f23060b075c4c089","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-24T09:05:00.107753Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-24T09:05:00.10778Z","caller":"etcdserver/server.go:2595","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-24T09:05:00.10887Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2025-11-24T09:05:00.108869Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.103.2:2379"}
{"level":"info","ts":"2025-11-24T09:05:01.710895Z","caller":"traceutil/trace.go:171","msg":"trace[1442253581] transaction","detail":"{read_only:false; response_revision:20; number_of_response:1; }","duration":"170.61339ms","start":"2025-11-24T09:05:01.540258Z","end":"2025-11-24T09:05:01.710871Z","steps":["trace[1442253581] 'process raft request' (duration: 170.544438ms)"],"step_count":1}
{"level":"info","ts":"2025-11-24T09:05:01.711011Z","caller":"traceutil/trace.go:171","msg":"trace[699662152] transaction","detail":"{read_only:false; response_revision:19; number_of_response:1; }","duration":"172.264745ms","start":"2025-11-24T09:05:01.538726Z","end":"2025-11-24T09:05:01.710991Z","steps":["trace[699662152] 'process raft request' (duration: 172.04013ms)"],"step_count":1}
{"level":"warn","ts":"2025-11-24T09:05:01.711031Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"172.576061ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/certificatesigningrequests/csr-9x9d8\" ","response":"range_response_count:1 size:895"}
{"level":"info","ts":"2025-11-24T09:05:01.710896Z","caller":"traceutil/trace.go:171","msg":"trace[1006472868] transaction","detail":"{read_only:false; response_revision:18; number_of_response:1; }","duration":"172.691781ms","start":"2025-11-24T09:05:01.538162Z","end":"2025-11-24T09:05:01.710854Z","steps":["trace[1006472868] 'process raft request' (duration: 109.125575ms)","trace[1006472868] 'compare' (duration: 63.355357ms)"],"step_count":2}
{"level":"info","ts":"2025-11-24T09:05:01.710915Z","caller":"traceutil/trace.go:171","msg":"trace[981263403] transaction","detail":"{read_only:false; response_revision:21; number_of_response:1; }","duration":"170.391166ms","start":"2025-11-24T09:05:01.540518Z","end":"2025-11-24T09:05:01.710909Z","steps":["trace[981263403] 'process raft request' (duration: 170.307811ms)"],"step_count":1}
{"level":"info","ts":"2025-11-24T09:05:01.711086Z","caller":"traceutil/trace.go:171","msg":"trace[1918024405] range","detail":"{range_begin:/registry/certificatesigningrequests/csr-9x9d8; range_end:; response_count:1; response_revision:22; }","duration":"172.654948ms","start":"2025-11-24T09:05:01.538422Z","end":"2025-11-24T09:05:01.711077Z","steps":["trace[1918024405] 'agreement among raft nodes before linearized reading' (duration: 172.512588ms)"],"step_count":1}
{"level":"info","ts":"2025-11-24T09:05:01.710914Z","caller":"traceutil/trace.go:171","msg":"trace[1488131719] linearizableReadLoop","detail":"{readStateIndex:22; appliedIndex:18; }","duration":"172.460174ms","start":"2025-11-24T09:05:01.53844Z","end":"2025-11-24T09:05:01.7109Z","steps":["trace[1488131719] 'read index received' (duration: 25.895675ms)","trace[1488131719] 'applied index is now lower than readState.Index' (duration: 146.559971ms)"],"step_count":2}
{"level":"info","ts":"2025-11-24T09:05:01.711054Z","caller":"traceutil/trace.go:171","msg":"trace[1678514513] transaction","detail":"{read_only:false; response_revision:22; number_of_response:1; }","duration":"149.8797ms","start":"2025-11-24T09:05:01.561163Z","end":"2025-11-24T09:05:01.711042Z","steps":["trace[1678514513] 'process raft request' (duration: 149.700045ms)"],"step_count":1}
{"level":"info","ts":"2025-11-24T09:05:01.711435Z","caller":"traceutil/trace.go:171","msg":"trace[2085549652] transaction","detail":"{read_only:false; response_revision:23; number_of_response:1; }","duration":"144.831606ms","start":"2025-11-24T09:05:01.566593Z","end":"2025-11-24T09:05:01.711425Z","steps":["trace[2085549652] 'process raft request' (duration: 144.652194ms)"],"step_count":1}
{"level":"info","ts":"2025-11-24T09:05:01.711454Z","caller":"traceutil/trace.go:171","msg":"trace[1776690454] transaction","detail":"{read_only:false; response_revision:24; number_of_response:1; }","duration":"143.564662ms","start":"2025-11-24T09:05:01.567876Z","end":"2025-11-24T09:05:01.71144Z","steps":["trace[1776690454] 'process raft request' (duration: 143.429904ms)"],"step_count":1}
{"level":"warn","ts":"2025-11-24T09:05:01.711724Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"146.213558ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/namespaces/kube-system\" ","response":"range_response_count:1 size:350"}
{"level":"info","ts":"2025-11-24T09:05:01.711757Z","caller":"traceutil/trace.go:171","msg":"trace[366826393] range","detail":"{range_begin:/registry/namespaces/kube-system; range_end:; response_count:1; response_revision:25; }","duration":"146.253881ms","start":"2025-11-24T09:05:01.565494Z","end":"2025-11-24T09:05:01.711748Z","steps":["trace[366826393] 'agreement among raft nodes before linearized reading' (duration: 146.18478ms)"],"step_count":1}
{"level":"info","ts":"2025-11-24T09:05:01.711931Z","caller":"traceutil/trace.go:171","msg":"trace[1923893862] transaction","detail":"{read_only:false; response_revision:25; number_of_response:1; }","duration":"137.068438ms","start":"2025-11-24T09:05:01.574851Z","end":"2025-11-24T09:05:01.711919Z","steps":["trace[1923893862] 'process raft request' (duration: 136.481982ms)"],"step_count":1}
{"level":"warn","ts":"2025-11-24T09:05:01.712125Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"171.955875ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/configmaps/kube-system/extension-apiserver-authentication\" ","response":"range_response_count:0 size:4"}
{"level":"info","ts":"2025-11-24T09:05:01.712163Z","caller":"traceutil/trace.go:171","msg":"trace[90940555] range","detail":"{range_begin:/registry/configmaps/kube-system/extension-apiserver-authentication; range_end:; response_count:0; response_revision:25; }","duration":"172.012061ms","start":"2025-11-24T09:05:01.54014Z","end":"2025-11-24T09:05:01.712153Z","steps":["trace[90940555] 'agreement among raft nodes before linearized reading' (duration: 171.930715ms)"],"step_count":1}
{"level":"warn","ts":"2025-11-24T09:05:01.714609Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"175.250502ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/minions/old-k8s-version-128377\" ","response":"range_response_count:1 size:3558"}
{"level":"info","ts":"2025-11-24T09:05:01.714708Z","caller":"traceutil/trace.go:171","msg":"trace[322045522] range","detail":"{range_begin:/registry/minions/old-k8s-version-128377; range_end:; response_count:1; response_revision:25; }","duration":"175.353553ms","start":"2025-11-24T09:05:01.539338Z","end":"2025-11-24T09:05:01.714691Z","steps":["trace[322045522] 'agreement among raft nodes before linearized reading' (duration: 172.031487ms)"],"step_count":1}
{"level":"info","ts":"2025-11-24T09:05:03.559324Z","caller":"traceutil/trace.go:171","msg":"trace[627044044] transaction","detail":"{read_only:false; response_revision:204; number_of_response:1; }","duration":"100.594994ms","start":"2025-11-24T09:05:03.458371Z","end":"2025-11-24T09:05:03.558966Z","steps":["trace[627044044] 'process raft request' (duration: 98.72439ms)"],"step_count":1}
{"level":"info","ts":"2025-11-24T09:05:11.43815Z","caller":"traceutil/trace.go:171","msg":"trace[324713988] transaction","detail":"{read_only:false; response_revision:302; number_of_response:1; }","duration":"136.243687ms","start":"2025-11-24T09:05:11.301878Z","end":"2025-11-24T09:05:11.438122Z","steps":["trace[324713988] 'process raft request' (duration: 135.577137ms)"],"step_count":1}
==> kernel <==
09:05:46 up 3:48, 0 user, load average: 4.43, 3.43, 10.79
Linux old-k8s-version-128377 6.8.0-1044-gcp #47~22.04.1-Ubuntu SMP Thu Oct 23 21:07:54 UTC 2025 x86_64 GNU/Linux
PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"
==> kindnet [818537e08c0605796949e72c73a034b7d5f104ce598d4a12f0ed8bf30de9c646] <==
I1124 09:05:21.342277 1 main.go:109] connected to apiserver: https://10.96.0.1:443
I1124 09:05:21.342547 1 main.go:139] hostIP = 192.168.103.2
podIP = 192.168.103.2
I1124 09:05:21.342705 1 main.go:148] setting mtu 1500 for CNI
I1124 09:05:21.342728 1 main.go:178] kindnetd IP family: "ipv4"
I1124 09:05:21.342756 1 main.go:182] noMask IPv4 subnets: [10.244.0.0/16]
time="2025-11-24T09:05:21Z" level=info msg="Created plugin 10-kube-network-policies (kindnetd, handles RunPodSandbox,RemovePodSandbox)"
I1124 09:05:21.545109 1 controller.go:377] "Starting controller" name="kube-network-policies"
I1124 09:05:21.545137 1 controller.go:381] "Waiting for informer caches to sync"
I1124 09:05:21.545150 1 shared_informer.go:350] "Waiting for caches to sync" controller="kube-network-policies"
I1124 09:05:21.545827 1 controller.go:390] nri plugin exited: failed to connect to NRI service: dial unix /var/run/nri/nri.sock: connect: no such file or directory
I1124 09:05:22.046295 1 shared_informer.go:357] "Caches are synced" controller="kube-network-policies"
I1124 09:05:22.046329 1 metrics.go:72] Registering metrics
I1124 09:05:22.046391 1 controller.go:711] "Syncing nftables rules"
I1124 09:05:31.547663 1 main.go:297] Handling node with IPs: map[192.168.103.2:{}]
I1124 09:05:31.547728 1 main.go:301] handling current node
I1124 09:05:41.547315 1 main.go:297] Handling node with IPs: map[192.168.103.2:{}]
I1124 09:05:41.547363 1 main.go:301] handling current node
==> kube-apiserver [842bd9db2d84b65b054e4b006bfb9c11b98ac3cdcbe13cd821183480cd046d8a] <==
I1124 09:05:01.506809 1 shared_informer.go:318] Caches are synced for crd-autoregister
I1124 09:05:01.506838 1 aggregator.go:166] initial CRD sync complete...
I1124 09:05:01.506846 1 autoregister_controller.go:141] Starting autoregister controller
I1124 09:05:01.506863 1 cache.go:32] Waiting for caches to sync for autoregister controller
I1124 09:05:01.506869 1 cache.go:39] Caches are synced for autoregister controller
I1124 09:05:01.508109 1 controller.go:624] quota admission added evaluator for: namespaces
I1124 09:05:01.508757 1 shared_informer.go:318] Caches are synced for configmaps
E1124 09:05:01.537227 1 controller.go:146] "Failed to ensure lease exists, will retry" err="namespaces \"kube-system\" not found" interval="200ms"
I1124 09:05:01.741694 1 controller.go:624] quota admission added evaluator for: leases.coordination.k8s.io
I1124 09:05:02.411561 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I1124 09:05:02.415133 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I1124 09:05:02.415155 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I1124 09:05:02.826831 1 controller.go:624] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1124 09:05:02.865354 1 controller.go:624] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1124 09:05:02.945781 1 alloc.go:330] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W1124 09:05:02.951178 1 lease.go:263] Resetting endpoints for master service "kubernetes" to [192.168.103.2]
I1124 09:05:02.952085 1 controller.go:624] quota admission added evaluator for: endpoints
I1124 09:05:02.955858 1 controller.go:624] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1124 09:05:03.457945 1 controller.go:624] quota admission added evaluator for: serviceaccounts
I1124 09:05:04.197911 1 controller.go:624] quota admission added evaluator for: deployments.apps
I1124 09:05:04.208245 1 alloc.go:330] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I1124 09:05:04.218442 1 controller.go:624] quota admission added evaluator for: daemonsets.apps
I1124 09:05:17.015236 1 controller.go:624] quota admission added evaluator for: replicasets.apps
I1124 09:05:17.165046 1 controller.go:624] quota admission added evaluator for: controllerrevisions.apps
I1124 09:05:17.165047 1 controller.go:624] quota admission added evaluator for: controllerrevisions.apps
==> kube-controller-manager [5d9ec22e03b8b0446d34a5b300037519eb0aa0be6b1e6c451907abb271f71839] <==
I1124 09:05:16.510194 1 node_lifecycle_controller.go:877] "Missing timestamp for Node. Assuming now as a timestamp" node="old-k8s-version-128377"
I1124 09:05:16.510252 1 node_lifecycle_controller.go:1029] "Controller detected that all Nodes are not-Ready. Entering master disruption mode"
I1124 09:05:16.516579 1 shared_informer.go:318] Caches are synced for resource quota
I1124 09:05:16.831807 1 shared_informer.go:318] Caches are synced for garbage collector
I1124 09:05:16.890844 1 shared_informer.go:318] Caches are synced for garbage collector
I1124 09:05:16.890883 1 garbagecollector.go:166] "All resource monitors have synced. Proceeding to collect garbage"
I1124 09:05:17.019027 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set coredns-5dd5756b68 to 2"
I1124 09:05:17.175390 1 event.go:307] "Event occurred" object="kube-system/kindnet" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kindnet-gbp66"
I1124 09:05:17.176958 1 event.go:307] "Event occurred" object="kube-system/kube-proxy" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-fpbs2"
I1124 09:05:17.325895 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-vxxnm"
I1124 09:05:17.332721 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-x5sl2"
I1124 09:05:17.343264 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="324.364712ms"
I1124 09:05:17.351654 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="8.320995ms"
I1124 09:05:17.351793 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="83.483µs"
I1124 09:05:17.672071 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set coredns-5dd5756b68 to 1 from 2"
I1124 09:05:17.682409 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: coredns-5dd5756b68-x5sl2"
I1124 09:05:17.690482 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="20.456609ms"
I1124 09:05:17.698725 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="8.176655ms"
I1124 09:05:17.698851 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="86.584µs"
I1124 09:05:31.598337 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="79.212µs"
I1124 09:05:31.631586 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="116.266µs"
I1124 09:05:32.360508 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="141.431µs"
I1124 09:05:32.386954 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="6.987919ms"
I1124 09:05:32.387048 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="62.305µs"
I1124 09:05:36.514110 1 node_lifecycle_controller.go:1048] "Controller detected that some Nodes are Ready. Exiting master disruption mode"
==> kube-proxy [370631aaaf577fb6a343282108f71bb03e72ef6024de9d9f8e2a2eeb7e16e746] <==
I1124 09:05:17.831726 1 server_others.go:69] "Using iptables proxy"
I1124 09:05:17.841216 1 node.go:141] Successfully retrieved node IP: 192.168.103.2
I1124 09:05:17.866087 1 server.go:632] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I1124 09:05:17.868989 1 server_others.go:152] "Using iptables Proxier"
I1124 09:05:17.869038 1 server_others.go:421] "Detect-local-mode set to ClusterCIDR, but no cluster CIDR for family" ipFamily="IPv6"
I1124 09:05:17.869048 1 server_others.go:438] "Defaulting to no-op detect-local"
I1124 09:05:17.869091 1 proxier.go:251] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
I1124 09:05:17.869396 1 server.go:846] "Version info" version="v1.28.0"
I1124 09:05:17.869419 1 server.go:848] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1124 09:05:17.870089 1 config.go:188] "Starting service config controller"
I1124 09:05:17.870115 1 config.go:315] "Starting node config controller"
I1124 09:05:17.870130 1 shared_informer.go:311] Waiting for caches to sync for service config
I1124 09:05:17.870125 1 config.go:97] "Starting endpoint slice config controller"
I1124 09:05:17.870157 1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
I1124 09:05:17.870135 1 shared_informer.go:311] Waiting for caches to sync for node config
I1124 09:05:17.970983 1 shared_informer.go:318] Caches are synced for endpoint slice config
I1124 09:05:17.970991 1 shared_informer.go:318] Caches are synced for service config
I1124 09:05:17.970967 1 shared_informer.go:318] Caches are synced for node config
==> kube-scheduler [f5eddecfb179fe94de6b3892600fc1870efa5679c82874d72a3b301753e6f7d4] <==
E1124 09:05:01.478877 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E1124 09:05:01.478878 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
W1124 09:05:01.478887 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E1124 09:05:01.478907 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
W1124 09:05:01.478997 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E1124 09:05:01.479055 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
W1124 09:05:01.479077 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E1124 09:05:01.479125 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
W1124 09:05:02.313819 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E1124 09:05:02.313863 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
W1124 09:05:02.319417 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E1124 09:05:02.319451 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
W1124 09:05:02.429310 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E1124 09:05:02.429356 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
W1124 09:05:02.538603 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E1124 09:05:02.538660 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
W1124 09:05:02.549098 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E1124 09:05:02.549140 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
W1124 09:05:02.661900 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E1124 09:05:02.661937 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
W1124 09:05:02.666268 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E1124 09:05:02.666312 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
W1124 09:05:02.688142 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E1124 09:05:02.688189 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
I1124 09:05:03.073951 1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Nov 24 09:05:16 old-k8s-version-128377 kubelet[1521]: I1124 09:05:16.342896 1521 kubelet_network.go:61] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24"
Nov 24 09:05:17 old-k8s-version-128377 kubelet[1521]: I1124 09:05:17.183175 1521 topology_manager.go:215] "Topology Admit Handler" podUID="52128126-550d-4795-9fa1-e1d3d9510dd3" podNamespace="kube-system" podName="kube-proxy-fpbs2"
Nov 24 09:05:17 old-k8s-version-128377 kubelet[1521]: I1124 09:05:17.188113 1521 topology_manager.go:215] "Topology Admit Handler" podUID="49954742-ea7f-466f-80d8-7d6ac88ce36c" podNamespace="kube-system" podName="kindnet-gbp66"
Nov 24 09:05:17 old-k8s-version-128377 kubelet[1521]: I1124 09:05:17.338200 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vzbjt\" (UniqueName: \"kubernetes.io/projected/52128126-550d-4795-9fa1-e1d3d9510dd3-kube-api-access-vzbjt\") pod \"kube-proxy-fpbs2\" (UID: \"52128126-550d-4795-9fa1-e1d3d9510dd3\") " pod="kube-system/kube-proxy-fpbs2"
Nov 24 09:05:17 old-k8s-version-128377 kubelet[1521]: I1124 09:05:17.338280 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-cfg\" (UniqueName: \"kubernetes.io/host-path/49954742-ea7f-466f-80d8-7d6ac88ce36c-cni-cfg\") pod \"kindnet-gbp66\" (UID: \"49954742-ea7f-466f-80d8-7d6ac88ce36c\") " pod="kube-system/kindnet-gbp66"
Nov 24 09:05:17 old-k8s-version-128377 kubelet[1521]: I1124 09:05:17.338319 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/52128126-550d-4795-9fa1-e1d3d9510dd3-lib-modules\") pod \"kube-proxy-fpbs2\" (UID: \"52128126-550d-4795-9fa1-e1d3d9510dd3\") " pod="kube-system/kube-proxy-fpbs2"
Nov 24 09:05:17 old-k8s-version-128377 kubelet[1521]: I1124 09:05:17.338351 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/49954742-ea7f-466f-80d8-7d6ac88ce36c-lib-modules\") pod \"kindnet-gbp66\" (UID: \"49954742-ea7f-466f-80d8-7d6ac88ce36c\") " pod="kube-system/kindnet-gbp66"
Nov 24 09:05:17 old-k8s-version-128377 kubelet[1521]: I1124 09:05:17.338392 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/52128126-550d-4795-9fa1-e1d3d9510dd3-kube-proxy\") pod \"kube-proxy-fpbs2\" (UID: \"52128126-550d-4795-9fa1-e1d3d9510dd3\") " pod="kube-system/kube-proxy-fpbs2"
Nov 24 09:05:17 old-k8s-version-128377 kubelet[1521]: I1124 09:05:17.338424 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/49954742-ea7f-466f-80d8-7d6ac88ce36c-xtables-lock\") pod \"kindnet-gbp66\" (UID: \"49954742-ea7f-466f-80d8-7d6ac88ce36c\") " pod="kube-system/kindnet-gbp66"
Nov 24 09:05:17 old-k8s-version-128377 kubelet[1521]: I1124 09:05:17.338473 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cd5l7\" (UniqueName: \"kubernetes.io/projected/49954742-ea7f-466f-80d8-7d6ac88ce36c-kube-api-access-cd5l7\") pod \"kindnet-gbp66\" (UID: \"49954742-ea7f-466f-80d8-7d6ac88ce36c\") " pod="kube-system/kindnet-gbp66"
Nov 24 09:05:17 old-k8s-version-128377 kubelet[1521]: I1124 09:05:17.338537 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/52128126-550d-4795-9fa1-e1d3d9510dd3-xtables-lock\") pod \"kube-proxy-fpbs2\" (UID: \"52128126-550d-4795-9fa1-e1d3d9510dd3\") " pod="kube-system/kube-proxy-fpbs2"
Nov 24 09:05:18 old-k8s-version-128377 kubelet[1521]: I1124 09:05:18.914069 1521 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kube-proxy-fpbs2" podStartSLOduration=1.913988204 podCreationTimestamp="2025-11-24 09:05:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 09:05:18.331224336 +0000 UTC m=+14.156867889" watchObservedRunningTime="2025-11-24 09:05:18.913988204 +0000 UTC m=+14.739631764"
Nov 24 09:05:21 old-k8s-version-128377 kubelet[1521]: I1124 09:05:21.337175 1521 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kindnet-gbp66" podStartSLOduration=1.258069975 podCreationTimestamp="2025-11-24 09:05:17 +0000 UTC" firstStartedPulling="2025-11-24 09:05:17.956037798 +0000 UTC m=+13.781681343" lastFinishedPulling="2025-11-24 09:05:21.035088666 +0000 UTC m=+16.860732211" observedRunningTime="2025-11-24 09:05:21.33698865 +0000 UTC m=+17.162632223" watchObservedRunningTime="2025-11-24 09:05:21.337120843 +0000 UTC m=+17.162764404"
Nov 24 09:05:31 old-k8s-version-128377 kubelet[1521]: I1124 09:05:31.576686 1521 kubelet_node_status.go:493] "Fast updating node status as it just became ready"
Nov 24 09:05:31 old-k8s-version-128377 kubelet[1521]: I1124 09:05:31.597206 1521 topology_manager.go:215] "Topology Admit Handler" podUID="7e4f56c0-0b49-47cd-9278-129ad898b781" podNamespace="kube-system" podName="storage-provisioner"
Nov 24 09:05:31 old-k8s-version-128377 kubelet[1521]: I1124 09:05:31.598949 1521 topology_manager.go:215] "Topology Admit Handler" podUID="b84bae0f-9f75-4d1c-b2ed-da0c10a141cf" podNamespace="kube-system" podName="coredns-5dd5756b68-vxxnm"
Nov 24 09:05:31 old-k8s-version-128377 kubelet[1521]: I1124 09:05:31.745876 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/7e4f56c0-0b49-47cd-9278-129ad898b781-tmp\") pod \"storage-provisioner\" (UID: \"7e4f56c0-0b49-47cd-9278-129ad898b781\") " pod="kube-system/storage-provisioner"
Nov 24 09:05:31 old-k8s-version-128377 kubelet[1521]: I1124 09:05:31.746005 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b84bae0f-9f75-4d1c-b2ed-da0c10a141cf-config-volume\") pod \"coredns-5dd5756b68-vxxnm\" (UID: \"b84bae0f-9f75-4d1c-b2ed-da0c10a141cf\") " pod="kube-system/coredns-5dd5756b68-vxxnm"
Nov 24 09:05:31 old-k8s-version-128377 kubelet[1521]: I1124 09:05:31.746049 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s87ck\" (UniqueName: \"kubernetes.io/projected/b84bae0f-9f75-4d1c-b2ed-da0c10a141cf-kube-api-access-s87ck\") pod \"coredns-5dd5756b68-vxxnm\" (UID: \"b84bae0f-9f75-4d1c-b2ed-da0c10a141cf\") " pod="kube-system/coredns-5dd5756b68-vxxnm"
Nov 24 09:05:31 old-k8s-version-128377 kubelet[1521]: I1124 09:05:31.746075 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mp79g\" (UniqueName: \"kubernetes.io/projected/7e4f56c0-0b49-47cd-9278-129ad898b781-kube-api-access-mp79g\") pod \"storage-provisioner\" (UID: \"7e4f56c0-0b49-47cd-9278-129ad898b781\") " pod="kube-system/storage-provisioner"
Nov 24 09:05:32 old-k8s-version-128377 kubelet[1521]: I1124 09:05:32.360059 1521 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/coredns-5dd5756b68-vxxnm" podStartSLOduration=15.360007602 podCreationTimestamp="2025-11-24 09:05:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 09:05:32.35995945 +0000 UTC m=+28.185603012" watchObservedRunningTime="2025-11-24 09:05:32.360007602 +0000 UTC m=+28.185651165"
Nov 24 09:05:32 old-k8s-version-128377 kubelet[1521]: I1124 09:05:32.379733 1521 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=15.379681272 podCreationTimestamp="2025-11-24 09:05:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 09:05:32.370112867 +0000 UTC m=+28.195756426" watchObservedRunningTime="2025-11-24 09:05:32.379681272 +0000 UTC m=+28.205324835"
Nov 24 09:05:34 old-k8s-version-128377 kubelet[1521]: I1124 09:05:34.439352 1521 topology_manager.go:215] "Topology Admit Handler" podUID="bfaec734-d874-4dcb-b31f-feb87adccfca" podNamespace="default" podName="busybox"
Nov 24 09:05:34 old-k8s-version-128377 kubelet[1521]: I1124 09:05:34.561236 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qwqg6\" (UniqueName: \"kubernetes.io/projected/bfaec734-d874-4dcb-b31f-feb87adccfca-kube-api-access-qwqg6\") pod \"busybox\" (UID: \"bfaec734-d874-4dcb-b31f-feb87adccfca\") " pod="default/busybox"
Nov 24 09:05:38 old-k8s-version-128377 kubelet[1521]: I1124 09:05:38.375611 1521 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="default/busybox" podStartSLOduration=1.866491732 podCreationTimestamp="2025-11-24 09:05:34 +0000 UTC" firstStartedPulling="2025-11-24 09:05:34.853152472 +0000 UTC m=+30.678796027" lastFinishedPulling="2025-11-24 09:05:37.362217947 +0000 UTC m=+33.187861503" observedRunningTime="2025-11-24 09:05:38.375372923 +0000 UTC m=+34.201016485" watchObservedRunningTime="2025-11-24 09:05:38.375557208 +0000 UTC m=+34.201200770"
==> storage-provisioner [a9a5857553e67019e47641c1970bb0d5555afd6b608c94a94501dd485efac0c4] <==
I1124 09:05:32.081185 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1124 09:05:32.090604 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1124 09:05:32.090641 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I1124 09:05:32.097885 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I1124 09:05:32.097963 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"742d8911-ea16-4251-8cf0-6f909959732d", APIVersion:"v1", ResourceVersion:"433", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' old-k8s-version-128377_807761f2-87be-4f83-a3e6-a9218ea13b30 became leader
I1124 09:05:32.098144 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_old-k8s-version-128377_807761f2-87be-4f83-a3e6-a9218ea13b30!
I1124 09:05:32.198942 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_old-k8s-version-128377_807761f2-87be-4f83-a3e6-a9218ea13b30!
-- /stdout --
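[editor's note] The storage-provisioner section of the logs above shows the standard client-go leader election flow: attempt to acquire the kube-system/k8s.io-minikube-hostpath lock (leaderelection.go:243), then start the controller once the lease is held (leaderelection.go:253). Below is a minimal sketch of that flow, assuming k8s.io/client-go. The provisioner in this log uses an older Endpoints-based lock, while this sketch uses the Lease lock that current client-go recommends; all other names are illustrative.

// leader_election_sketch.go — hypothetical sketch of the provisioner's leader election.
package main

import (
	"context"
	"log"
	"os"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
	if err != nil {
		log.Fatal(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	id, _ := os.Hostname() // lease holder identity, like the host_uuid suffix in the log
	lock := &resourcelock.LeaseLock{
		LeaseMeta:  metav1.ObjectMeta{Name: "k8s.io-minikube-hostpath", Namespace: "kube-system"},
		Client:     cs.CoordinationV1(),
		LockConfig: resourcelock.ResourceLockConfig{Identity: "sketch-" + id},
	}

	// RunOrDie blocks: it acquires the lease, invokes OnStartedLeading, and
	// keeps renewing until the context is cancelled or renewal fails.
	leaderelection.RunOrDie(context.Background(), leaderelection.LeaderElectionConfig{
		Lock:          lock,
		LeaseDuration: 15 * time.Second,
		RenewDeadline: 10 * time.Second,
		RetryPeriod:   2 * time.Second,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) {
				log.Println("acquired lease; a real provisioner would start its controller here")
			},
			OnStoppedLeading: func() { log.Println("lost lease") },
		},
	})
}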
helpers_test.go:262: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p old-k8s-version-128377 -n old-k8s-version-128377
helpers_test.go:269: (dbg) Run: kubectl --context old-k8s-version-128377 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:293: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: end of post-mortem logs <<<
helpers_test.go:294: ---------------------/post-mortem---------------------------------
--- FAIL: TestStartStop/group/old-k8s-version/serial/DeployApp (13.41s)