=== RUN TestStartStop/group/old-k8s-version/serial/DeployApp
start_stop_delete_test.go:194: (dbg) Run: kubectl --context old-k8s-version-462319 create -f testdata/busybox.yaml
start_stop_delete_test.go:194: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:352: "busybox" [89dd9411-148d-4a8e-98d3-a51a8eab9d35] Pending
helpers_test.go:352: "busybox" [89dd9411-148d-4a8e-98d3-a51a8eab9d35] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
helpers_test.go:352: "busybox" [89dd9411-148d-4a8e-98d3-a51a8eab9d35] Running
start_stop_delete_test.go:194: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: integration-test=busybox healthy within 9.004358505s
start_stop_delete_test.go:194: (dbg) Run: kubectl --context old-k8s-version-462319 exec busybox -- /bin/sh -c "ulimit -n"
start_stop_delete_test.go:194: 'ulimit -n' returned 1024, expected 1048576
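The failing assertion above comes from the `kubectl ... exec busybox -- /bin/sh -c "ulimit -n"` step two lines earlier. A minimal Go sketch (illustrative only, not the actual start_stop_delete_test.go code; it assumes kubectl is on PATH and the old-k8s-version-462319 context still exists) that reproduces the same comparison:

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	// Same command the test ran: read the open-files limit inside the busybox pod.
	out, err := exec.Command("kubectl", "--context", "old-k8s-version-462319",
		"exec", "busybox", "--", "/bin/sh", "-c", "ulimit -n").CombinedOutput()
	if err != nil {
		fmt.Println("kubectl exec failed:", err, string(out))
		return
	}
	got := strings.TrimSpace(string(out))
	const want = "1048576" // value the test expects
	if got != want {
		fmt.Printf("'ulimit -n' returned %s, expected %s\n", got, want)
	} else {
		fmt.Println("'ulimit -n' is", got)
	}
}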
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: docker inspect <======
helpers_test.go:239: (dbg) Run: docker inspect old-k8s-version-462319
helpers_test.go:243: (dbg) docker inspect old-k8s-version-462319:
-- stdout --
[
{
"Id": "60eae3b63b81b346ead7547921d488153ed6b21604550a910dce24f5c18a0d66",
"Created": "2025-11-22T00:19:16.365495044Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 248707,
"ExitCode": 0,
"Error": "",
"StartedAt": "2025-11-22T00:19:16.402958348Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:e5906b22e872a17998ae88aee6d850484e7a99144e0db6afcf2c44a53e6042d4",
"ResolvConfPath": "/var/lib/docker/containers/60eae3b63b81b346ead7547921d488153ed6b21604550a910dce24f5c18a0d66/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/60eae3b63b81b346ead7547921d488153ed6b21604550a910dce24f5c18a0d66/hostname",
"HostsPath": "/var/lib/docker/containers/60eae3b63b81b346ead7547921d488153ed6b21604550a910dce24f5c18a0d66/hosts",
"LogPath": "/var/lib/docker/containers/60eae3b63b81b346ead7547921d488153ed6b21604550a910dce24f5c18a0d66/60eae3b63b81b346ead7547921d488153ed6b21604550a910dce24f5c18a0d66-json.log",
"Name": "/old-k8s-version-462319",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"old-k8s-version-462319:/var",
"/lib/modules:/lib/modules:ro"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {
"max-size": "100m"
}
},
"NetworkMode": "old-k8s-version-462319",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "private",
"Dns": null,
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 3221225472,
"NanoCpus": 0,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 6442450944,
"MemorySwappiness": null,
"OomKillDisable": null,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"ID": "60eae3b63b81b346ead7547921d488153ed6b21604550a910dce24f5c18a0d66",
"LowerDir": "/var/lib/docker/overlay2/6ca06b58ff047715f101193d0f051e92ffb3bb47f4e9e98de16e3d4c7f58beb1-init/diff:/var/lib/docker/overlay2/4b4af9a4e857911a6b5096aeeaee227ee7577c6eff3b08bbb4e765c49ed2fb70/diff",
"MergedDir": "/var/lib/docker/overlay2/6ca06b58ff047715f101193d0f051e92ffb3bb47f4e9e98de16e3d4c7f58beb1/merged",
"UpperDir": "/var/lib/docker/overlay2/6ca06b58ff047715f101193d0f051e92ffb3bb47f4e9e98de16e3d4c7f58beb1/diff",
"WorkDir": "/var/lib/docker/overlay2/6ca06b58ff047715f101193d0f051e92ffb3bb47f4e9e98de16e3d4c7f58beb1/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "old-k8s-version-462319",
"Source": "/var/lib/docker/volumes/old-k8s-version-462319/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "old-k8s-version-462319",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763588073-21934@sha256:19d3da0413e1bfa354cbb88004c6796f8e9772a083e0230b0f6e50212ee04c7e",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "old-k8s-version-462319",
"name.minikube.sigs.k8s.io": "old-k8s-version-462319",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"SandboxID": "b6589169c31c78bfea6577019ea30ba0adadee1467810b9b1a0b1b8b4a97b9f5",
"SandboxKey": "/var/run/docker/netns/b6589169c31c",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33058"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33059"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33062"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33060"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33061"
}
]
},
"Networks": {
"old-k8s-version-462319": {
"IPAMConfig": {
"IPv4Address": "192.168.103.2",
"IPv6Address": ""
},
"Links": null,
"Aliases": null,
"DriverOpts": null,
"GwPriority": 0,
"NetworkID": "08252eaaf7e532efc839aa6b0c4ce7bea14dc3e5057df8085e81eab6e1e46265",
"EndpointID": "d132fdb6f6e769e175e9e69bd315da82881eb4351a6b66ae2fe24784dbabd3ac",
"Gateway": "192.168.103.1",
"IPAddress": "192.168.103.2",
"MacAddress": "6e:0f:4c:be:16:ac",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"old-k8s-version-462319",
"60eae3b63b81"
]
}
}
}
}
]
-- /stdout --
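One detail in the inspect output above that is relevant to the ulimit failure is "Ulimits": [] under HostConfig, meaning no per-container limits were set and the container falls back to the daemon/runtime defaults. A small sketch (assuming only that the docker CLI is on PATH and the container still exists) for pulling just that field instead of the full dump:

package main

import (
	"encoding/json"
	"fmt"
	"os/exec"
)

func main() {
	// Ask docker inspect for only HostConfig.Ulimits, rendered as JSON.
	out, err := exec.Command("docker", "inspect",
		"--format", "{{json .HostConfig.Ulimits}}", "old-k8s-version-462319").Output()
	if err != nil {
		fmt.Println("docker inspect failed:", err)
		return
	}
	var ulimits []struct {
		Name string `json:"Name"`
		Soft int64  `json:"Soft"`
		Hard int64  `json:"Hard"`
	}
	if err := json.Unmarshal(out, &ulimits); err != nil {
		fmt.Println("unexpected output:", string(out))
		return
	}
	// An empty slice means the daemon/runtime defaults apply inside the container.
	fmt.Printf("HostConfig.Ulimits: %+v\n", ulimits)
}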
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p old-k8s-version-462319 -n old-k8s-version-462319
helpers_test.go:252: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-amd64 -p old-k8s-version-462319 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-amd64 -p old-k8s-version-462319 logs -n 25: (1.155373591s)
helpers_test.go:260: TestStartStop/group/old-k8s-version/serial/DeployApp logs:
-- stdout --
==> Audit <==
┌─────────┬─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬────────────────────────┬─────────┬─────────┬─────────────────────┬──────────────
───────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼────────────────────────┼─────────┼─────────┼─────────────────────┼──────────────
───────┤
│ ssh │ -p cilium-687868 sudo systemctl status cri-docker --all --full --no-pager │ cilium-687868 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ │
│ ssh │ -p cilium-687868 sudo systemctl cat cri-docker --no-pager │ cilium-687868 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ │
│ ssh │ -p cilium-687868 sudo cat /etc/systemd/system/cri-docker.service.d/10-cni.conf │ cilium-687868 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ │
│ ssh │ -p cilium-687868 sudo cat /usr/lib/systemd/system/cri-docker.service │ cilium-687868 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ │
│ ssh │ -p cilium-687868 sudo cri-dockerd --version │ cilium-687868 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ │
│ ssh │ -p cilium-687868 sudo systemctl status containerd --all --full --no-pager │ cilium-687868 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ │
│ ssh │ -p cilium-687868 sudo systemctl cat containerd --no-pager │ cilium-687868 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ │
│ ssh │ -p cilium-687868 sudo cat /lib/systemd/system/containerd.service │ cilium-687868 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ │
│ ssh │ -p cilium-687868 sudo cat /etc/containerd/config.toml │ cilium-687868 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ │
│ ssh │ -p cilium-687868 sudo containerd config dump │ cilium-687868 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ │
│ ssh │ -p cilium-687868 sudo systemctl status crio --all --full --no-pager │ cilium-687868 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ │
│ ssh │ -p cilium-687868 sudo systemctl cat crio --no-pager │ cilium-687868 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ │
│ ssh │ -p cilium-687868 sudo find /etc/crio -type f -exec sh -c 'echo {}; cat {}' \; │ cilium-687868 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ │
│ ssh │ -p cilium-687868 sudo crio config │ cilium-687868 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ │
│ delete │ -p cilium-687868 │ cilium-687868 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ 22 Nov 25 00:19 UTC │
│ start │ -p old-k8s-version-462319 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.28.0 │ old-k8s-version-462319 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ 22 Nov 25 00:20 UTC │
│ ssh │ -p NoKubernetes-714059 sudo systemctl is-active --quiet service kubelet │ NoKubernetes-714059 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ │
│ start │ -p cert-expiration-427330 --memory=3072 --cert-expiration=8760h --driver=docker --container-runtime=containerd │ cert-expiration-427330 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ 22 Nov 25 00:19 UTC │
│ delete │ -p cert-expiration-427330 │ cert-expiration-427330 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ 22 Nov 25 00:19 UTC │
│ start │ -p no-preload-781232 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.34.1 │ no-preload-781232 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ │
│ stop │ -p NoKubernetes-714059 │ NoKubernetes-714059 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ 22 Nov 25 00:19 UTC │
│ start │ -p NoKubernetes-714059 --driver=docker --container-runtime=containerd │ NoKubernetes-714059 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ 22 Nov 25 00:19 UTC │
│ ssh │ -p NoKubernetes-714059 sudo systemctl is-active --quiet service kubelet │ NoKubernetes-714059 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ │
│ delete │ -p NoKubernetes-714059 │ NoKubernetes-714059 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ 22 Nov 25 00:20 UTC │
│ start │ -p embed-certs-491677 --memory=3072 --alsologtostderr --wait=true --embed-certs --driver=docker --container-runtime=containerd --kubernetes-version=v1.34.1 │ embed-certs-491677 │ jenkins │ v1.37.0 │ 22 Nov 25 00:20 UTC │ │
└─────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴────────────────────────┴─────────┴─────────┴─────────────────────┴──────────────
───────┘
==> Last Start <==
Log file created at: 2025/11/22 00:20:01
Running on machine: ubuntu-20-agent-6
Binary: Built with gc go1.25.3 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1122 00:20:01.497017 260527 out.go:360] Setting OutFile to fd 1 ...
I1122 00:20:01.497324 260527 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1122 00:20:01.497336 260527 out.go:374] Setting ErrFile to fd 2...
I1122 00:20:01.497340 260527 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1122 00:20:01.497588 260527 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21934-9059/.minikube/bin
I1122 00:20:01.498054 260527 out.go:368] Setting JSON to false
I1122 00:20:01.499443 260527 start.go:133] hostinfo: {"hostname":"ubuntu-20-agent-6","uptime":3740,"bootTime":1763767061,"procs":385,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1044-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I1122 00:20:01.499503 260527 start.go:143] virtualization: kvm guest
I1122 00:20:01.501458 260527 out.go:179] * [embed-certs-491677] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
I1122 00:20:01.503562 260527 notify.go:221] Checking for updates...
I1122 00:20:01.503572 260527 out.go:179] - MINIKUBE_LOCATION=21934
I1122 00:20:01.505088 260527 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1122 00:20:01.506758 260527 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21934-9059/kubeconfig
I1122 00:20:01.508287 260527 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21934-9059/.minikube
I1122 00:20:01.509699 260527 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64
I1122 00:20:01.511183 260527 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1122 00:20:01.513382 260527 config.go:182] Loaded profile config "kubernetes-upgrade-882262": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1122 00:20:01.513541 260527 config.go:182] Loaded profile config "no-preload-781232": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1122 00:20:01.513638 260527 config.go:182] Loaded profile config "old-k8s-version-462319": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1122 00:20:01.513752 260527 driver.go:422] Setting default libvirt URI to qemu:///system
I1122 00:20:01.545401 260527 docker.go:124] docker version: linux-29.0.2:Docker Engine - Community
I1122 00:20:01.545504 260527 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1122 00:20:01.611105 260527 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:3 ContainersRunning:3 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:63 OomKillDisable:false NGoroutines:75 SystemTime:2025-11-22 00:20:01.601298329 +0000 UTC LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-1044-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x
86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652076544 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-6 Labels:[] ExperimentalBuild:false ServerVersion:29.0.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:fcd43222d6b07379a4be9786bda52438f0dd16a1 Expected:} RuncCommit:{ID:v1.3.3-0-gd842d771 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[m
ap[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.30.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.40.3] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v1.0.1] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1122 00:20:01.611234 260527 docker.go:319] overlay module found
I1122 00:20:01.613226 260527 out.go:179] * Using the docker driver based on user configuration
I1122 00:20:01.614649 260527 start.go:309] selected driver: docker
I1122 00:20:01.614666 260527 start.go:930] validating driver "docker" against <nil>
I1122 00:20:01.614677 260527 start.go:941] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1122 00:20:01.615350 260527 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1122 00:20:01.674666 260527 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:3 ContainersRunning:3 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:64 OomKillDisable:false NGoroutines:76 SystemTime:2025-11-22 00:20:01.664354692 +0000 UTC LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-1044-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x
86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652076544 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-6 Labels:[] ExperimentalBuild:false ServerVersion:29.0.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:fcd43222d6b07379a4be9786bda52438f0dd16a1 Expected:} RuncCommit:{ID:v1.3.3-0-gd842d771 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[m
ap[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.30.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.40.3] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v1.0.1] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1122 00:20:01.674876 260527 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I1122 00:20:01.675176 260527 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1122 00:20:01.676975 260527 out.go:179] * Using Docker driver with root privileges
I1122 00:20:01.678251 260527 cni.go:84] Creating CNI manager for ""
I1122 00:20:01.678367 260527 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1122 00:20:01.678383 260527 start_flags.go:336] Found "CNI" CNI - setting NetworkPlugin=cni
I1122 00:20:01.678481 260527 start.go:353] cluster config:
{Name:embed-certs-491677 KeepContext:false EmbedCerts:true MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763588073-21934@sha256:19d3da0413e1bfa354cbb88004c6796f8e9772a083e0230b0f6e50212ee04c7e Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:embed-certs-491677 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local Contain
erRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock
: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1122 00:20:01.680036 260527 out.go:179] * Starting "embed-certs-491677" primary control-plane node in "embed-certs-491677" cluster
I1122 00:20:01.683810 260527 cache.go:134] Beginning downloading kic base image for docker with containerd
I1122 00:20:01.685242 260527 out.go:179] * Pulling base image v0.0.48-1763588073-21934 ...
I1122 00:20:01.686680 260527 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
I1122 00:20:01.686729 260527 preload.go:203] Found local preload: /home/jenkins/minikube-integration/21934-9059/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-containerd-overlay2-amd64.tar.lz4
I1122 00:20:01.686743 260527 cache.go:65] Caching tarball of preloaded images
I1122 00:20:01.686775 260527 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763588073-21934@sha256:19d3da0413e1bfa354cbb88004c6796f8e9772a083e0230b0f6e50212ee04c7e in local docker daemon
I1122 00:20:01.686916 260527 preload.go:238] Found /home/jenkins/minikube-integration/21934-9059/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-containerd-overlay2-amd64.tar.lz4 in cache, skipping download
I1122 00:20:01.686942 260527 cache.go:68] Finished verifying existence of preloaded tar for v1.34.1 on containerd
I1122 00:20:01.687116 260527 profile.go:143] Saving config to /home/jenkins/minikube-integration/21934-9059/.minikube/profiles/embed-certs-491677/config.json ...
I1122 00:20:01.687148 260527 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21934-9059/.minikube/profiles/embed-certs-491677/config.json: {Name:mkf02d672882aad1c3b94e79745f8cf62e3f5b13 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1122 00:20:01.708872 260527 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763588073-21934@sha256:19d3da0413e1bfa354cbb88004c6796f8e9772a083e0230b0f6e50212ee04c7e in local docker daemon, skipping pull
I1122 00:20:01.708897 260527 cache.go:158] gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763588073-21934@sha256:19d3da0413e1bfa354cbb88004c6796f8e9772a083e0230b0f6e50212ee04c7e exists in daemon, skipping load
I1122 00:20:01.708914 260527 cache.go:243] Successfully downloaded all kic artifacts
I1122 00:20:01.708943 260527 start.go:360] acquireMachinesLock for embed-certs-491677: {Name:mkbe59d49caffedca862a9ecb177d8d82196efdb Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1122 00:20:01.709044 260527 start.go:364] duration metric: took 84.98µs to acquireMachinesLock for "embed-certs-491677"
I1122 00:20:01.709067 260527 start.go:93] Provisioning new machine with config: &{Name:embed-certs-491677 KeepContext:false EmbedCerts:true MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763588073-21934@sha256:19d3da0413e1bfa354cbb88004c6796f8e9772a083e0230b0f6e50212ee04c7e Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:embed-certs-491677 Namespace:default APIServerHAVIP: APIServe
rName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false Cus
tomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1122 00:20:01.709131 260527 start.go:125] createHost starting for "" (driver="docker")
I1122 00:19:58.829298 218693 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1122 00:19:58.829759 218693 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1122 00:19:58.829815 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1122 00:19:58.829864 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1122 00:19:58.856999 218693 cri.go:89] found id: "031d1abe53b88560dbac18645a9d04e01621af0ebc4bdacf93ba7cd987bdbc7d"
I1122 00:19:58.857027 218693 cri.go:89] found id: "2e3aaa0d96c2cc9d110b994e3df108e0a78b3e80dae0dc52febf87cbdd528587"
I1122 00:19:58.857033 218693 cri.go:89] found id: ""
I1122 00:19:58.857044 218693 logs.go:282] 2 containers: [031d1abe53b88560dbac18645a9d04e01621af0ebc4bdacf93ba7cd987bdbc7d 2e3aaa0d96c2cc9d110b994e3df108e0a78b3e80dae0dc52febf87cbdd528587]
I1122 00:19:58.857093 218693 ssh_runner.go:195] Run: which crictl
I1122 00:19:58.861107 218693 ssh_runner.go:195] Run: which crictl
I1122 00:19:58.865268 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1122 00:19:58.865337 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1122 00:19:58.892542 218693 cri.go:89] found id: "ce556a5394180410f0cc434955d664ca0f25f8999150ddb0c902378b8f0ec7b7"
I1122 00:19:58.892564 218693 cri.go:89] found id: ""
I1122 00:19:58.892572 218693 logs.go:282] 1 containers: [ce556a5394180410f0cc434955d664ca0f25f8999150ddb0c902378b8f0ec7b7]
I1122 00:19:58.892626 218693 ssh_runner.go:195] Run: which crictl
I1122 00:19:58.896771 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1122 00:19:58.896846 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1122 00:19:58.925628 218693 cri.go:89] found id: ""
I1122 00:19:58.925652 218693 logs.go:282] 0 containers: []
W1122 00:19:58.925660 218693 logs.go:284] No container was found matching "coredns"
I1122 00:19:58.925666 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1122 00:19:58.925724 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1122 00:19:58.955304 218693 cri.go:89] found id: "8cb86218489f4e4ea496e1edbb81a5bfa9657517fe1f841ab1f262169880ef78"
I1122 00:19:58.955326 218693 cri.go:89] found id: "b072fb61e5e8d9fb0c450f6123a51d4ba86ee5162b4c0378a606893fb26410b2"
I1122 00:19:58.955332 218693 cri.go:89] found id: ""
I1122 00:19:58.955340 218693 logs.go:282] 2 containers: [8cb86218489f4e4ea496e1edbb81a5bfa9657517fe1f841ab1f262169880ef78 b072fb61e5e8d9fb0c450f6123a51d4ba86ee5162b4c0378a606893fb26410b2]
I1122 00:19:58.955397 218693 ssh_runner.go:195] Run: which crictl
I1122 00:19:58.959396 218693 ssh_runner.go:195] Run: which crictl
I1122 00:19:58.963562 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1122 00:19:58.963626 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1122 00:19:58.991860 218693 cri.go:89] found id: ""
I1122 00:19:58.991883 218693 logs.go:282] 0 containers: []
W1122 00:19:58.991890 218693 logs.go:284] No container was found matching "kube-proxy"
I1122 00:19:58.991895 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1122 00:19:58.991949 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1122 00:19:59.020457 218693 cri.go:89] found id: "91989ea7d1eb87264ea639688db06633fb66749e41f18e88a4bd9a185ac7a68a"
I1122 00:19:59.020483 218693 cri.go:89] found id: "13facf83677f37e3b97292f0f2dc164096fcd8cff5e71fdcf0ad085e3602f216"
I1122 00:19:59.020489 218693 cri.go:89] found id: ""
I1122 00:19:59.020502 218693 logs.go:282] 2 containers: [91989ea7d1eb87264ea639688db06633fb66749e41f18e88a4bd9a185ac7a68a 13facf83677f37e3b97292f0f2dc164096fcd8cff5e71fdcf0ad085e3602f216]
I1122 00:19:59.020550 218693 ssh_runner.go:195] Run: which crictl
I1122 00:19:59.024967 218693 ssh_runner.go:195] Run: which crictl
I1122 00:19:59.031778 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1122 00:19:59.031854 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1122 00:19:59.061726 218693 cri.go:89] found id: ""
I1122 00:19:59.061752 218693 logs.go:282] 0 containers: []
W1122 00:19:59.061763 218693 logs.go:284] No container was found matching "kindnet"
I1122 00:19:59.061771 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1122 00:19:59.061831 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1122 00:19:59.089141 218693 cri.go:89] found id: ""
I1122 00:19:59.089164 218693 logs.go:282] 0 containers: []
W1122 00:19:59.089174 218693 logs.go:284] No container was found matching "storage-provisioner"
I1122 00:19:59.089185 218693 logs.go:123] Gathering logs for kubelet ...
I1122 00:19:59.089198 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1122 00:19:59.186417 218693 logs.go:123] Gathering logs for dmesg ...
I1122 00:19:59.186452 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1122 00:19:59.201060 218693 logs.go:123] Gathering logs for describe nodes ...
I1122 00:19:59.201095 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1122 00:19:59.264254 218693 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1122 00:19:59.264297 218693 logs.go:123] Gathering logs for kube-apiserver [031d1abe53b88560dbac18645a9d04e01621af0ebc4bdacf93ba7cd987bdbc7d] ...
I1122 00:19:59.264313 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 031d1abe53b88560dbac18645a9d04e01621af0ebc4bdacf93ba7cd987bdbc7d"
I1122 00:19:59.303605 218693 logs.go:123] Gathering logs for kube-scheduler [8cb86218489f4e4ea496e1edbb81a5bfa9657517fe1f841ab1f262169880ef78] ...
I1122 00:19:59.303643 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 8cb86218489f4e4ea496e1edbb81a5bfa9657517fe1f841ab1f262169880ef78"
I1122 00:19:59.358382 218693 logs.go:123] Gathering logs for kube-controller-manager [13facf83677f37e3b97292f0f2dc164096fcd8cff5e71fdcf0ad085e3602f216] ...
I1122 00:19:59.358425 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 13facf83677f37e3b97292f0f2dc164096fcd8cff5e71fdcf0ad085e3602f216"
I1122 00:19:59.398629 218693 logs.go:123] Gathering logs for containerd ...
I1122 00:19:59.398669 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1122 00:19:59.449463 218693 logs.go:123] Gathering logs for kube-apiserver [2e3aaa0d96c2cc9d110b994e3df108e0a78b3e80dae0dc52febf87cbdd528587] ...
I1122 00:19:59.449505 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 2e3aaa0d96c2cc9d110b994e3df108e0a78b3e80dae0dc52febf87cbdd528587"
I1122 00:19:59.487365 218693 logs.go:123] Gathering logs for etcd [ce556a5394180410f0cc434955d664ca0f25f8999150ddb0c902378b8f0ec7b7] ...
I1122 00:19:59.487403 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 ce556a5394180410f0cc434955d664ca0f25f8999150ddb0c902378b8f0ec7b7"
I1122 00:19:59.526046 218693 logs.go:123] Gathering logs for kube-scheduler [b072fb61e5e8d9fb0c450f6123a51d4ba86ee5162b4c0378a606893fb26410b2] ...
I1122 00:19:59.526080 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 b072fb61e5e8d9fb0c450f6123a51d4ba86ee5162b4c0378a606893fb26410b2"
I1122 00:19:59.562812 218693 logs.go:123] Gathering logs for kube-controller-manager [91989ea7d1eb87264ea639688db06633fb66749e41f18e88a4bd9a185ac7a68a] ...
I1122 00:19:59.562843 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 91989ea7d1eb87264ea639688db06633fb66749e41f18e88a4bd9a185ac7a68a"
I1122 00:19:59.594191 218693 logs.go:123] Gathering logs for container status ...
I1122 00:19:59.594230 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1122 00:20:02.129372 218693 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1122 00:20:02.129923 218693 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1122 00:20:02.130004 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1122 00:20:02.130071 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1122 00:20:02.161455 218693 cri.go:89] found id: "031d1abe53b88560dbac18645a9d04e01621af0ebc4bdacf93ba7cd987bdbc7d"
I1122 00:20:02.161484 218693 cri.go:89] found id: "2e3aaa0d96c2cc9d110b994e3df108e0a78b3e80dae0dc52febf87cbdd528587"
I1122 00:20:02.161490 218693 cri.go:89] found id: ""
I1122 00:20:02.161501 218693 logs.go:282] 2 containers: [031d1abe53b88560dbac18645a9d04e01621af0ebc4bdacf93ba7cd987bdbc7d 2e3aaa0d96c2cc9d110b994e3df108e0a78b3e80dae0dc52febf87cbdd528587]
I1122 00:20:02.161563 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:02.165824 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:02.170451 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1122 00:20:02.170522 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1122 00:19:58.029853 251199 kapi.go:214] "coredns" deployment in "kube-system" namespace and "no-preload-781232" context rescaled to 1 replicas
W1122 00:19:59.529847 251199 node_ready.go:57] node "no-preload-781232" has "Ready":"False" status (will retry)
W1122 00:20:01.530493 251199 node_ready.go:57] node "no-preload-781232" has "Ready":"False" status (will retry)
W1122 00:20:00.520224 247021 node_ready.go:57] node "old-k8s-version-462319" has "Ready":"False" status (will retry)
I1122 00:20:01.019651 247021 node_ready.go:49] node "old-k8s-version-462319" is "Ready"
I1122 00:20:01.019681 247021 node_ready.go:38] duration metric: took 14.003330086s for node "old-k8s-version-462319" to be "Ready" ...
I1122 00:20:01.019696 247021 api_server.go:52] waiting for apiserver process to appear ...
I1122 00:20:01.019743 247021 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1122 00:20:01.032926 247021 api_server.go:72] duration metric: took 14.481952557s to wait for apiserver process to appear ...
I1122 00:20:01.032954 247021 api_server.go:88] waiting for apiserver healthz status ...
I1122 00:20:01.032973 247021 api_server.go:253] Checking apiserver healthz at https://192.168.103.2:8443/healthz ...
I1122 00:20:01.039899 247021 api_server.go:279] https://192.168.103.2:8443/healthz returned 200:
ok
I1122 00:20:01.041146 247021 api_server.go:141] control plane version: v1.28.0
I1122 00:20:01.041172 247021 api_server.go:131] duration metric: took 8.212119ms to wait for apiserver health ...
I1122 00:20:01.041191 247021 system_pods.go:43] waiting for kube-system pods to appear ...
I1122 00:20:01.044815 247021 system_pods.go:59] 8 kube-system pods found
I1122 00:20:01.044853 247021 system_pods.go:61] "coredns-5dd5756b68-pqbfp" [44750e8d-5eeb-4845-9029-a58cbf976b62] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1122 00:20:01.044862 247021 system_pods.go:61] "etcd-old-k8s-version-462319" [9580468b-aa0f-4d73-9c35-f9cc4c817cdd] Running
I1122 00:20:01.044874 247021 system_pods.go:61] "kindnet-ldtd8" [6bf161d2-c442-466d-98b8-c313a127bf22] Running
I1122 00:20:01.044879 247021 system_pods.go:61] "kube-apiserver-old-k8s-version-462319" [2f4b6fd0-2929-448d-820c-aabf2a9d4744] Running
I1122 00:20:01.044888 247021 system_pods.go:61] "kube-controller-manager-old-k8s-version-462319" [83b4a291-8bac-4581-b4a6-80471e7228eb] Running
I1122 00:20:01.044897 247021 system_pods.go:61] "kube-proxy-kqrng" [643cd348-4af3-4720-af0d-e931f184742c] Running
I1122 00:20:01.044901 247021 system_pods.go:61] "kube-scheduler-old-k8s-version-462319" [c1dc982d-cc79-4df6-bdc4-7e47f5d5236c] Running
I1122 00:20:01.044909 247021 system_pods.go:61] "storage-provisioner" [fc0f2774-324d-4c1a-97b7-d3e3d30ea8b2] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1122 00:20:01.044918 247021 system_pods.go:74] duration metric: took 3.718269ms to wait for pod list to return data ...
I1122 00:20:01.044929 247021 default_sa.go:34] waiting for default service account to be created ...
I1122 00:20:01.047150 247021 default_sa.go:45] found service account: "default"
I1122 00:20:01.047173 247021 default_sa.go:55] duration metric: took 2.236156ms for default service account to be created ...
I1122 00:20:01.047182 247021 system_pods.go:116] waiting for k8s-apps to be running ...
I1122 00:20:01.050474 247021 system_pods.go:86] 8 kube-system pods found
I1122 00:20:01.050506 247021 system_pods.go:89] "coredns-5dd5756b68-pqbfp" [44750e8d-5eeb-4845-9029-a58cbf976b62] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1122 00:20:01.050514 247021 system_pods.go:89] "etcd-old-k8s-version-462319" [9580468b-aa0f-4d73-9c35-f9cc4c817cdd] Running
I1122 00:20:01.050523 247021 system_pods.go:89] "kindnet-ldtd8" [6bf161d2-c442-466d-98b8-c313a127bf22] Running
I1122 00:20:01.050528 247021 system_pods.go:89] "kube-apiserver-old-k8s-version-462319" [2f4b6fd0-2929-448d-820c-aabf2a9d4744] Running
I1122 00:20:01.050533 247021 system_pods.go:89] "kube-controller-manager-old-k8s-version-462319" [83b4a291-8bac-4581-b4a6-80471e7228eb] Running
I1122 00:20:01.050539 247021 system_pods.go:89] "kube-proxy-kqrng" [643cd348-4af3-4720-af0d-e931f184742c] Running
I1122 00:20:01.050544 247021 system_pods.go:89] "kube-scheduler-old-k8s-version-462319" [c1dc982d-cc79-4df6-bdc4-7e47f5d5236c] Running
I1122 00:20:01.050551 247021 system_pods.go:89] "storage-provisioner" [fc0f2774-324d-4c1a-97b7-d3e3d30ea8b2] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1122 00:20:01.050577 247021 retry.go:31] will retry after 205.575764ms: missing components: kube-dns
I1122 00:20:01.261814 247021 system_pods.go:86] 8 kube-system pods found
I1122 00:20:01.261847 247021 system_pods.go:89] "coredns-5dd5756b68-pqbfp" [44750e8d-5eeb-4845-9029-a58cbf976b62] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1122 00:20:01.261859 247021 system_pods.go:89] "etcd-old-k8s-version-462319" [9580468b-aa0f-4d73-9c35-f9cc4c817cdd] Running
I1122 00:20:01.261865 247021 system_pods.go:89] "kindnet-ldtd8" [6bf161d2-c442-466d-98b8-c313a127bf22] Running
I1122 00:20:01.261869 247021 system_pods.go:89] "kube-apiserver-old-k8s-version-462319" [2f4b6fd0-2929-448d-820c-aabf2a9d4744] Running
I1122 00:20:01.261873 247021 system_pods.go:89] "kube-controller-manager-old-k8s-version-462319" [83b4a291-8bac-4581-b4a6-80471e7228eb] Running
I1122 00:20:01.261877 247021 system_pods.go:89] "kube-proxy-kqrng" [643cd348-4af3-4720-af0d-e931f184742c] Running
I1122 00:20:01.261879 247021 system_pods.go:89] "kube-scheduler-old-k8s-version-462319" [c1dc982d-cc79-4df6-bdc4-7e47f5d5236c] Running
I1122 00:20:01.261884 247021 system_pods.go:89] "storage-provisioner" [fc0f2774-324d-4c1a-97b7-d3e3d30ea8b2] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1122 00:20:01.261900 247021 retry.go:31] will retry after 236.21482ms: missing components: kube-dns
I1122 00:20:01.502877 247021 system_pods.go:86] 8 kube-system pods found
I1122 00:20:01.502913 247021 system_pods.go:89] "coredns-5dd5756b68-pqbfp" [44750e8d-5eeb-4845-9029-a58cbf976b62] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1122 00:20:01.502921 247021 system_pods.go:89] "etcd-old-k8s-version-462319" [9580468b-aa0f-4d73-9c35-f9cc4c817cdd] Running
I1122 00:20:01.502929 247021 system_pods.go:89] "kindnet-ldtd8" [6bf161d2-c442-466d-98b8-c313a127bf22] Running
I1122 00:20:01.502935 247021 system_pods.go:89] "kube-apiserver-old-k8s-version-462319" [2f4b6fd0-2929-448d-820c-aabf2a9d4744] Running
I1122 00:20:01.502952 247021 system_pods.go:89] "kube-controller-manager-old-k8s-version-462319" [83b4a291-8bac-4581-b4a6-80471e7228eb] Running
I1122 00:20:01.502957 247021 system_pods.go:89] "kube-proxy-kqrng" [643cd348-4af3-4720-af0d-e931f184742c] Running
I1122 00:20:01.502962 247021 system_pods.go:89] "kube-scheduler-old-k8s-version-462319" [c1dc982d-cc79-4df6-bdc4-7e47f5d5236c] Running
I1122 00:20:01.502984 247021 system_pods.go:89] "storage-provisioner" [fc0f2774-324d-4c1a-97b7-d3e3d30ea8b2] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1122 00:20:01.503005 247021 retry.go:31] will retry after 442.873739ms: missing components: kube-dns
I1122 00:20:01.950449 247021 system_pods.go:86] 8 kube-system pods found
I1122 00:20:01.950483 247021 system_pods.go:89] "coredns-5dd5756b68-pqbfp" [44750e8d-5eeb-4845-9029-a58cbf976b62] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1122 00:20:01.950492 247021 system_pods.go:89] "etcd-old-k8s-version-462319" [9580468b-aa0f-4d73-9c35-f9cc4c817cdd] Running
I1122 00:20:01.950500 247021 system_pods.go:89] "kindnet-ldtd8" [6bf161d2-c442-466d-98b8-c313a127bf22] Running
I1122 00:20:01.950505 247021 system_pods.go:89] "kube-apiserver-old-k8s-version-462319" [2f4b6fd0-2929-448d-820c-aabf2a9d4744] Running
I1122 00:20:01.950516 247021 system_pods.go:89] "kube-controller-manager-old-k8s-version-462319" [83b4a291-8bac-4581-b4a6-80471e7228eb] Running
I1122 00:20:01.950521 247021 system_pods.go:89] "kube-proxy-kqrng" [643cd348-4af3-4720-af0d-e931f184742c] Running
I1122 00:20:01.950526 247021 system_pods.go:89] "kube-scheduler-old-k8s-version-462319" [c1dc982d-cc79-4df6-bdc4-7e47f5d5236c] Running
I1122 00:20:01.950530 247021 system_pods.go:89] "storage-provisioner" [fc0f2774-324d-4c1a-97b7-d3e3d30ea8b2] Running
I1122 00:20:01.950541 247021 system_pods.go:126] duration metric: took 903.352039ms to wait for k8s-apps to be running ...
I1122 00:20:01.950553 247021 system_svc.go:44] waiting for kubelet service to be running ....
I1122 00:20:01.950602 247021 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1122 00:20:01.964580 247021 system_svc.go:56] duration metric: took 14.015441ms WaitForService to wait for kubelet
I1122 00:20:01.964612 247021 kubeadm.go:587] duration metric: took 15.413644993s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1122 00:20:01.964634 247021 node_conditions.go:102] verifying NodePressure condition ...
I1122 00:20:01.968157 247021 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I1122 00:20:01.968185 247021 node_conditions.go:123] node cpu capacity is 8
I1122 00:20:01.968205 247021 node_conditions.go:105] duration metric: took 3.565831ms to run NodePressure ...
I1122 00:20:01.968227 247021 start.go:242] waiting for startup goroutines ...
I1122 00:20:01.968237 247021 start.go:247] waiting for cluster config update ...
I1122 00:20:01.968254 247021 start.go:256] writing updated cluster config ...
I1122 00:20:01.968545 247021 ssh_runner.go:195] Run: rm -f paused
I1122 00:20:01.972712 247021 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1122 00:20:01.976920 247021 pod_ready.go:83] waiting for pod "coredns-5dd5756b68-pqbfp" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:02.983354 247021 pod_ready.go:94] pod "coredns-5dd5756b68-pqbfp" is "Ready"
I1122 00:20:02.983385 247021 pod_ready.go:86] duration metric: took 1.00643947s for pod "coredns-5dd5756b68-pqbfp" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:02.987209 247021 pod_ready.go:83] waiting for pod "etcd-old-k8s-version-462319" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:02.992024 247021 pod_ready.go:94] pod "etcd-old-k8s-version-462319" is "Ready"
I1122 00:20:02.992053 247021 pod_ready.go:86] duration metric: took 4.821819ms for pod "etcd-old-k8s-version-462319" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:02.994875 247021 pod_ready.go:83] waiting for pod "kube-apiserver-old-k8s-version-462319" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:02.998765 247021 pod_ready.go:94] pod "kube-apiserver-old-k8s-version-462319" is "Ready"
I1122 00:20:02.998789 247021 pod_ready.go:86] duration metric: took 3.892836ms for pod "kube-apiserver-old-k8s-version-462319" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:03.001798 247021 pod_ready.go:83] waiting for pod "kube-controller-manager-old-k8s-version-462319" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:03.181579 247021 pod_ready.go:94] pod "kube-controller-manager-old-k8s-version-462319" is "Ready"
I1122 00:20:03.181611 247021 pod_ready.go:86] duration metric: took 179.791243ms for pod "kube-controller-manager-old-k8s-version-462319" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:03.381883 247021 pod_ready.go:83] waiting for pod "kube-proxy-kqrng" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:03.781562 247021 pod_ready.go:94] pod "kube-proxy-kqrng" is "Ready"
I1122 00:20:03.781594 247021 pod_ready.go:86] duration metric: took 399.684082ms for pod "kube-proxy-kqrng" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:03.981736 247021 pod_ready.go:83] waiting for pod "kube-scheduler-old-k8s-version-462319" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:04.381559 247021 pod_ready.go:94] pod "kube-scheduler-old-k8s-version-462319" is "Ready"
I1122 00:20:04.381590 247021 pod_ready.go:86] duration metric: took 399.825883ms for pod "kube-scheduler-old-k8s-version-462319" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:04.381604 247021 pod_ready.go:40] duration metric: took 2.408861294s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1122 00:20:04.431804 247021 start.go:628] kubectl: 1.34.2, cluster: 1.28.0 (minor skew: 6)
I1122 00:20:04.435233 247021 out.go:203]
W1122 00:20:04.436473 247021 out.go:285] ! /usr/local/bin/kubectl is version 1.34.2, which may have incompatibilities with Kubernetes 1.28.0.
I1122 00:20:04.437863 247021 out.go:179] - Want kubectl v1.28.0? Try 'minikube kubectl -- get pods -A'
I1122 00:20:04.439555 247021 out.go:179] * Done! kubectl is now configured to use "old-k8s-version-462319" cluster and "default" namespace by default
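The skew warning above (kubectl 1.34.2 against cluster 1.28.0, minor skew 6) can be recomputed from kubectl version output; a small illustrative sketch (not minikube code, assuming kubectl is on PATH and the old-k8s-version-462319 context is reachable):

package main

import (
	"encoding/json"
	"fmt"
	"os/exec"
	"strconv"
	"strings"
)

func main() {
	// kubectl prints client and server versions as JSON with --output=json.
	out, err := exec.Command("kubectl", "--context", "old-k8s-version-462319",
		"version", "--output", "json").Output()
	if err != nil {
		fmt.Println("kubectl version failed:", err)
		return
	}
	var v struct {
		ClientVersion struct{ Minor string } `json:"clientVersion"`
		ServerVersion struct{ Minor string } `json:"serverVersion"`
	}
	if err := json.Unmarshal(out, &v); err != nil {
		fmt.Println("unexpected output:", string(out))
		return
	}
	// Minor versions can carry a "+" suffix on some builds; strip it before parsing.
	client, _ := strconv.Atoi(strings.TrimSuffix(v.ClientVersion.Minor, "+"))
	server, _ := strconv.Atoi(strings.TrimSuffix(v.ServerVersion.Minor, "+"))
	skew := client - server
	if skew < 0 {
		skew = -skew
	}
	fmt.Printf("client minor %d, server minor %d, skew %d\n", client, server, skew)
}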
I1122 00:20:01.711315 260527 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I1122 00:20:01.711555 260527 start.go:159] libmachine.API.Create for "embed-certs-491677" (driver="docker")
I1122 00:20:01.711610 260527 client.go:173] LocalClient.Create starting
I1122 00:20:01.711685 260527 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21934-9059/.minikube/certs/ca.pem
I1122 00:20:01.711719 260527 main.go:143] libmachine: Decoding PEM data...
I1122 00:20:01.711737 260527 main.go:143] libmachine: Parsing certificate...
I1122 00:20:01.711816 260527 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21934-9059/.minikube/certs/cert.pem
I1122 00:20:01.711837 260527 main.go:143] libmachine: Decoding PEM data...
I1122 00:20:01.711846 260527 main.go:143] libmachine: Parsing certificate...
I1122 00:20:01.712184 260527 cli_runner.go:164] Run: docker network inspect embed-certs-491677 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1122 00:20:01.730686 260527 cli_runner.go:211] docker network inspect embed-certs-491677 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1122 00:20:01.730752 260527 network_create.go:284] running [docker network inspect embed-certs-491677] to gather additional debugging logs...
I1122 00:20:01.730771 260527 cli_runner.go:164] Run: docker network inspect embed-certs-491677
W1122 00:20:01.749708 260527 cli_runner.go:211] docker network inspect embed-certs-491677 returned with exit code 1
I1122 00:20:01.749739 260527 network_create.go:287] error running [docker network inspect embed-certs-491677]: docker network inspect embed-certs-491677: exit status 1
stdout:
[]
stderr:
Error response from daemon: network embed-certs-491677 not found
I1122 00:20:01.749755 260527 network_create.go:289] output of [docker network inspect embed-certs-491677]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network embed-certs-491677 not found
** /stderr **
I1122 00:20:01.749902 260527 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1122 00:20:01.769006 260527 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-1df6c22ede91 IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:5a:c7:f4:a5:24:54} reservation:<nil>}
I1122 00:20:01.769731 260527 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-7d48551462a8 IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:ca:3b:0e:74:ee:57} reservation:<nil>}
I1122 00:20:01.770416 260527 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-c50004b7f5b6 IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:56:73:1e:0d:b7:11} reservation:<nil>}
I1122 00:20:01.771113 260527 network.go:211] skipping subnet 192.168.76.0/24 that is taken: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName:br-166d2f324fb5 IfaceIPv4:192.168.76.1 IfaceMTU:1500 IfaceMAC:02:da:99:1e:87:6f} reservation:<nil>}
I1122 00:20:01.771891 260527 network.go:206] using free private subnet 192.168.85.0/24: &{IP:192.168.85.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.85.0/24 Gateway:192.168.85.1 ClientMin:192.168.85.2 ClientMax:192.168.85.254 Broadcast:192.168.85.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001ebca10}
I1122 00:20:01.771919 260527 network_create.go:124] attempt to create docker network embed-certs-491677 192.168.85.0/24 with gateway 192.168.85.1 and MTU of 1500 ...
I1122 00:20:01.771970 260527 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.85.0/24 --gateway=192.168.85.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=embed-certs-491677 embed-certs-491677
I1122 00:20:01.823460 260527 network_create.go:108] docker network embed-certs-491677 192.168.85.0/24 created
I1122 00:20:01.823495 260527 kic.go:121] calculated static IP "192.168.85.2" for the "embed-certs-491677" container
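The subnet scan above walks the 192.168.x.0/24 candidates in steps of 9 (49, 58, 67, 76, ...) and settles on the first block that no existing bridge already occupies, then derives the node's static IP (.2) from it. A simplified, hypothetical sketch of that selection, not minikube's network.go:

package main

import "fmt"

func main() {
	// Bridges already present on this host, per the "skipping subnet" lines above.
	taken := map[string]bool{
		"192.168.49.0/24": true, // br-1df6c22ede91
		"192.168.58.0/24": true, // br-7d48551462a8
		"192.168.67.0/24": true, // br-c50004b7f5b6
		"192.168.76.0/24": true, // br-166d2f324fb5
	}
	for third := 49; third <= 255; third += 9 {
		subnet := fmt.Sprintf("192.168.%d.0/24", third)
		if !taken[subnet] {
			fmt.Println("using free private subnet", subnet) // 192.168.85.0/24 here
			fmt.Printf("static container IP: 192.168.%d.2\n", third)
			return
		}
	}
}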
I1122 00:20:01.823677 260527 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1122 00:20:01.843300 260527 cli_runner.go:164] Run: docker volume create embed-certs-491677 --label name.minikube.sigs.k8s.io=embed-certs-491677 --label created_by.minikube.sigs.k8s.io=true
I1122 00:20:01.863723 260527 oci.go:103] Successfully created a docker volume embed-certs-491677
I1122 00:20:01.863797 260527 cli_runner.go:164] Run: docker run --rm --name embed-certs-491677-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=embed-certs-491677 --entrypoint /usr/bin/test -v embed-certs-491677:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763588073-21934@sha256:19d3da0413e1bfa354cbb88004c6796f8e9772a083e0230b0f6e50212ee04c7e -d /var/lib
I1122 00:20:02.270865 260527 oci.go:107] Successfully prepared a docker volume embed-certs-491677
I1122 00:20:02.270965 260527 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
I1122 00:20:02.270986 260527 kic.go:194] Starting extracting preloaded images to volume ...
I1122 00:20:02.271058 260527 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21934-9059/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v embed-certs-491677:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763588073-21934@sha256:19d3da0413e1bfa354cbb88004c6796f8e9772a083e0230b0f6e50212ee04c7e -I lz4 -xf /preloaded.tar -C /extractDir
I1122 00:20:02.204729 218693 cri.go:89] found id: "ce556a5394180410f0cc434955d664ca0f25f8999150ddb0c902378b8f0ec7b7"
I1122 00:20:02.204756 218693 cri.go:89] found id: ""
I1122 00:20:02.204766 218693 logs.go:282] 1 containers: [ce556a5394180410f0cc434955d664ca0f25f8999150ddb0c902378b8f0ec7b7]
I1122 00:20:02.204829 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:02.209535 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1122 00:20:02.209603 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1122 00:20:02.247383 218693 cri.go:89] found id: ""
I1122 00:20:02.247408 218693 logs.go:282] 0 containers: []
W1122 00:20:02.247416 218693 logs.go:284] No container was found matching "coredns"
I1122 00:20:02.247422 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1122 00:20:02.247484 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1122 00:20:02.277440 218693 cri.go:89] found id: "8cb86218489f4e4ea496e1edbb81a5bfa9657517fe1f841ab1f262169880ef78"
I1122 00:20:02.277466 218693 cri.go:89] found id: "b072fb61e5e8d9fb0c450f6123a51d4ba86ee5162b4c0378a606893fb26410b2"
I1122 00:20:02.277473 218693 cri.go:89] found id: ""
I1122 00:20:02.277483 218693 logs.go:282] 2 containers: [8cb86218489f4e4ea496e1edbb81a5bfa9657517fe1f841ab1f262169880ef78 b072fb61e5e8d9fb0c450f6123a51d4ba86ee5162b4c0378a606893fb26410b2]
I1122 00:20:02.277545 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:02.282049 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:02.286514 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1122 00:20:02.286581 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1122 00:20:02.316706 218693 cri.go:89] found id: ""
I1122 00:20:02.316733 218693 logs.go:282] 0 containers: []
W1122 00:20:02.316744 218693 logs.go:284] No container was found matching "kube-proxy"
I1122 00:20:02.316753 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1122 00:20:02.316813 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1122 00:20:02.347451 218693 cri.go:89] found id: "91989ea7d1eb87264ea639688db06633fb66749e41f18e88a4bd9a185ac7a68a"
I1122 00:20:02.347471 218693 cri.go:89] found id: "13facf83677f37e3b97292f0f2dc164096fcd8cff5e71fdcf0ad085e3602f216"
I1122 00:20:02.347476 218693 cri.go:89] found id: ""
I1122 00:20:02.347486 218693 logs.go:282] 2 containers: [91989ea7d1eb87264ea639688db06633fb66749e41f18e88a4bd9a185ac7a68a 13facf83677f37e3b97292f0f2dc164096fcd8cff5e71fdcf0ad085e3602f216]
I1122 00:20:02.347542 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:02.352378 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:02.356502 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1122 00:20:02.356561 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1122 00:20:02.384778 218693 cri.go:89] found id: ""
I1122 00:20:02.384802 218693 logs.go:282] 0 containers: []
W1122 00:20:02.384814 218693 logs.go:284] No container was found matching "kindnet"
I1122 00:20:02.384825 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1122 00:20:02.384887 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1122 00:20:02.421102 218693 cri.go:89] found id: ""
I1122 00:20:02.421131 218693 logs.go:282] 0 containers: []
W1122 00:20:02.421143 218693 logs.go:284] No container was found matching "storage-provisioner"
I1122 00:20:02.421156 218693 logs.go:123] Gathering logs for containerd ...
I1122 00:20:02.421171 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1122 00:20:02.477880 218693 logs.go:123] Gathering logs for kubelet ...
I1122 00:20:02.477924 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1122 00:20:02.574856 218693 logs.go:123] Gathering logs for describe nodes ...
I1122 00:20:02.574892 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1122 00:20:02.641120 218693 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1122 00:20:02.641142 218693 logs.go:123] Gathering logs for kube-apiserver [031d1abe53b88560dbac18645a9d04e01621af0ebc4bdacf93ba7cd987bdbc7d] ...
I1122 00:20:02.641154 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 031d1abe53b88560dbac18645a9d04e01621af0ebc4bdacf93ba7cd987bdbc7d"
I1122 00:20:02.681648 218693 logs.go:123] Gathering logs for kube-scheduler [8cb86218489f4e4ea496e1edbb81a5bfa9657517fe1f841ab1f262169880ef78] ...
I1122 00:20:02.681686 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 8cb86218489f4e4ea496e1edbb81a5bfa9657517fe1f841ab1f262169880ef78"
I1122 00:20:02.739093 218693 logs.go:123] Gathering logs for kube-scheduler [b072fb61e5e8d9fb0c450f6123a51d4ba86ee5162b4c0378a606893fb26410b2] ...
I1122 00:20:02.739128 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 b072fb61e5e8d9fb0c450f6123a51d4ba86ee5162b4c0378a606893fb26410b2"
I1122 00:20:02.774358 218693 logs.go:123] Gathering logs for kube-controller-manager [13facf83677f37e3b97292f0f2dc164096fcd8cff5e71fdcf0ad085e3602f216] ...
I1122 00:20:02.774395 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 13facf83677f37e3b97292f0f2dc164096fcd8cff5e71fdcf0ad085e3602f216"
I1122 00:20:02.810272 218693 logs.go:123] Gathering logs for container status ...
I1122 00:20:02.810310 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1122 00:20:02.842900 218693 logs.go:123] Gathering logs for dmesg ...
I1122 00:20:02.842942 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1122 00:20:02.857743 218693 logs.go:123] Gathering logs for kube-apiserver [2e3aaa0d96c2cc9d110b994e3df108e0a78b3e80dae0dc52febf87cbdd528587] ...
I1122 00:20:02.857784 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 2e3aaa0d96c2cc9d110b994e3df108e0a78b3e80dae0dc52febf87cbdd528587"
I1122 00:20:02.894229 218693 logs.go:123] Gathering logs for etcd [ce556a5394180410f0cc434955d664ca0f25f8999150ddb0c902378b8f0ec7b7] ...
I1122 00:20:02.894272 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 ce556a5394180410f0cc434955d664ca0f25f8999150ddb0c902378b8f0ec7b7"
I1122 00:20:02.929523 218693 logs.go:123] Gathering logs for kube-controller-manager [91989ea7d1eb87264ea639688db06633fb66749e41f18e88a4bd9a185ac7a68a] ...
I1122 00:20:02.929555 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 91989ea7d1eb87264ea639688db06633fb66749e41f18e88a4bd9a185ac7a68a"
I1122 00:20:05.459958 218693 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1122 00:20:05.460532 218693 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1122 00:20:05.460597 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1122 00:20:05.460676 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1122 00:20:05.488636 218693 cri.go:89] found id: "031d1abe53b88560dbac18645a9d04e01621af0ebc4bdacf93ba7cd987bdbc7d"
I1122 00:20:05.488658 218693 cri.go:89] found id: "2e3aaa0d96c2cc9d110b994e3df108e0a78b3e80dae0dc52febf87cbdd528587"
I1122 00:20:05.488662 218693 cri.go:89] found id: ""
I1122 00:20:05.488670 218693 logs.go:282] 2 containers: [031d1abe53b88560dbac18645a9d04e01621af0ebc4bdacf93ba7cd987bdbc7d 2e3aaa0d96c2cc9d110b994e3df108e0a78b3e80dae0dc52febf87cbdd528587]
I1122 00:20:05.488715 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:05.492971 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:05.496804 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1122 00:20:05.496876 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1122 00:20:05.524856 218693 cri.go:89] found id: "ce556a5394180410f0cc434955d664ca0f25f8999150ddb0c902378b8f0ec7b7"
I1122 00:20:05.524883 218693 cri.go:89] found id: ""
I1122 00:20:05.524902 218693 logs.go:282] 1 containers: [ce556a5394180410f0cc434955d664ca0f25f8999150ddb0c902378b8f0ec7b7]
I1122 00:20:05.524962 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:05.529434 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1122 00:20:05.529521 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1122 00:20:05.557780 218693 cri.go:89] found id: ""
I1122 00:20:05.557805 218693 logs.go:282] 0 containers: []
W1122 00:20:05.557819 218693 logs.go:284] No container was found matching "coredns"
I1122 00:20:05.557828 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1122 00:20:05.557885 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1122 00:20:05.586142 218693 cri.go:89] found id: "8cb86218489f4e4ea496e1edbb81a5bfa9657517fe1f841ab1f262169880ef78"
I1122 00:20:05.586166 218693 cri.go:89] found id: "b072fb61e5e8d9fb0c450f6123a51d4ba86ee5162b4c0378a606893fb26410b2"
I1122 00:20:05.586173 218693 cri.go:89] found id: ""
I1122 00:20:05.586184 218693 logs.go:282] 2 containers: [8cb86218489f4e4ea496e1edbb81a5bfa9657517fe1f841ab1f262169880ef78 b072fb61e5e8d9fb0c450f6123a51d4ba86ee5162b4c0378a606893fb26410b2]
I1122 00:20:05.586248 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:05.590458 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:05.594671 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1122 00:20:05.594752 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1122 00:20:05.623542 218693 cri.go:89] found id: ""
I1122 00:20:05.623565 218693 logs.go:282] 0 containers: []
W1122 00:20:05.623575 218693 logs.go:284] No container was found matching "kube-proxy"
I1122 00:20:05.623585 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1122 00:20:05.623653 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1122 00:20:05.651642 218693 cri.go:89] found id: "91989ea7d1eb87264ea639688db06633fb66749e41f18e88a4bd9a185ac7a68a"
I1122 00:20:05.651663 218693 cri.go:89] found id: "13facf83677f37e3b97292f0f2dc164096fcd8cff5e71fdcf0ad085e3602f216"
I1122 00:20:05.651666 218693 cri.go:89] found id: ""
I1122 00:20:05.651674 218693 logs.go:282] 2 containers: [91989ea7d1eb87264ea639688db06633fb66749e41f18e88a4bd9a185ac7a68a 13facf83677f37e3b97292f0f2dc164096fcd8cff5e71fdcf0ad085e3602f216]
I1122 00:20:05.651724 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:05.655785 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:05.659668 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1122 00:20:05.659743 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1122 00:20:05.687725 218693 cri.go:89] found id: ""
I1122 00:20:05.687748 218693 logs.go:282] 0 containers: []
W1122 00:20:05.687756 218693 logs.go:284] No container was found matching "kindnet"
I1122 00:20:05.687762 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1122 00:20:05.687810 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1122 00:20:05.714403 218693 cri.go:89] found id: ""
I1122 00:20:05.714432 218693 logs.go:282] 0 containers: []
W1122 00:20:05.714444 218693 logs.go:284] No container was found matching "storage-provisioner"
I1122 00:20:05.714457 218693 logs.go:123] Gathering logs for kube-apiserver [2e3aaa0d96c2cc9d110b994e3df108e0a78b3e80dae0dc52febf87cbdd528587] ...
I1122 00:20:05.714472 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 2e3aaa0d96c2cc9d110b994e3df108e0a78b3e80dae0dc52febf87cbdd528587"
I1122 00:20:05.748851 218693 logs.go:123] Gathering logs for kube-scheduler [b072fb61e5e8d9fb0c450f6123a51d4ba86ee5162b4c0378a606893fb26410b2] ...
I1122 00:20:05.748901 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 b072fb61e5e8d9fb0c450f6123a51d4ba86ee5162b4c0378a606893fb26410b2"
I1122 00:20:05.784862 218693 logs.go:123] Gathering logs for kube-controller-manager [91989ea7d1eb87264ea639688db06633fb66749e41f18e88a4bd9a185ac7a68a] ...
I1122 00:20:05.784899 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 91989ea7d1eb87264ea639688db06633fb66749e41f18e88a4bd9a185ac7a68a"
I1122 00:20:05.813532 218693 logs.go:123] Gathering logs for kube-controller-manager [13facf83677f37e3b97292f0f2dc164096fcd8cff5e71fdcf0ad085e3602f216] ...
I1122 00:20:05.813569 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 13facf83677f37e3b97292f0f2dc164096fcd8cff5e71fdcf0ad085e3602f216"
I1122 00:20:05.844930 218693 logs.go:123] Gathering logs for containerd ...
I1122 00:20:05.844965 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1122 00:20:05.897273 218693 logs.go:123] Gathering logs for container status ...
I1122 00:20:05.897337 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1122 00:20:05.935381 218693 logs.go:123] Gathering logs for kubelet ...
I1122 00:20:05.935417 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1122 00:20:06.025566 218693 logs.go:123] Gathering logs for dmesg ...
I1122 00:20:06.025612 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1122 00:20:06.040810 218693 logs.go:123] Gathering logs for describe nodes ...
I1122 00:20:06.040843 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1122 00:20:06.102006 218693 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1122 00:20:06.102032 218693 logs.go:123] Gathering logs for etcd [ce556a5394180410f0cc434955d664ca0f25f8999150ddb0c902378b8f0ec7b7] ...
I1122 00:20:06.102050 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 ce556a5394180410f0cc434955d664ca0f25f8999150ddb0c902378b8f0ec7b7"
I1122 00:20:06.136887 218693 logs.go:123] Gathering logs for kube-scheduler [8cb86218489f4e4ea496e1edbb81a5bfa9657517fe1f841ab1f262169880ef78] ...
I1122 00:20:06.136937 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 8cb86218489f4e4ea496e1edbb81a5bfa9657517fe1f841ab1f262169880ef78"
I1122 00:20:06.192634 218693 logs.go:123] Gathering logs for kube-apiserver [031d1abe53b88560dbac18645a9d04e01621af0ebc4bdacf93ba7cd987bdbc7d] ...
I1122 00:20:06.192674 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 031d1abe53b88560dbac18645a9d04e01621af0ebc4bdacf93ba7cd987bdbc7d"
W1122 00:20:04.029159 251199 node_ready.go:57] node "no-preload-781232" has "Ready":"False" status (will retry)
W1122 00:20:06.067087 251199 node_ready.go:57] node "no-preload-781232" has "Ready":"False" status (will retry)
I1122 00:20:06.722373 260527 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21934-9059/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v embed-certs-491677:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763588073-21934@sha256:19d3da0413e1bfa354cbb88004c6796f8e9772a083e0230b0f6e50212ee04c7e -I lz4 -xf /preloaded.tar -C /extractDir: (4.451238931s)
I1122 00:20:06.722412 260527 kic.go:203] duration metric: took 4.451422839s to extract preloaded images to volume ...
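The "duration metric" and "Completed: ... (4.451238931s)" lines above come from timing each long-running command end to end. A minimal sketch of that pattern with a made-up helper name (the real timing lives in cli_runner.go):

package main

import (
	"log"
	"os/exec"
	"time"
)

// runTimed runs a command and logs how long it took, mirroring the
// "Completed: <cmd>: (<duration>)" lines in the log above.
func runTimed(name string, args ...string) error {
	start := time.Now()
	err := exec.Command(name, args...).Run()
	log.Printf("Completed: %s: (%s)", name, time.Since(start))
	return err
}

func main() {
	_ = runTimed("docker", "version")
}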
W1122 00:20:06.722533 260527 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
W1122 00:20:06.722570 260527 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.
I1122 00:20:06.722615 260527 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1122 00:20:06.782296 260527 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname embed-certs-491677 --name embed-certs-491677 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=embed-certs-491677 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=embed-certs-491677 --network embed-certs-491677 --ip 192.168.85.2 --volume embed-certs-491677:/var --security-opt apparmor=unconfined --memory=3072mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763588073-21934@sha256:19d3da0413e1bfa354cbb88004c6796f8e9772a083e0230b0f6e50212ee04c7e
I1122 00:20:07.109552 260527 cli_runner.go:164] Run: docker container inspect embed-certs-491677 --format={{.State.Running}}
I1122 00:20:07.129178 260527 cli_runner.go:164] Run: docker container inspect embed-certs-491677 --format={{.State.Status}}
I1122 00:20:07.148399 260527 cli_runner.go:164] Run: docker exec embed-certs-491677 stat /var/lib/dpkg/alternatives/iptables
I1122 00:20:07.196229 260527 oci.go:144] the created container "embed-certs-491677" has a running status.
I1122 00:20:07.196362 260527 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21934-9059/.minikube/machines/embed-certs-491677/id_rsa...
I1122 00:20:07.257446 260527 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21934-9059/.minikube/machines/embed-certs-491677/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1122 00:20:07.289218 260527 cli_runner.go:164] Run: docker container inspect embed-certs-491677 --format={{.State.Status}}
I1122 00:20:07.310559 260527 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1122 00:20:07.310578 260527 kic_runner.go:114] Args: [docker exec --privileged embed-certs-491677 chown docker:docker /home/docker/.ssh/authorized_keys]
I1122 00:20:07.351585 260527 cli_runner.go:164] Run: docker container inspect embed-certs-491677 --format={{.State.Status}}
I1122 00:20:07.374469 260527 machine.go:94] provisionDockerMachine start ...
I1122 00:20:07.374754 260527 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-491677
I1122 00:20:07.397641 260527 main.go:143] libmachine: Using SSH client type: native
I1122 00:20:07.397885 260527 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33073 <nil> <nil>}
I1122 00:20:07.397902 260527 main.go:143] libmachine: About to run SSH command:
hostname
I1122 00:20:07.398578 260527 main.go:143] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:36770->127.0.0.1:33073: read: connection reset by peer
I1122 00:20:10.523553 260527 main.go:143] libmachine: SSH cmd err, output: <nil>: embed-certs-491677
I1122 00:20:10.523587 260527 ubuntu.go:182] provisioning hostname "embed-certs-491677"
I1122 00:20:10.523652 260527 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-491677
I1122 00:20:10.544251 260527 main.go:143] libmachine: Using SSH client type: native
I1122 00:20:10.544519 260527 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33073 <nil> <nil>}
I1122 00:20:10.544536 260527 main.go:143] libmachine: About to run SSH command:
sudo hostname embed-certs-491677 && echo "embed-certs-491677" | sudo tee /etc/hostname
I1122 00:20:10.679747 260527 main.go:143] libmachine: SSH cmd err, output: <nil>: embed-certs-491677
I1122 00:20:10.679832 260527 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-491677
I1122 00:20:10.700586 260527 main.go:143] libmachine: Using SSH client type: native
I1122 00:20:10.700833 260527 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33073 <nil> <nil>}
I1122 00:20:10.700858 260527 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sembed-certs-491677' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 embed-certs-491677/g' /etc/hosts;
else
echo '127.0.1.1 embed-certs-491677' | sudo tee -a /etc/hosts;
fi
fi
I1122 00:20:10.825289 260527 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1122 00:20:10.825326 260527 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21934-9059/.minikube CaCertPath:/home/jenkins/minikube-integration/21934-9059/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21934-9059/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21934-9059/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21934-9059/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21934-9059/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21934-9059/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21934-9059/.minikube}
I1122 00:20:10.825375 260527 ubuntu.go:190] setting up certificates
I1122 00:20:10.825411 260527 provision.go:84] configureAuth start
I1122 00:20:10.825489 260527 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" embed-certs-491677
I1122 00:20:10.844220 260527 provision.go:143] copyHostCerts
I1122 00:20:10.844298 260527 exec_runner.go:144] found /home/jenkins/minikube-integration/21934-9059/.minikube/ca.pem, removing ...
I1122 00:20:10.844307 260527 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21934-9059/.minikube/ca.pem
I1122 00:20:10.844403 260527 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21934-9059/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21934-9059/.minikube/ca.pem (1082 bytes)
I1122 00:20:10.844496 260527 exec_runner.go:144] found /home/jenkins/minikube-integration/21934-9059/.minikube/cert.pem, removing ...
I1122 00:20:10.844506 260527 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21934-9059/.minikube/cert.pem
I1122 00:20:10.844532 260527 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21934-9059/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21934-9059/.minikube/cert.pem (1123 bytes)
I1122 00:20:10.844590 260527 exec_runner.go:144] found /home/jenkins/minikube-integration/21934-9059/.minikube/key.pem, removing ...
I1122 00:20:10.844598 260527 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21934-9059/.minikube/key.pem
I1122 00:20:10.844620 260527 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21934-9059/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21934-9059/.minikube/key.pem (1679 bytes)
I1122 00:20:10.844669 260527 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21934-9059/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21934-9059/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21934-9059/.minikube/certs/ca-key.pem org=jenkins.embed-certs-491677 san=[127.0.0.1 192.168.85.2 embed-certs-491677 localhost minikube]
I1122 00:20:10.881095 260527 provision.go:177] copyRemoteCerts
I1122 00:20:10.881150 260527 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1122 00:20:10.881198 260527 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-491677
I1122 00:20:10.899974 260527 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33073 SSHKeyPath:/home/jenkins/minikube-integration/21934-9059/.minikube/machines/embed-certs-491677/id_rsa Username:docker}
I1122 00:20:10.993091 260527 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21934-9059/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1122 00:20:11.014763 260527 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21934-9059/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1122 00:20:11.034702 260527 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21934-9059/.minikube/machines/server.pem --> /etc/docker/server.pem (1220 bytes)
I1122 00:20:11.053678 260527 provision.go:87] duration metric: took 228.246896ms to configureAuth
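configureAuth above generates a server certificate whose SANs cover every name the node answers to (127.0.0.1, 192.168.85.2, embed-certs-491677, localhost, minikube) and copies it to /etc/docker. A self-contained sketch of issuing such a certificate; a throwaway CA stands in for the real ca.pem/ca-key.pem, and none of this is minikube's provision code:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"math/big"
	"net"
	"os"
	"time"
)

func main() {
	// Stand-in CA; the real run loads ca.pem / ca-key.pem from the cert store.
	caKey, _ := rsa.GenerateKey(rand.Reader, 2048)
	caTmpl := &x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{Organization: []string{"minikubeCA"}},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().AddDate(10, 0, 0),
		IsCA:                  true,
		KeyUsage:              x509.KeyUsageCertSign,
		BasicConstraintsValid: true,
	}
	caDER, _ := x509.CreateCertificate(rand.Reader, caTmpl, caTmpl, &caKey.PublicKey, caKey)
	caCert, _ := x509.ParseCertificate(caDER)

	// Server certificate with the SANs listed in the log above.
	srvKey, _ := rsa.GenerateKey(rand.Reader, 2048)
	srvTmpl := &x509.Certificate{
		SerialNumber: big.NewInt(2),
		Subject:      pkix.Name{Organization: []string{"jenkins.embed-certs-491677"}},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().AddDate(10, 0, 0),
		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		IPAddresses:  []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("192.168.85.2")},
		DNSNames:     []string{"embed-certs-491677", "localhost", "minikube"},
	}
	der, _ := x509.CreateCertificate(rand.Reader, srvTmpl, caCert, &srvKey.PublicKey, caKey)
	pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: der})
}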
I1122 00:20:11.053708 260527 ubuntu.go:206] setting minikube options for container-runtime
I1122 00:20:11.053892 260527 config.go:182] Loaded profile config "embed-certs-491677": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1122 00:20:11.053909 260527 machine.go:97] duration metric: took 3.67941396s to provisionDockerMachine
I1122 00:20:11.053917 260527 client.go:176] duration metric: took 9.342299036s to LocalClient.Create
I1122 00:20:11.053943 260527 start.go:167] duration metric: took 9.342388491s to libmachine.API.Create "embed-certs-491677"
I1122 00:20:11.053956 260527 start.go:293] postStartSetup for "embed-certs-491677" (driver="docker")
I1122 00:20:11.053984 260527 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1122 00:20:11.054052 260527 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1122 00:20:11.054103 260527 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-491677
I1122 00:20:11.073167 260527 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33073 SSHKeyPath:/home/jenkins/minikube-integration/21934-9059/.minikube/machines/embed-certs-491677/id_rsa Username:docker}
I1122 00:20:11.168158 260527 ssh_runner.go:195] Run: cat /etc/os-release
I1122 00:20:11.172076 260527 main.go:143] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1122 00:20:11.172422 260527 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1122 00:20:11.172459 260527 filesync.go:126] Scanning /home/jenkins/minikube-integration/21934-9059/.minikube/addons for local assets ...
I1122 00:20:11.172556 260527 filesync.go:126] Scanning /home/jenkins/minikube-integration/21934-9059/.minikube/files for local assets ...
I1122 00:20:11.172675 260527 filesync.go:149] local asset: /home/jenkins/minikube-integration/21934-9059/.minikube/files/etc/ssl/certs/145302.pem -> 145302.pem in /etc/ssl/certs
I1122 00:20:11.172811 260527 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1122 00:20:11.182207 260527 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21934-9059/.minikube/files/etc/ssl/certs/145302.pem --> /etc/ssl/certs/145302.pem (1708 bytes)
I1122 00:20:11.203784 260527 start.go:296] duration metric: took 149.811059ms for postStartSetup
I1122 00:20:11.204173 260527 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" embed-certs-491677
I1122 00:20:11.222954 260527 profile.go:143] Saving config to /home/jenkins/minikube-integration/21934-9059/.minikube/profiles/embed-certs-491677/config.json ...
I1122 00:20:11.223305 260527 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1122 00:20:11.223354 260527 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-491677
I1122 00:20:11.242018 260527 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33073 SSHKeyPath:/home/jenkins/minikube-integration/21934-9059/.minikube/machines/embed-certs-491677/id_rsa Username:docker}
I1122 00:20:11.333726 260527 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1122 00:20:11.338527 260527 start.go:128] duration metric: took 9.62936097s to createHost
I1122 00:20:11.338558 260527 start.go:83] releasing machines lock for "embed-certs-491677", held for 9.629502399s
I1122 00:20:11.338631 260527 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" embed-certs-491677
I1122 00:20:11.357563 260527 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1122 00:20:11.357634 260527 ssh_runner.go:195] Run: cat /version.json
I1122 00:20:11.357684 260527 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-491677
I1122 00:20:11.357690 260527 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-491677
I1122 00:20:11.377098 260527 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33073 SSHKeyPath:/home/jenkins/minikube-integration/21934-9059/.minikube/machines/embed-certs-491677/id_rsa Username:docker}
I1122 00:20:11.378067 260527 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33073 SSHKeyPath:/home/jenkins/minikube-integration/21934-9059/.minikube/machines/embed-certs-491677/id_rsa Username:docker}
I1122 00:20:08.727161 218693 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1122 00:20:08.727652 218693 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1122 00:20:08.727710 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1122 00:20:08.727762 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1122 00:20:08.754498 218693 cri.go:89] found id: "031d1abe53b88560dbac18645a9d04e01621af0ebc4bdacf93ba7cd987bdbc7d"
I1122 00:20:08.754522 218693 cri.go:89] found id: "2e3aaa0d96c2cc9d110b994e3df108e0a78b3e80dae0dc52febf87cbdd528587"
I1122 00:20:08.754527 218693 cri.go:89] found id: ""
I1122 00:20:08.754535 218693 logs.go:282] 2 containers: [031d1abe53b88560dbac18645a9d04e01621af0ebc4bdacf93ba7cd987bdbc7d 2e3aaa0d96c2cc9d110b994e3df108e0a78b3e80dae0dc52febf87cbdd528587]
I1122 00:20:08.754583 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:08.758867 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:08.762449 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1122 00:20:08.762501 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1122 00:20:08.788422 218693 cri.go:89] found id: "ce556a5394180410f0cc434955d664ca0f25f8999150ddb0c902378b8f0ec7b7"
I1122 00:20:08.788444 218693 cri.go:89] found id: ""
I1122 00:20:08.788455 218693 logs.go:282] 1 containers: [ce556a5394180410f0cc434955d664ca0f25f8999150ddb0c902378b8f0ec7b7]
I1122 00:20:08.788512 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:08.792603 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1122 00:20:08.792668 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1122 00:20:08.820677 218693 cri.go:89] found id: ""
I1122 00:20:08.820703 218693 logs.go:282] 0 containers: []
W1122 00:20:08.820711 218693 logs.go:284] No container was found matching "coredns"
I1122 00:20:08.820717 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1122 00:20:08.820769 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1122 00:20:08.848396 218693 cri.go:89] found id: "8cb86218489f4e4ea496e1edbb81a5bfa9657517fe1f841ab1f262169880ef78"
I1122 00:20:08.848418 218693 cri.go:89] found id: "b072fb61e5e8d9fb0c450f6123a51d4ba86ee5162b4c0378a606893fb26410b2"
I1122 00:20:08.848422 218693 cri.go:89] found id: ""
I1122 00:20:08.848429 218693 logs.go:282] 2 containers: [8cb86218489f4e4ea496e1edbb81a5bfa9657517fe1f841ab1f262169880ef78 b072fb61e5e8d9fb0c450f6123a51d4ba86ee5162b4c0378a606893fb26410b2]
I1122 00:20:08.848485 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:08.852633 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:08.856393 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1122 00:20:08.856469 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1122 00:20:08.884423 218693 cri.go:89] found id: ""
I1122 00:20:08.884454 218693 logs.go:282] 0 containers: []
W1122 00:20:08.884467 218693 logs.go:284] No container was found matching "kube-proxy"
I1122 00:20:08.884476 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1122 00:20:08.884529 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1122 00:20:08.911898 218693 cri.go:89] found id: "91989ea7d1eb87264ea639688db06633fb66749e41f18e88a4bd9a185ac7a68a"
I1122 00:20:08.911917 218693 cri.go:89] found id: "13facf83677f37e3b97292f0f2dc164096fcd8cff5e71fdcf0ad085e3602f216"
I1122 00:20:08.911921 218693 cri.go:89] found id: ""
I1122 00:20:08.911928 218693 logs.go:282] 2 containers: [91989ea7d1eb87264ea639688db06633fb66749e41f18e88a4bd9a185ac7a68a 13facf83677f37e3b97292f0f2dc164096fcd8cff5e71fdcf0ad085e3602f216]
I1122 00:20:08.912000 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:08.916097 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:08.919808 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1122 00:20:08.919868 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1122 00:20:08.945704 218693 cri.go:89] found id: ""
I1122 00:20:08.945731 218693 logs.go:282] 0 containers: []
W1122 00:20:08.945742 218693 logs.go:284] No container was found matching "kindnet"
I1122 00:20:08.945750 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1122 00:20:08.945811 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1122 00:20:08.971599 218693 cri.go:89] found id: ""
I1122 00:20:08.971630 218693 logs.go:282] 0 containers: []
W1122 00:20:08.971642 218693 logs.go:284] No container was found matching "storage-provisioner"
I1122 00:20:08.971658 218693 logs.go:123] Gathering logs for dmesg ...
I1122 00:20:08.971686 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1122 00:20:08.985779 218693 logs.go:123] Gathering logs for etcd [ce556a5394180410f0cc434955d664ca0f25f8999150ddb0c902378b8f0ec7b7] ...
I1122 00:20:08.985806 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 ce556a5394180410f0cc434955d664ca0f25f8999150ddb0c902378b8f0ec7b7"
I1122 00:20:09.018373 218693 logs.go:123] Gathering logs for kube-scheduler [b072fb61e5e8d9fb0c450f6123a51d4ba86ee5162b4c0378a606893fb26410b2] ...
I1122 00:20:09.018407 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 b072fb61e5e8d9fb0c450f6123a51d4ba86ee5162b4c0378a606893fb26410b2"
I1122 00:20:09.055328 218693 logs.go:123] Gathering logs for containerd ...
I1122 00:20:09.055359 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1122 00:20:09.098567 218693 logs.go:123] Gathering logs for kubelet ...
I1122 00:20:09.098608 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1122 00:20:09.183392 218693 logs.go:123] Gathering logs for describe nodes ...
I1122 00:20:09.183433 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1122 00:20:09.242636 218693 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1122 00:20:09.242654 218693 logs.go:123] Gathering logs for kube-apiserver [031d1abe53b88560dbac18645a9d04e01621af0ebc4bdacf93ba7cd987bdbc7d] ...
I1122 00:20:09.242666 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 031d1abe53b88560dbac18645a9d04e01621af0ebc4bdacf93ba7cd987bdbc7d"
I1122 00:20:09.276133 218693 logs.go:123] Gathering logs for kube-apiserver [2e3aaa0d96c2cc9d110b994e3df108e0a78b3e80dae0dc52febf87cbdd528587] ...
I1122 00:20:09.276179 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 2e3aaa0d96c2cc9d110b994e3df108e0a78b3e80dae0dc52febf87cbdd528587"
I1122 00:20:09.310731 218693 logs.go:123] Gathering logs for kube-scheduler [8cb86218489f4e4ea496e1edbb81a5bfa9657517fe1f841ab1f262169880ef78] ...
I1122 00:20:09.310769 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 8cb86218489f4e4ea496e1edbb81a5bfa9657517fe1f841ab1f262169880ef78"
I1122 00:20:09.362187 218693 logs.go:123] Gathering logs for kube-controller-manager [91989ea7d1eb87264ea639688db06633fb66749e41f18e88a4bd9a185ac7a68a] ...
I1122 00:20:09.362226 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 91989ea7d1eb87264ea639688db06633fb66749e41f18e88a4bd9a185ac7a68a"
I1122 00:20:09.391737 218693 logs.go:123] Gathering logs for kube-controller-manager [13facf83677f37e3b97292f0f2dc164096fcd8cff5e71fdcf0ad085e3602f216] ...
I1122 00:20:09.391763 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 13facf83677f37e3b97292f0f2dc164096fcd8cff5e71fdcf0ad085e3602f216"
I1122 00:20:09.425753 218693 logs.go:123] Gathering logs for container status ...
I1122 00:20:09.425787 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1122 00:20:11.959328 218693 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1122 00:20:11.959805 218693 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1122 00:20:11.959868 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1122 00:20:11.959935 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1122 00:20:11.993113 218693 cri.go:89] found id: "031d1abe53b88560dbac18645a9d04e01621af0ebc4bdacf93ba7cd987bdbc7d"
I1122 00:20:11.993137 218693 cri.go:89] found id: "2e3aaa0d96c2cc9d110b994e3df108e0a78b3e80dae0dc52febf87cbdd528587"
I1122 00:20:11.993143 218693 cri.go:89] found id: ""
I1122 00:20:11.993153 218693 logs.go:282] 2 containers: [031d1abe53b88560dbac18645a9d04e01621af0ebc4bdacf93ba7cd987bdbc7d 2e3aaa0d96c2cc9d110b994e3df108e0a78b3e80dae0dc52febf87cbdd528587]
I1122 00:20:11.993213 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:11.997946 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:12.002616 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1122 00:20:12.002741 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1122 00:20:12.040113 218693 cri.go:89] found id: "ce556a5394180410f0cc434955d664ca0f25f8999150ddb0c902378b8f0ec7b7"
I1122 00:20:12.040150 218693 cri.go:89] found id: ""
I1122 00:20:12.040160 218693 logs.go:282] 1 containers: [ce556a5394180410f0cc434955d664ca0f25f8999150ddb0c902378b8f0ec7b7]
I1122 00:20:12.040220 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:12.045665 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1122 00:20:12.045732 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1122 00:20:12.081343 218693 cri.go:89] found id: ""
I1122 00:20:12.081375 218693 logs.go:282] 0 containers: []
W1122 00:20:12.081384 218693 logs.go:284] No container was found matching "coredns"
I1122 00:20:12.081389 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1122 00:20:12.081449 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1122 00:20:12.116486 218693 cri.go:89] found id: "8cb86218489f4e4ea496e1edbb81a5bfa9657517fe1f841ab1f262169880ef78"
I1122 00:20:12.117024 218693 cri.go:89] found id: "b072fb61e5e8d9fb0c450f6123a51d4ba86ee5162b4c0378a606893fb26410b2"
I1122 00:20:12.117045 218693 cri.go:89] found id: ""
I1122 00:20:12.117055 218693 logs.go:282] 2 containers: [8cb86218489f4e4ea496e1edbb81a5bfa9657517fe1f841ab1f262169880ef78 b072fb61e5e8d9fb0c450f6123a51d4ba86ee5162b4c0378a606893fb26410b2]
I1122 00:20:12.117115 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:12.121469 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:12.125453 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1122 00:20:12.125520 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1122 00:20:12.159076 218693 cri.go:89] found id: ""
I1122 00:20:12.159108 218693 logs.go:282] 0 containers: []
W1122 00:20:12.159121 218693 logs.go:284] No container was found matching "kube-proxy"
I1122 00:20:12.159130 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1122 00:20:12.159191 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1122 00:20:11.523900 260527 ssh_runner.go:195] Run: systemctl --version
I1122 00:20:11.531084 260527 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1122 00:20:11.536010 260527 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1122 00:20:11.536130 260527 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1122 00:20:11.563766 260527 cni.go:262] disabled [/etc/cni/net.d/10-crio-bridge.conflist.disabled, /etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
I1122 00:20:11.563792 260527 start.go:496] detecting cgroup driver to use...
I1122 00:20:11.563830 260527 detect.go:190] detected "systemd" cgroup driver on host os
I1122 00:20:11.563873 260527 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1122 00:20:11.579543 260527 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1122 00:20:11.593598 260527 docker.go:218] disabling cri-docker service (if available) ...
I1122 00:20:11.593666 260527 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1122 00:20:11.610889 260527 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1122 00:20:11.629723 260527 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1122 00:20:11.730670 260527 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1122 00:20:11.819921 260527 docker.go:234] disabling docker service ...
I1122 00:20:11.819985 260527 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1122 00:20:11.839159 260527 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1122 00:20:11.854142 260527 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1122 00:20:11.943699 260527 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1122 00:20:12.053855 260527 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1122 00:20:12.073171 260527 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1122 00:20:12.089999 260527 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I1122 00:20:12.105012 260527 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1122 00:20:12.117591 260527 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I1122 00:20:12.117652 260527 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I1122 00:20:12.128817 260527 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1122 00:20:12.142147 260527 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1122 00:20:12.154635 260527 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1122 00:20:12.169029 260527 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1122 00:20:12.181631 260527 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1122 00:20:12.194568 260527 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1122 00:20:12.207294 260527 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1122 00:20:12.218684 260527 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1122 00:20:12.228679 260527 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1122 00:20:12.241707 260527 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1122 00:20:12.337447 260527 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1122 00:20:12.443801 260527 start.go:543] Will wait 60s for socket path /run/containerd/containerd.sock
I1122 00:20:12.443870 260527 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I1122 00:20:12.448114 260527 start.go:564] Will wait 60s for crictl version
I1122 00:20:12.448178 260527 ssh_runner.go:195] Run: which crictl
I1122 00:20:12.452113 260527 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1122 00:20:12.481619 260527 start.go:580] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: v2.1.5
RuntimeApiVersion: v1
I1122 00:20:12.481687 260527 ssh_runner.go:195] Run: containerd --version
I1122 00:20:12.506954 260527 ssh_runner.go:195] Run: containerd --version
I1122 00:20:12.537127 260527 out.go:179] * Preparing Kubernetes v1.34.1 on containerd 2.1.5 ...
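Before declaring the runtime ready, the run above waits up to 60s for the containerd socket to appear and then asks crictl for its version. A small sketch of those two waits, with hypothetical helper names (the real checks are in start.go and ssh_runner.go, executed over SSH inside the node):

package main

import (
	"fmt"
	"os"
	"os/exec"
	"time"
)

// waitForSocket polls until the given path exists or the timeout elapses.
func waitForSocket(path string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if _, err := os.Stat(path); err == nil {
			return nil // socket file is present
		}
		time.Sleep(250 * time.Millisecond)
	}
	return fmt.Errorf("socket %s did not appear within %s", path, timeout)
}

func main() {
	if err := waitForSocket("/run/containerd/containerd.sock", 60*time.Second); err != nil {
		fmt.Println(err)
		return
	}
	out, _ := exec.Command("sudo", "/usr/local/bin/crictl", "version").CombinedOutput()
	fmt.Print(string(out))
}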
W1122 00:20:08.528688 251199 node_ready.go:57] node "no-preload-781232" has "Ready":"False" status (will retry)
W1122 00:20:10.529626 251199 node_ready.go:57] node "no-preload-781232" has "Ready":"False" status (will retry)
I1122 00:20:12.029744 251199 node_ready.go:49] node "no-preload-781232" is "Ready"
I1122 00:20:12.029782 251199 node_ready.go:38] duration metric: took 14.503754974s for node "no-preload-781232" to be "Ready" ...
I1122 00:20:12.029799 251199 api_server.go:52] waiting for apiserver process to appear ...
I1122 00:20:12.029867 251199 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1122 00:20:12.049755 251199 api_server.go:72] duration metric: took 14.826557708s to wait for apiserver process to appear ...
I1122 00:20:12.049782 251199 api_server.go:88] waiting for apiserver healthz status ...
I1122 00:20:12.049803 251199 api_server.go:253] Checking apiserver healthz at https://192.168.94.2:8443/healthz ...
I1122 00:20:12.055733 251199 api_server.go:279] https://192.168.94.2:8443/healthz returned 200:
ok
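The healthz probes above treat "connection refused" as "apiserver not up yet" and keep polling until /healthz returns 200 with body "ok". A hedged sketch of that probe loop; TLS verification is skipped here for brevity only, and the URL is the one from the log:

package main

import (
	"crypto/tls"
	"fmt"
	"io"
	"net/http"
	"time"
)

// waitHealthy polls url until it returns HTTP 200 or the timeout elapses.
func waitHealthy(url string, timeout time.Duration) error {
	client := &http.Client{
		Timeout:   2 * time.Second,
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		resp, err := client.Get(url)
		if err == nil {
			body, _ := io.ReadAll(resp.Body)
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				fmt.Printf("%s returned 200: %s\n", url, body)
				return nil
			}
		}
		// connection refused, TLS not ready, non-200: back off and retry
		time.Sleep(500 * time.Millisecond)
	}
	return fmt.Errorf("apiserver did not become healthy within %s", timeout)
}

func main() {
	_ = waitHealthy("https://192.168.94.2:8443/healthz", time.Minute)
}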
I1122 00:20:12.057374 251199 api_server.go:141] control plane version: v1.34.1
I1122 00:20:12.057405 251199 api_server.go:131] duration metric: took 7.61544ms to wait for apiserver health ...
I1122 00:20:12.057416 251199 system_pods.go:43] waiting for kube-system pods to appear ...
I1122 00:20:12.062154 251199 system_pods.go:59] 8 kube-system pods found
I1122 00:20:12.062190 251199 system_pods.go:61] "coredns-66bc5c9577-9wcct" [67b97cc5-016b-44d1-8119-dd6aa4932f83] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1122 00:20:12.062199 251199 system_pods.go:61] "etcd-no-preload-781232" [85c9627b-3102-439d-83e4-9ee3353591c1] Running
I1122 00:20:12.062207 251199 system_pods.go:61] "kindnet-llcnc" [fcdd9f25-4804-47c2-8f09-b6a2d688a8bc] Running
I1122 00:20:12.062212 251199 system_pods.go:61] "kube-apiserver-no-preload-781232" [4a4b5bf8-8262-46c5-9aa8-5a0bb0af364c] Running
I1122 00:20:12.062218 251199 system_pods.go:61] "kube-controller-manager-no-preload-781232" [0c4fed80-9ce3-4b0d-99dd-ae11fc92104e] Running
I1122 00:20:12.062223 251199 system_pods.go:61] "kube-proxy-685jg" [33a2d2c1-e364-4ec8-a9a0-69ba9146625f] Running
I1122 00:20:12.062228 251199 system_pods.go:61] "kube-scheduler-no-preload-781232" [ec2ea83e-6638-4945-b4e4-ef3142f30481] Running
I1122 00:20:12.062237 251199 system_pods.go:61] "storage-provisioner" [904bdf70-7728-45c5-a9ae-487aed28e6fc] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1122 00:20:12.062245 251199 system_pods.go:74] duration metric: took 4.821603ms to wait for pod list to return data ...
I1122 00:20:12.062254 251199 default_sa.go:34] waiting for default service account to be created ...
I1122 00:20:12.065112 251199 default_sa.go:45] found service account: "default"
I1122 00:20:12.065138 251199 default_sa.go:55] duration metric: took 2.848928ms for default service account to be created ...
I1122 00:20:12.065149 251199 system_pods.go:116] waiting for k8s-apps to be running ...
I1122 00:20:12.069582 251199 system_pods.go:86] 8 kube-system pods found
I1122 00:20:12.069625 251199 system_pods.go:89] "coredns-66bc5c9577-9wcct" [67b97cc5-016b-44d1-8119-dd6aa4932f83] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1122 00:20:12.069633 251199 system_pods.go:89] "etcd-no-preload-781232" [85c9627b-3102-439d-83e4-9ee3353591c1] Running
I1122 00:20:12.069648 251199 system_pods.go:89] "kindnet-llcnc" [fcdd9f25-4804-47c2-8f09-b6a2d688a8bc] Running
I1122 00:20:12.069655 251199 system_pods.go:89] "kube-apiserver-no-preload-781232" [4a4b5bf8-8262-46c5-9aa8-5a0bb0af364c] Running
I1122 00:20:12.069661 251199 system_pods.go:89] "kube-controller-manager-no-preload-781232" [0c4fed80-9ce3-4b0d-99dd-ae11fc92104e] Running
I1122 00:20:12.069666 251199 system_pods.go:89] "kube-proxy-685jg" [33a2d2c1-e364-4ec8-a9a0-69ba9146625f] Running
I1122 00:20:12.069670 251199 system_pods.go:89] "kube-scheduler-no-preload-781232" [ec2ea83e-6638-4945-b4e4-ef3142f30481] Running
I1122 00:20:12.069676 251199 system_pods.go:89] "storage-provisioner" [904bdf70-7728-45c5-a9ae-487aed28e6fc] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1122 00:20:12.069728 251199 retry.go:31] will retry after 227.269849ms: missing components: kube-dns
I1122 00:20:12.301834 251199 system_pods.go:86] 8 kube-system pods found
I1122 00:20:12.301869 251199 system_pods.go:89] "coredns-66bc5c9577-9wcct" [67b97cc5-016b-44d1-8119-dd6aa4932f83] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1122 00:20:12.301877 251199 system_pods.go:89] "etcd-no-preload-781232" [85c9627b-3102-439d-83e4-9ee3353591c1] Running
I1122 00:20:12.301886 251199 system_pods.go:89] "kindnet-llcnc" [fcdd9f25-4804-47c2-8f09-b6a2d688a8bc] Running
I1122 00:20:12.301892 251199 system_pods.go:89] "kube-apiserver-no-preload-781232" [4a4b5bf8-8262-46c5-9aa8-5a0bb0af364c] Running
I1122 00:20:12.301898 251199 system_pods.go:89] "kube-controller-manager-no-preload-781232" [0c4fed80-9ce3-4b0d-99dd-ae11fc92104e] Running
I1122 00:20:12.301903 251199 system_pods.go:89] "kube-proxy-685jg" [33a2d2c1-e364-4ec8-a9a0-69ba9146625f] Running
I1122 00:20:12.301910 251199 system_pods.go:89] "kube-scheduler-no-preload-781232" [ec2ea83e-6638-4945-b4e4-ef3142f30481] Running
I1122 00:20:12.301917 251199 system_pods.go:89] "storage-provisioner" [904bdf70-7728-45c5-a9ae-487aed28e6fc] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1122 00:20:12.301938 251199 retry.go:31] will retry after 387.887736ms: missing components: kube-dns
I1122 00:20:12.694992 251199 system_pods.go:86] 8 kube-system pods found
I1122 00:20:12.695026 251199 system_pods.go:89] "coredns-66bc5c9577-9wcct" [67b97cc5-016b-44d1-8119-dd6aa4932f83] Running
I1122 00:20:12.695035 251199 system_pods.go:89] "etcd-no-preload-781232" [85c9627b-3102-439d-83e4-9ee3353591c1] Running
I1122 00:20:12.695041 251199 system_pods.go:89] "kindnet-llcnc" [fcdd9f25-4804-47c2-8f09-b6a2d688a8bc] Running
I1122 00:20:12.695047 251199 system_pods.go:89] "kube-apiserver-no-preload-781232" [4a4b5bf8-8262-46c5-9aa8-5a0bb0af364c] Running
I1122 00:20:12.695052 251199 system_pods.go:89] "kube-controller-manager-no-preload-781232" [0c4fed80-9ce3-4b0d-99dd-ae11fc92104e] Running
I1122 00:20:12.695060 251199 system_pods.go:89] "kube-proxy-685jg" [33a2d2c1-e364-4ec8-a9a0-69ba9146625f] Running
I1122 00:20:12.695065 251199 system_pods.go:89] "kube-scheduler-no-preload-781232" [ec2ea83e-6638-4945-b4e4-ef3142f30481] Running
I1122 00:20:12.695070 251199 system_pods.go:89] "storage-provisioner" [904bdf70-7728-45c5-a9ae-487aed28e6fc] Running
I1122 00:20:12.695080 251199 system_pods.go:126] duration metric: took 629.924123ms to wait for k8s-apps to be running ...
I1122 00:20:12.695093 251199 system_svc.go:44] waiting for kubelet service to be running ....
I1122 00:20:12.695144 251199 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1122 00:20:12.708823 251199 system_svc.go:56] duration metric: took 13.721013ms WaitForService to wait for kubelet
I1122 00:20:12.708855 251199 kubeadm.go:587] duration metric: took 15.485663176s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1122 00:20:12.708874 251199 node_conditions.go:102] verifying NodePressure condition ...
I1122 00:20:12.712345 251199 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I1122 00:20:12.712376 251199 node_conditions.go:123] node cpu capacity is 8
I1122 00:20:12.712396 251199 node_conditions.go:105] duration metric: took 3.516354ms to run NodePressure ...
I1122 00:20:12.712412 251199 start.go:242] waiting for startup goroutines ...
I1122 00:20:12.712423 251199 start.go:247] waiting for cluster config update ...
I1122 00:20:12.712441 251199 start.go:256] writing updated cluster config ...
I1122 00:20:12.712733 251199 ssh_runner.go:195] Run: rm -f paused
I1122 00:20:12.717390 251199 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1122 00:20:12.721696 251199 pod_ready.go:83] waiting for pod "coredns-66bc5c9577-9wcct" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:12.726947 251199 pod_ready.go:94] pod "coredns-66bc5c9577-9wcct" is "Ready"
I1122 00:20:12.726976 251199 pod_ready.go:86] duration metric: took 5.255643ms for pod "coredns-66bc5c9577-9wcct" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:12.729559 251199 pod_ready.go:83] waiting for pod "etcd-no-preload-781232" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:12.734425 251199 pod_ready.go:94] pod "etcd-no-preload-781232" is "Ready"
I1122 00:20:12.734455 251199 pod_ready.go:86] duration metric: took 4.86467ms for pod "etcd-no-preload-781232" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:12.736916 251199 pod_ready.go:83] waiting for pod "kube-apiserver-no-preload-781232" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:12.741485 251199 pod_ready.go:94] pod "kube-apiserver-no-preload-781232" is "Ready"
I1122 00:20:12.741515 251199 pod_ready.go:86] duration metric: took 4.574913ms for pod "kube-apiserver-no-preload-781232" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:12.743848 251199 pod_ready.go:83] waiting for pod "kube-controller-manager-no-preload-781232" in "kube-system" namespace to be "Ready" or be gone ...
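(The pod_ready polling above can be approximated manually with kubectl; a rough equivalent, assuming the kubectl context is named after the profile, no-preload-781232, as minikube normally configures it:
    kubectl --context no-preload-781232 get pods -n kube-system
    kubectl --context no-preload-781232 wait --for=condition=Ready pod -l k8s-app=kube-dns -n kube-system --timeout=4m)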
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD NAMESPACE
d01de905a2d07 56cc512116c8f 6 seconds ago Running busybox 0 e511b813570c1 busybox default
f7527a8afc668 ead0a4a53df89 13 seconds ago Running coredns 0 b00fa05a6c375 coredns-5dd5756b68-pqbfp kube-system
f2a1ec178c227 6e38f40d628db 13 seconds ago Running storage-provisioner 0 a3bbedf747991 storage-provisioner kube-system
abad042f2a4ad 409467f978b4a 24 seconds ago Running kindnet-cni 0 721fcd34a44d6 kindnet-ldtd8 kube-system
5119ee9a69fb3 ea1030da44aa1 28 seconds ago Running kube-proxy 0 be780c30602ce kube-proxy-kqrng kube-system
4c35680ab2dd6 73deb9a3f7025 47 seconds ago Running etcd 0 adbbfe9941b27 etcd-old-k8s-version-462319 kube-system
1863b35aae093 f6f496300a2ae 47 seconds ago Running kube-scheduler 0 45afb7772f575 kube-scheduler-old-k8s-version-462319 kube-system
e398c42ad8188 bb5e0dde9054c 47 seconds ago Running kube-apiserver 0 0ce7c78109ce7 kube-apiserver-old-k8s-version-462319 kube-system
355ecffe75a3f 4be79c38a4bab 47 seconds ago Running kube-controller-manager 0 5dfd6ffd80d1f kube-controller-manager-old-k8s-version-462319 kube-system
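(The table above is the node's CRI container listing; a hedged way to reproduce it, assuming the old-k8s-version-462319 profile is still running:
    minikube ssh -p old-k8s-version-462319 -- sudo crictl ps -a)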
==> containerd <==
Nov 22 00:20:01 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:01.327237013Z" level=info msg="connecting to shim f2a1ec178c227617bd32e678c94e3d44e606683f0b10ccdbc182dec6d6d5c9e9" address="unix:///run/containerd/s/62835cccd20d8437bb636df9ea457fe2506fdd9387d47f5e31a45c75f852a444" protocol=ttrpc version=3
Nov 22 00:20:01 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:01.328631129Z" level=info msg="CreateContainer within sandbox \"b00fa05a6c375cb07b56b89e739f90401ad7f950dedcb886ca1774eba46a4293\" for container &ContainerMetadata{Name:coredns,Attempt:0,}"
Nov 22 00:20:01 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:01.336790890Z" level=info msg="Container f7527a8afc6683a9935b781bf3006cc9c368a534f3eafba3501b6509659a437b: CDI devices from CRI Config.CDIDevices: []"
Nov 22 00:20:01 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:01.343474448Z" level=info msg="CreateContainer within sandbox \"b00fa05a6c375cb07b56b89e739f90401ad7f950dedcb886ca1774eba46a4293\" for &ContainerMetadata{Name:coredns,Attempt:0,} returns container id \"f7527a8afc6683a9935b781bf3006cc9c368a534f3eafba3501b6509659a437b\""
Nov 22 00:20:01 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:01.344107519Z" level=info msg="StartContainer for \"f7527a8afc6683a9935b781bf3006cc9c368a534f3eafba3501b6509659a437b\""
Nov 22 00:20:01 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:01.345166179Z" level=info msg="connecting to shim f7527a8afc6683a9935b781bf3006cc9c368a534f3eafba3501b6509659a437b" address="unix:///run/containerd/s/39593751a6c9fe87428291df6153bccdab6c22a754601ae94cfc40e697ece6ec" protocol=ttrpc version=3
Nov 22 00:20:01 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:01.389133316Z" level=info msg="StartContainer for \"f2a1ec178c227617bd32e678c94e3d44e606683f0b10ccdbc182dec6d6d5c9e9\" returns successfully"
Nov 22 00:20:01 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:01.404040136Z" level=info msg="StartContainer for \"f7527a8afc6683a9935b781bf3006cc9c368a534f3eafba3501b6509659a437b\" returns successfully"
Nov 22 00:20:05 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:05.083706178Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:89dd9411-148d-4a8e-98d3-a51a8eab9d35,Namespace:default,Attempt:0,}"
Nov 22 00:20:05 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:05.877683353Z" level=info msg="connecting to shim e511b813570c19e1d5c5c2002304caba5cc1bac5847092a53135ba9cb1b1dd7c" address="unix:///run/containerd/s/b045fc79abfabe20fc9affb730c643e7c442531994f349b7904cd7f34ab0272a" namespace=k8s.io protocol=ttrpc version=3
Nov 22 00:20:06 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:06.066243350Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:89dd9411-148d-4a8e-98d3-a51a8eab9d35,Namespace:default,Attempt:0,} returns sandbox id \"e511b813570c19e1d5c5c2002304caba5cc1bac5847092a53135ba9cb1b1dd7c\""
Nov 22 00:20:06 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:06.068244404Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\""
Nov 22 00:20:08 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:08.300595484Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 22 00:20:08 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:08.301398927Z" level=info msg="stop pulling image gcr.io/k8s-minikube/busybox:1.28.4-glibc: active requests=0, bytes read=2396644"
Nov 22 00:20:08 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:08.302750252Z" level=info msg="ImageCreate event name:\"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 22 00:20:08 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:08.304853958Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 22 00:20:08 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:08.305213907Z" level=info msg="Pulled image \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" with image id \"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\", repo tag \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\", repo digest \"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\", size \"2395207\" in 2.236905893s"
Nov 22 00:20:08 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:08.305247082Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" returns image reference \"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\""
Nov 22 00:20:08 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:08.306892429Z" level=info msg="CreateContainer within sandbox \"e511b813570c19e1d5c5c2002304caba5cc1bac5847092a53135ba9cb1b1dd7c\" for container &ContainerMetadata{Name:busybox,Attempt:0,}"
Nov 22 00:20:08 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:08.314973197Z" level=info msg="Container d01de905a2d0700ad9691d5a73cf41f69bb587ec67e218858862ae31fcd53485: CDI devices from CRI Config.CDIDevices: []"
Nov 22 00:20:08 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:08.321465429Z" level=info msg="CreateContainer within sandbox \"e511b813570c19e1d5c5c2002304caba5cc1bac5847092a53135ba9cb1b1dd7c\" for &ContainerMetadata{Name:busybox,Attempt:0,} returns container id \"d01de905a2d0700ad9691d5a73cf41f69bb587ec67e218858862ae31fcd53485\""
Nov 22 00:20:08 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:08.322134703Z" level=info msg="StartContainer for \"d01de905a2d0700ad9691d5a73cf41f69bb587ec67e218858862ae31fcd53485\""
Nov 22 00:20:08 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:08.323141205Z" level=info msg="connecting to shim d01de905a2d0700ad9691d5a73cf41f69bb587ec67e218858862ae31fcd53485" address="unix:///run/containerd/s/b045fc79abfabe20fc9affb730c643e7c442531994f349b7904cd7f34ab0272a" protocol=ttrpc version=3
Nov 22 00:20:08 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:08.376916692Z" level=info msg="StartContainer for \"d01de905a2d0700ad9691d5a73cf41f69bb587ec67e218858862ae31fcd53485\" returns successfully"
Nov 22 00:20:13 old-k8s-version-462319 containerd[666]: E1122 00:20:13.803924 666 websocket.go:100] "Unhandled Error" err="unable to upgrade websocket connection: websocket server finished before becoming ready" logger="UnhandledError"
==> coredns [f7527a8afc6683a9935b781bf3006cc9c368a534f3eafba3501b6509659a437b] <==
[INFO] plugin/ready: Still waiting on: "kubernetes"
.:53
[INFO] plugin/reload: Running configuration SHA512 = 25cf5af2951e282c4b0e961a02fb5d3e57c974501832fee92eec17b5135b9ec9d9e87d2ac94e6d117a5ed3dd54e8800aa7b4479706eb54497145ccdb80397d1b
CoreDNS-1.10.1
linux/amd64, go1.20, 055b2c3
[INFO] 127.0.0.1:60216 - 50495 "HINFO IN 8122801349455611517.3511563579879947437. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.074291599s
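(A quick in-cluster DNS check against this CoreDNS instance can be run from the busybox pod created earlier; a sketch, assuming the kubectl context old-k8s-version-462319 exists and the pod is still Running:
    kubectl --context old-k8s-version-462319 exec busybox -- nslookup kubernetes.default.svc.cluster.local)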
==> describe nodes <==
Name: old-k8s-version-462319
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=old-k8s-version-462319
kubernetes.io/os=linux
minikube.k8s.io/commit=299bbe887a12c40541707cc636234f35f4ff1785
minikube.k8s.io/name=old-k8s-version-462319
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_11_22T00_19_34_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Sat, 22 Nov 2025 00:19:29 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: old-k8s-version-462319
AcquireTime: <unset>
RenewTime: Sat, 22 Nov 2025 00:20:14 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Sat, 22 Nov 2025 00:20:04 +0000 Sat, 22 Nov 2025 00:19:28 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Sat, 22 Nov 2025 00:20:04 +0000 Sat, 22 Nov 2025 00:19:28 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Sat, 22 Nov 2025 00:20:04 +0000 Sat, 22 Nov 2025 00:19:28 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Sat, 22 Nov 2025 00:20:04 +0000 Sat, 22 Nov 2025 00:20:00 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.103.2
Hostname: old-k8s-version-462319
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863356Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863356Ki
pods: 110
System Info:
Machine ID: 5665009e93b91d39dc05718b691e3875
System UUID: 1a763c28-0497-45f3-b9e8-458b8b4eb589
Boot ID: 725aae03-f893-4e0b-b029-cbd3b00ccfdd
Kernel Version: 6.8.0-1044-gcp
OS Image: Debian GNU/Linux 12 (bookworm)
Operating System: linux
Architecture: amd64
Container Runtime Version: containerd://2.1.5
Kubelet Version: v1.28.0
Kube-Proxy Version: v1.28.0
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (9 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
default busybox 0 (0%) 0 (0%) 0 (0%) 0 (0%) 11s
kube-system coredns-5dd5756b68-pqbfp 100m (1%) 0 (0%) 70Mi (0%) 170Mi (0%) 29s
kube-system etcd-old-k8s-version-462319 100m (1%) 0 (0%) 100Mi (0%) 0 (0%) 42s
kube-system kindnet-ldtd8 100m (1%) 100m (1%) 50Mi (0%) 50Mi (0%) 29s
kube-system kube-apiserver-old-k8s-version-462319 250m (3%) 0 (0%) 0 (0%) 0 (0%) 42s
kube-system kube-controller-manager-old-k8s-version-462319 200m (2%) 0 (0%) 0 (0%) 0 (0%) 44s
kube-system kube-proxy-kqrng 0 (0%) 0 (0%) 0 (0%) 0 (0%) 29s
kube-system kube-scheduler-old-k8s-version-462319 100m (1%) 0 (0%) 0 (0%) 0 (0%) 42s
kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 28s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 850m (10%) 100m (1%)
memory 220Mi (0%) 220Mi (0%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 27s kube-proxy
Normal Starting 42s kubelet Starting kubelet.
Normal NodeAllocatableEnforced 42s kubelet Updated Node Allocatable limit across pods
Normal NodeHasSufficientMemory 42s kubelet Node old-k8s-version-462319 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 42s kubelet Node old-k8s-version-462319 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 42s kubelet Node old-k8s-version-462319 status is now: NodeHasSufficientPID
Normal RegisteredNode 30s node-controller Node old-k8s-version-462319 event: Registered Node old-k8s-version-462319 in Controller
Normal NodeReady 15s kubelet Node old-k8s-version-462319 status is now: NodeReady
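(The node description above can be regenerated directly; a sketch, assuming the same kubectl context name as the profile:
    kubectl --context old-k8s-version-462319 describe node old-k8s-version-462319)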
==> dmesg <==
[Nov21 23:17] MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.
[ +0.000865] TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.
[ +0.001000] MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.
[ +0.087013] acpi PNP0A03:00: fail to add MMCONFIG information, can't access extended configuration space under this bridge
[ +0.410276] i8042: Warning: Keylock active
[ +0.014947] device-mapper: core: CONFIG_IMA_DISABLE_HTABLE is disabled. Duplicate IMA measurements will not be recorded in the IMA log.
[ +0.495836] block sda: the capability attribute has been deprecated.
[ +0.091740] systemd[1]: Configuration file /run/systemd/system/netplan-ovs-cleanup.service is marked world-inaccessible. This has no effect as configuration data is accessible via APIs without restrictions. Proceeding anyway.
[ +0.024333] systemd[1]: /lib/systemd/system/snapd.service:23: Unknown key name 'RestartMode' in section 'Service', ignoring.
[ +5.452540] kauditd_printk_skb: 47 callbacks suppressed
==> etcd [4c35680ab2dd6966de549749b29af9a5a8bccb172d03360ef57391e45ea9f885] <==
{"level":"info","ts":"2025-11-22T00:19:28.060277Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"f23060b075c4c089 became leader at term 2"}
{"level":"info","ts":"2025-11-22T00:19:28.060288Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: f23060b075c4c089 elected leader f23060b075c4c089 at term 2"}
{"level":"info","ts":"2025-11-22T00:19:28.061026Z","caller":"etcdserver/server.go:2571","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-22T00:19:28.061614Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-22T00:19:28.061614Z","caller":"etcdserver/server.go:2062","msg":"published local member to cluster through raft","local-member-id":"f23060b075c4c089","local-member-attributes":"{Name:old-k8s-version-462319 ClientURLs:[https://192.168.103.2:2379]}","request-path":"/0/members/f23060b075c4c089/attributes","cluster-id":"3336683c081d149d","publish-timeout":"7s"}
{"level":"info","ts":"2025-11-22T00:19:28.061648Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-22T00:19:28.06183Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"3336683c081d149d","local-member-id":"f23060b075c4c089","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-22T00:19:28.0621Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-22T00:19:28.062388Z","caller":"etcdserver/server.go:2595","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-22T00:19:28.062242Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2025-11-22T00:19:28.062743Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2025-11-22T00:19:28.064288Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.103.2:2379"}
{"level":"info","ts":"2025-11-22T00:19:28.064366Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2025-11-22T00:19:32.633697Z","caller":"traceutil/trace.go:171","msg":"trace[64928526] transaction","detail":"{read_only:false; response_revision:210; number_of_response:1; }","duration":"260.007025ms","start":"2025-11-22T00:19:32.373672Z","end":"2025-11-22T00:19:32.633679Z","steps":["trace[64928526] 'process raft request' (duration: 259.898405ms)"],"step_count":1}
{"level":"warn","ts":"2025-11-22T00:19:33.081079Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"335.74286ms","expected-duration":"100ms","prefix":"","request":"header:<ID:13873790177431359743 username:\"kube-apiserver-etcd-client\" auth_revision:1 > txn:<compare:<target:MOD key:\"/registry/secrets/kube-system/bootstrap-token-vumgow\" mod_revision:0 > success:<request_put:<key:\"/registry/secrets/kube-system/bootstrap-token-vumgow\" value_size:617 >> failure:<>>","response":"size:16"}
{"level":"info","ts":"2025-11-22T00:19:33.081182Z","caller":"traceutil/trace.go:171","msg":"trace[454440905] transaction","detail":"{read_only:false; response_revision:211; number_of_response:1; }","duration":"441.168552ms","start":"2025-11-22T00:19:32.639997Z","end":"2025-11-22T00:19:33.081166Z","steps":["trace[454440905] 'process raft request' (duration: 104.950033ms)","trace[454440905] 'compare' (duration: 335.635432ms)"],"step_count":2}
{"level":"warn","ts":"2025-11-22T00:19:33.081293Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2025-11-22T00:19:32.63998Z","time spent":"441.252908ms","remote":"127.0.0.1:42828","response type":"/etcdserverpb.KV/Txn","request count":1,"request size":677,"response count":0,"response size":39,"request content":"compare:<target:MOD key:\"/registry/secrets/kube-system/bootstrap-token-vumgow\" mod_revision:0 > success:<request_put:<key:\"/registry/secrets/kube-system/bootstrap-token-vumgow\" value_size:617 >> failure:<>"}
{"level":"warn","ts":"2025-11-22T00:19:44.266771Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"130.299403ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/serviceaccounts/kube-system/bootstrap-signer\" ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2025-11-22T00:19:44.266864Z","caller":"traceutil/trace.go:171","msg":"trace[842289003] range","detail":"{range_begin:/registry/serviceaccounts/kube-system/bootstrap-signer; range_end:; response_count:0; response_revision:282; }","duration":"130.453771ms","start":"2025-11-22T00:19:44.136394Z","end":"2025-11-22T00:19:44.266847Z","steps":["trace[842289003] 'range keys from in-memory index tree' (duration: 130.216573ms)"],"step_count":1}
{"level":"info","ts":"2025-11-22T00:19:44.386458Z","caller":"traceutil/trace.go:171","msg":"trace[490276607] linearizableReadLoop","detail":"{readStateIndex:296; appliedIndex:295; }","duration":"101.94453ms","start":"2025-11-22T00:19:44.284493Z","end":"2025-11-22T00:19:44.386437Z","steps":["trace[490276607] 'read index received' (duration: 101.776407ms)","trace[490276607] 'applied index is now lower than readState.Index' (duration: 167.67µs)"],"step_count":2}
{"level":"info","ts":"2025-11-22T00:19:44.386547Z","caller":"traceutil/trace.go:171","msg":"trace[1514742623] transaction","detail":"{read_only:false; response_revision:283; number_of_response:1; }","duration":"114.786396ms","start":"2025-11-22T00:19:44.271741Z","end":"2025-11-22T00:19:44.386527Z","steps":["trace[1514742623] 'process raft request' (duration: 114.589176ms)"],"step_count":1}
{"level":"warn","ts":"2025-11-22T00:19:44.386605Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"102.121151ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/health\" ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2025-11-22T00:19:44.386631Z","caller":"traceutil/trace.go:171","msg":"trace[800592602] range","detail":"{range_begin:/registry/health; range_end:; response_count:0; response_revision:283; }","duration":"102.163591ms","start":"2025-11-22T00:19:44.284459Z","end":"2025-11-22T00:19:44.386622Z","steps":["trace[800592602] 'agreement among raft nodes before linearized reading' (duration: 102.059746ms)"],"step_count":1}
{"level":"warn","ts":"2025-11-22T00:20:06.401485Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"116.691938ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/health\" ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2025-11-22T00:20:06.401571Z","caller":"traceutil/trace.go:171","msg":"trace[919203119] range","detail":"{range_begin:/registry/health; range_end:; response_count:0; response_revision:425; }","duration":"116.801997ms","start":"2025-11-22T00:20:06.284749Z","end":"2025-11-22T00:20:06.401551Z","steps":["trace[919203119] 'range keys from in-memory index tree' (duration: 116.607287ms)"],"step_count":1}
==> kernel <==
00:20:15 up 1:02, 0 user, load average: 6.48, 3.76, 2.29
Linux old-k8s-version-462319 6.8.0-1044-gcp #47~22.04.1-Ubuntu SMP Thu Oct 23 21:07:54 UTC 2025 x86_64 GNU/Linux
PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"
==> kindnet [abad042f2a4adf0bb5a1e42eb6090d0433dbd093e2502e0a0763cd88008fa485] <==
I1122 00:19:50.358053 1 main.go:109] connected to apiserver: https://10.96.0.1:443
I1122 00:19:50.379516 1 main.go:139] hostIP = 192.168.103.2
podIP = 192.168.103.2
I1122 00:19:50.379673 1 main.go:148] setting mtu 1500 for CNI
I1122 00:19:50.379699 1 main.go:178] kindnetd IP family: "ipv4"
I1122 00:19:50.379728 1 main.go:182] noMask IPv4 subnets: [10.244.0.0/16]
time="2025-11-22T00:19:50Z" level=info msg="Created plugin 10-kube-network-policies (kindnetd, handles RunPodSandbox,RemovePodSandbox)"
I1122 00:19:50.657926 1 controller.go:377] "Starting controller" name="kube-network-policies"
I1122 00:19:50.657947 1 controller.go:381] "Waiting for informer caches to sync"
I1122 00:19:50.657972 1 shared_informer.go:350] "Waiting for caches to sync" controller="kube-network-policies"
I1122 00:19:50.658082 1 controller.go:390] nri plugin exited: failed to connect to NRI service: dial unix /var/run/nri/nri.sock: connect: no such file or directory
I1122 00:19:50.980378 1 shared_informer.go:357] "Caches are synced" controller="kube-network-policies"
I1122 00:19:50.980413 1 metrics.go:72] Registering metrics
I1122 00:19:50.980477 1 controller.go:711] "Syncing nftables rules"
I1122 00:20:00.663360 1 main.go:297] Handling node with IPs: map[192.168.103.2:{}]
I1122 00:20:00.663424 1 main.go:301] handling current node
I1122 00:20:10.657535 1 main.go:297] Handling node with IPs: map[192.168.103.2:{}]
I1122 00:20:10.657598 1 main.go:301] handling current node
==> kube-apiserver [e398c42ad8188a2a96d101f089a0968d374f75b6827a154f004bd956b9155274] <==
I1122 00:19:29.739253 1 apf_controller.go:380] Running API Priority and Fairness periodic rebalancing process
I1122 00:19:29.739494 1 cache.go:39] Caches are synced for APIServiceRegistrationController controller
I1122 00:19:29.739756 1 shared_informer.go:318] Caches are synced for crd-autoregister
I1122 00:19:29.739791 1 aggregator.go:166] initial CRD sync complete...
I1122 00:19:29.739800 1 autoregister_controller.go:141] Starting autoregister controller
I1122 00:19:29.739807 1 cache.go:32] Waiting for caches to sync for autoregister controller
I1122 00:19:29.739814 1 cache.go:39] Caches are synced for autoregister controller
I1122 00:19:29.740221 1 controller.go:624] quota admission added evaluator for: namespaces
I1122 00:19:29.740304 1 shared_informer.go:318] Caches are synced for configmaps
I1122 00:19:29.936021 1 controller.go:624] quota admission added evaluator for: leases.coordination.k8s.io
I1122 00:19:30.645531 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I1122 00:19:30.649522 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I1122 00:19:30.649546 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I1122 00:19:31.151928 1 controller.go:624] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1122 00:19:31.192786 1 controller.go:624] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1122 00:19:31.249628 1 alloc.go:330] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W1122 00:19:31.255812 1 lease.go:263] Resetting endpoints for master service "kubernetes" to [192.168.103.2]
I1122 00:19:31.257056 1 controller.go:624] quota admission added evaluator for: endpoints
I1122 00:19:31.261743 1 controller.go:624] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1122 00:19:31.700612 1 controller.go:624] quota admission added evaluator for: serviceaccounts
I1122 00:19:33.349558 1 controller.go:624] quota admission added evaluator for: deployments.apps
I1122 00:19:33.363593 1 alloc.go:330] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I1122 00:19:33.376299 1 controller.go:624] quota admission added evaluator for: daemonsets.apps
I1122 00:19:46.344730 1 controller.go:624] quota admission added evaluator for: controllerrevisions.apps
I1122 00:19:46.397570 1 controller.go:624] quota admission added evaluator for: replicasets.apps
==> kube-controller-manager [355ecffe75a3ff0874bfe775cd91a06b9bfff9f2dc65c709c3da1adca76e11c1] <==
I1122 00:19:45.646325 1 shared_informer.go:318] Caches are synced for resource quota
I1122 00:19:45.687399 1 shared_informer.go:318] Caches are synced for disruption
I1122 00:19:45.693911 1 shared_informer.go:318] Caches are synced for resource quota
I1122 00:19:46.009572 1 shared_informer.go:318] Caches are synced for garbage collector
I1122 00:19:46.084787 1 shared_informer.go:318] Caches are synced for garbage collector
I1122 00:19:46.084820 1 garbagecollector.go:166] "All resource monitors have synced. Proceeding to collect garbage"
I1122 00:19:46.355549 1 event.go:307] "Event occurred" object="kube-system/kube-proxy" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-kqrng"
I1122 00:19:46.357410 1 event.go:307] "Event occurred" object="kube-system/kindnet" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kindnet-ldtd8"
I1122 00:19:46.402945 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set coredns-5dd5756b68 to 2"
I1122 00:19:46.497513 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-pqbfp"
I1122 00:19:46.505494 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-bjgv6"
I1122 00:19:46.515365 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="112.69029ms"
I1122 00:19:46.537252 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="21.812757ms"
I1122 00:19:46.537541 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="177.843µs"
I1122 00:19:47.048823 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set coredns-5dd5756b68 to 1 from 2"
I1122 00:19:47.070179 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: coredns-5dd5756b68-bjgv6"
I1122 00:19:47.078565 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="31.08623ms"
I1122 00:19:47.085902 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="7.261706ms"
I1122 00:19:47.086048 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="90.581µs"
I1122 00:20:00.892386 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="138.286µs"
I1122 00:20:00.912888 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="84.033µs"
I1122 00:20:01.551233 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="124.993µs"
I1122 00:20:02.562092 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="7.330757ms"
I1122 00:20:02.562207 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="69.9µs"
I1122 00:20:05.541105 1 node_lifecycle_controller.go:1048] "Controller detected that some Nodes are Ready. Exiting master disruption mode"
==> kube-proxy [5119ee9a69fb309c6fe6c40bfdf7853c1d5fd0390280d45b28a695bd3259a0c0] <==
I1122 00:19:47.043350 1 server_others.go:69] "Using iptables proxy"
I1122 00:19:47.061630 1 node.go:141] Successfully retrieved node IP: 192.168.103.2
I1122 00:19:47.101193 1 server.go:632] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I1122 00:19:47.103704 1 server_others.go:152] "Using iptables Proxier"
I1122 00:19:47.103745 1 server_others.go:421] "Detect-local-mode set to ClusterCIDR, but no cluster CIDR for family" ipFamily="IPv6"
I1122 00:19:47.103755 1 server_others.go:438] "Defaulting to no-op detect-local"
I1122 00:19:47.103806 1 proxier.go:251] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
I1122 00:19:47.104104 1 server.go:846] "Version info" version="v1.28.0"
I1122 00:19:47.104124 1 server.go:848] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1122 00:19:47.104828 1 config.go:188] "Starting service config controller"
I1122 00:19:47.104867 1 shared_informer.go:311] Waiting for caches to sync for service config
I1122 00:19:47.104926 1 config.go:97] "Starting endpoint slice config controller"
I1122 00:19:47.104932 1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
I1122 00:19:47.105174 1 config.go:315] "Starting node config controller"
I1122 00:19:47.105210 1 shared_informer.go:311] Waiting for caches to sync for node config
I1122 00:19:47.205514 1 shared_informer.go:318] Caches are synced for endpoint slice config
I1122 00:19:47.205516 1 shared_informer.go:318] Caches are synced for service config
I1122 00:19:47.205561 1 shared_informer.go:318] Caches are synced for node config
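(The iptables proxier started above programs the KUBE-SERVICES chain on the node; a hedged spot check, assuming SSH access via the profile name:
    minikube ssh -p old-k8s-version-462319 -- sudo iptables -t nat -L KUBE-SERVICES)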
==> kube-scheduler [1863b35aae093f7c8f897de1e1301f7582ed68975578bf5d2f19a845b5bbb715] <==
W1122 00:19:29.717451 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E1122 00:19:29.717478 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
W1122 00:19:29.717458 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E1122 00:19:29.717515 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
W1122 00:19:29.717553 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E1122 00:19:29.717616 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
W1122 00:19:29.717652 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E1122 00:19:29.717675 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
W1122 00:19:30.562109 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E1122 00:19:30.562139 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
W1122 00:19:30.586044 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E1122 00:19:30.586087 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
W1122 00:19:30.770112 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E1122 00:19:30.770162 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
W1122 00:19:30.772555 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E1122 00:19:30.772599 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
W1122 00:19:30.781374 1 reflector.go:535] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E1122 00:19:30.781431 1 reflector.go:147] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
W1122 00:19:30.807504 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E1122 00:19:30.807533 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
W1122 00:19:30.845180 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E1122 00:19:30.845236 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
W1122 00:19:30.871051 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E1122 00:19:30.871090 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
I1122 00:19:33.910375 1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Nov 22 00:19:45 old-k8s-version-462319 kubelet[1521]: I1122 00:19:45.613796 1521 kubelet_network.go:61] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24"
Nov 22 00:19:46 old-k8s-version-462319 kubelet[1521]: I1122 00:19:46.364926 1521 topology_manager.go:215] "Topology Admit Handler" podUID="643cd348-4af3-4720-af0d-e931f184742c" podNamespace="kube-system" podName="kube-proxy-kqrng"
Nov 22 00:19:46 old-k8s-version-462319 kubelet[1521]: I1122 00:19:46.365817 1521 topology_manager.go:215] "Topology Admit Handler" podUID="6bf161d2-c442-466d-98b8-c313a127bf22" podNamespace="kube-system" podName="kindnet-ldtd8"
Nov 22 00:19:46 old-k8s-version-462319 kubelet[1521]: I1122 00:19:46.396776 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-295rj\" (UniqueName: \"kubernetes.io/projected/643cd348-4af3-4720-af0d-e931f184742c-kube-api-access-295rj\") pod \"kube-proxy-kqrng\" (UID: \"643cd348-4af3-4720-af0d-e931f184742c\") " pod="kube-system/kube-proxy-kqrng"
Nov 22 00:19:46 old-k8s-version-462319 kubelet[1521]: I1122 00:19:46.398874 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/643cd348-4af3-4720-af0d-e931f184742c-lib-modules\") pod \"kube-proxy-kqrng\" (UID: \"643cd348-4af3-4720-af0d-e931f184742c\") " pod="kube-system/kube-proxy-kqrng"
Nov 22 00:19:46 old-k8s-version-462319 kubelet[1521]: I1122 00:19:46.398955 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-cfg\" (UniqueName: \"kubernetes.io/host-path/6bf161d2-c442-466d-98b8-c313a127bf22-cni-cfg\") pod \"kindnet-ldtd8\" (UID: \"6bf161d2-c442-466d-98b8-c313a127bf22\") " pod="kube-system/kindnet-ldtd8"
Nov 22 00:19:46 old-k8s-version-462319 kubelet[1521]: I1122 00:19:46.398980 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/6bf161d2-c442-466d-98b8-c313a127bf22-xtables-lock\") pod \"kindnet-ldtd8\" (UID: \"6bf161d2-c442-466d-98b8-c313a127bf22\") " pod="kube-system/kindnet-ldtd8"
Nov 22 00:19:46 old-k8s-version-462319 kubelet[1521]: I1122 00:19:46.399025 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/6bf161d2-c442-466d-98b8-c313a127bf22-lib-modules\") pod \"kindnet-ldtd8\" (UID: \"6bf161d2-c442-466d-98b8-c313a127bf22\") " pod="kube-system/kindnet-ldtd8"
Nov 22 00:19:46 old-k8s-version-462319 kubelet[1521]: I1122 00:19:46.399054 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/643cd348-4af3-4720-af0d-e931f184742c-kube-proxy\") pod \"kube-proxy-kqrng\" (UID: \"643cd348-4af3-4720-af0d-e931f184742c\") " pod="kube-system/kube-proxy-kqrng"
Nov 22 00:19:46 old-k8s-version-462319 kubelet[1521]: I1122 00:19:46.399082 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/643cd348-4af3-4720-af0d-e931f184742c-xtables-lock\") pod \"kube-proxy-kqrng\" (UID: \"643cd348-4af3-4720-af0d-e931f184742c\") " pod="kube-system/kube-proxy-kqrng"
Nov 22 00:19:46 old-k8s-version-462319 kubelet[1521]: I1122 00:19:46.399117 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xwtxn\" (UniqueName: \"kubernetes.io/projected/6bf161d2-c442-466d-98b8-c313a127bf22-kube-api-access-xwtxn\") pod \"kindnet-ldtd8\" (UID: \"6bf161d2-c442-466d-98b8-c313a127bf22\") " pod="kube-system/kindnet-ldtd8"
Nov 22 00:19:47 old-k8s-version-462319 kubelet[1521]: I1122 00:19:47.509109 1521 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kube-proxy-kqrng" podStartSLOduration=1.509057216 podCreationTimestamp="2025-11-22 00:19:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 00:19:47.50894671 +0000 UTC m=+14.188238544" watchObservedRunningTime="2025-11-22 00:19:47.509057216 +0000 UTC m=+14.188349048"
Nov 22 00:19:50 old-k8s-version-462319 kubelet[1521]: I1122 00:19:50.516088 1521 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kindnet-ldtd8" podStartSLOduration=1.666002271 podCreationTimestamp="2025-11-22 00:19:46 +0000 UTC" firstStartedPulling="2025-11-22 00:19:47.157978554 +0000 UTC m=+13.837270379" lastFinishedPulling="2025-11-22 00:19:50.007957975 +0000 UTC m=+16.687249802" observedRunningTime="2025-11-22 00:19:50.515675934 +0000 UTC m=+17.194967778" watchObservedRunningTime="2025-11-22 00:19:50.515981694 +0000 UTC m=+17.195273528"
Nov 22 00:20:00 old-k8s-version-462319 kubelet[1521]: I1122 00:20:00.709466 1521 kubelet_node_status.go:493] "Fast updating node status as it just became ready"
Nov 22 00:20:00 old-k8s-version-462319 kubelet[1521]: I1122 00:20:00.889924 1521 topology_manager.go:215] "Topology Admit Handler" podUID="fc0f2774-324d-4c1a-97b7-d3e3d30ea8b2" podNamespace="kube-system" podName="storage-provisioner"
Nov 22 00:20:00 old-k8s-version-462319 kubelet[1521]: I1122 00:20:00.892871 1521 topology_manager.go:215] "Topology Admit Handler" podUID="44750e8d-5eeb-4845-9029-a58cbf976b62" podNamespace="kube-system" podName="coredns-5dd5756b68-pqbfp"
Nov 22 00:20:00 old-k8s-version-462319 kubelet[1521]: I1122 00:20:00.993531 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/44750e8d-5eeb-4845-9029-a58cbf976b62-config-volume\") pod \"coredns-5dd5756b68-pqbfp\" (UID: \"44750e8d-5eeb-4845-9029-a58cbf976b62\") " pod="kube-system/coredns-5dd5756b68-pqbfp"
Nov 22 00:20:00 old-k8s-version-462319 kubelet[1521]: I1122 00:20:00.993597 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/fc0f2774-324d-4c1a-97b7-d3e3d30ea8b2-tmp\") pod \"storage-provisioner\" (UID: \"fc0f2774-324d-4c1a-97b7-d3e3d30ea8b2\") " pod="kube-system/storage-provisioner"
Nov 22 00:20:00 old-k8s-version-462319 kubelet[1521]: I1122 00:20:00.993637 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pfnhk\" (UniqueName: \"kubernetes.io/projected/44750e8d-5eeb-4845-9029-a58cbf976b62-kube-api-access-pfnhk\") pod \"coredns-5dd5756b68-pqbfp\" (UID: \"44750e8d-5eeb-4845-9029-a58cbf976b62\") " pod="kube-system/coredns-5dd5756b68-pqbfp"
Nov 22 00:20:00 old-k8s-version-462319 kubelet[1521]: I1122 00:20:00.993669 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rj2fz\" (UniqueName: \"kubernetes.io/projected/fc0f2774-324d-4c1a-97b7-d3e3d30ea8b2-kube-api-access-rj2fz\") pod \"storage-provisioner\" (UID: \"fc0f2774-324d-4c1a-97b7-d3e3d30ea8b2\") " pod="kube-system/storage-provisioner"
Nov 22 00:20:01 old-k8s-version-462319 kubelet[1521]: I1122 00:20:01.564512 1521 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=14.564413938 podCreationTimestamp="2025-11-22 00:19:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 00:20:01.564333027 +0000 UTC m=+28.243624860" watchObservedRunningTime="2025-11-22 00:20:01.564413938 +0000 UTC m=+28.243705771"
Nov 22 00:20:01 old-k8s-version-462319 kubelet[1521]: I1122 00:20:01.564659 1521 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/coredns-5dd5756b68-pqbfp" podStartSLOduration=15.564629833 podCreationTimestamp="2025-11-22 00:19:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 00:20:01.551555332 +0000 UTC m=+28.230847165" watchObservedRunningTime="2025-11-22 00:20:01.564629833 +0000 UTC m=+28.243921660"
Nov 22 00:20:04 old-k8s-version-462319 kubelet[1521]: I1122 00:20:04.775067 1521 topology_manager.go:215] "Topology Admit Handler" podUID="89dd9411-148d-4a8e-98d3-a51a8eab9d35" podNamespace="default" podName="busybox"
Nov 22 00:20:04 old-k8s-version-462319 kubelet[1521]: I1122 00:20:04.915405 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l7gkx\" (UniqueName: \"kubernetes.io/projected/89dd9411-148d-4a8e-98d3-a51a8eab9d35-kube-api-access-l7gkx\") pod \"busybox\" (UID: \"89dd9411-148d-4a8e-98d3-a51a8eab9d35\") " pod="default/busybox"
Nov 22 00:20:08 old-k8s-version-462319 kubelet[1521]: I1122 00:20:08.563800 1521 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="default/busybox" podStartSLOduration=2.326082204 podCreationTimestamp="2025-11-22 00:20:04 +0000 UTC" firstStartedPulling="2025-11-22 00:20:06.067901148 +0000 UTC m=+32.747192973" lastFinishedPulling="2025-11-22 00:20:08.305570732 +0000 UTC m=+34.984862556" observedRunningTime="2025-11-22 00:20:08.563606355 +0000 UTC m=+35.242898188" watchObservedRunningTime="2025-11-22 00:20:08.563751787 +0000 UTC m=+35.243043620"
==> storage-provisioner [f2a1ec178c227617bd32e678c94e3d44e606683f0b10ccdbc182dec6d6d5c9e9] <==
I1122 00:20:01.401220 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1122 00:20:01.412796 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1122 00:20:01.412842 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I1122 00:20:01.421489 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I1122 00:20:01.421683 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_old-k8s-version-462319_fbf5718a-3981-4828-8660-7b6ddab898c0!
I1122 00:20:01.421619 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"d8be93cf-82a7-4f20-a2ea-927b67416b8f", APIVersion:"v1", ResourceVersion:"405", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' old-k8s-version-462319_fbf5718a-3981-4828-8660-7b6ddab898c0 became leader
I1122 00:20:01.522750 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_old-k8s-version-462319_fbf5718a-3981-4828-8660-7b6ddab898c0!
-- /stdout --
helpers_test.go:262: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p old-k8s-version-462319 -n old-k8s-version-462319
helpers_test.go:269: (dbg) Run: kubectl --context old-k8s-version-462319 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:293: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: end of post-mortem logs <<<
helpers_test.go:294: ---------------------/post-mortem---------------------------------
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: docker inspect <======
helpers_test.go:239: (dbg) Run: docker inspect old-k8s-version-462319
helpers_test.go:243: (dbg) docker inspect old-k8s-version-462319:
-- stdout --
[
{
"Id": "60eae3b63b81b346ead7547921d488153ed6b21604550a910dce24f5c18a0d66",
"Created": "2025-11-22T00:19:16.365495044Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 248707,
"ExitCode": 0,
"Error": "",
"StartedAt": "2025-11-22T00:19:16.402958348Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:e5906b22e872a17998ae88aee6d850484e7a99144e0db6afcf2c44a53e6042d4",
"ResolvConfPath": "/var/lib/docker/containers/60eae3b63b81b346ead7547921d488153ed6b21604550a910dce24f5c18a0d66/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/60eae3b63b81b346ead7547921d488153ed6b21604550a910dce24f5c18a0d66/hostname",
"HostsPath": "/var/lib/docker/containers/60eae3b63b81b346ead7547921d488153ed6b21604550a910dce24f5c18a0d66/hosts",
"LogPath": "/var/lib/docker/containers/60eae3b63b81b346ead7547921d488153ed6b21604550a910dce24f5c18a0d66/60eae3b63b81b346ead7547921d488153ed6b21604550a910dce24f5c18a0d66-json.log",
"Name": "/old-k8s-version-462319",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"old-k8s-version-462319:/var",
"/lib/modules:/lib/modules:ro"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {
"max-size": "100m"
}
},
"NetworkMode": "old-k8s-version-462319",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "private",
"Dns": null,
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 3221225472,
"NanoCpus": 0,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 6442450944,
"MemorySwappiness": null,
"OomKillDisable": null,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"ID": "60eae3b63b81b346ead7547921d488153ed6b21604550a910dce24f5c18a0d66",
"LowerDir": "/var/lib/docker/overlay2/6ca06b58ff047715f101193d0f051e92ffb3bb47f4e9e98de16e3d4c7f58beb1-init/diff:/var/lib/docker/overlay2/4b4af9a4e857911a6b5096aeeaee227ee7577c6eff3b08bbb4e765c49ed2fb70/diff",
"MergedDir": "/var/lib/docker/overlay2/6ca06b58ff047715f101193d0f051e92ffb3bb47f4e9e98de16e3d4c7f58beb1/merged",
"UpperDir": "/var/lib/docker/overlay2/6ca06b58ff047715f101193d0f051e92ffb3bb47f4e9e98de16e3d4c7f58beb1/diff",
"WorkDir": "/var/lib/docker/overlay2/6ca06b58ff047715f101193d0f051e92ffb3bb47f4e9e98de16e3d4c7f58beb1/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "volume",
"Name": "old-k8s-version-462319",
"Source": "/var/lib/docker/volumes/old-k8s-version-462319/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
},
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
}
],
"Config": {
"Hostname": "old-k8s-version-462319",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763588073-21934@sha256:19d3da0413e1bfa354cbb88004c6796f8e9772a083e0230b0f6e50212ee04c7e",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "old-k8s-version-462319",
"name.minikube.sigs.k8s.io": "old-k8s-version-462319",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"SandboxID": "b6589169c31c78bfea6577019ea30ba0adadee1467810b9b1a0b1b8b4a97b9f5",
"SandboxKey": "/var/run/docker/netns/b6589169c31c",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33058"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33059"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33062"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33060"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33061"
}
]
},
"Networks": {
"old-k8s-version-462319": {
"IPAMConfig": {
"IPv4Address": "192.168.103.2",
"IPv6Address": ""
},
"Links": null,
"Aliases": null,
"DriverOpts": null,
"GwPriority": 0,
"NetworkID": "08252eaaf7e532efc839aa6b0c4ce7bea14dc3e5057df8085e81eab6e1e46265",
"EndpointID": "d132fdb6f6e769e175e9e69bd315da82881eb4351a6b66ae2fe24784dbabd3ac",
"Gateway": "192.168.103.1",
"IPAddress": "192.168.103.2",
"MacAddress": "6e:0f:4c:be:16:ac",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"old-k8s-version-462319",
"60eae3b63b81"
]
}
}
}
}
]
-- /stdout --
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p old-k8s-version-462319 -n old-k8s-version-462319
helpers_test.go:252: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-amd64 -p old-k8s-version-462319 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-amd64 -p old-k8s-version-462319 logs -n 25: (1.108526213s)
helpers_test.go:260: TestStartStop/group/old-k8s-version/serial/DeployApp logs:
-- stdout --
==> Audit <==
┌─────────┬─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬────────────────────────┬─────────┬─────────┬─────────────────────┬──────────────
───────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼────────────────────────┼─────────┼─────────┼─────────────────────┼──────────────
───────┤
│ ssh │ -p cilium-687868 sudo systemctl status cri-docker --all --full --no-pager │ cilium-687868 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ │
│ ssh │ -p cilium-687868 sudo systemctl cat cri-docker --no-pager │ cilium-687868 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ │
│ ssh │ -p cilium-687868 sudo cat /etc/systemd/system/cri-docker.service.d/10-cni.conf │ cilium-687868 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ │
│ ssh │ -p cilium-687868 sudo cat /usr/lib/systemd/system/cri-docker.service │ cilium-687868 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ │
│ ssh │ -p cilium-687868 sudo cri-dockerd --version │ cilium-687868 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ │
│ ssh │ -p cilium-687868 sudo systemctl status containerd --all --full --no-pager │ cilium-687868 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ │
│ ssh │ -p cilium-687868 sudo systemctl cat containerd --no-pager │ cilium-687868 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ │
│ ssh │ -p cilium-687868 sudo cat /lib/systemd/system/containerd.service │ cilium-687868 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ │
│ ssh │ -p cilium-687868 sudo cat /etc/containerd/config.toml │ cilium-687868 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ │
│ ssh │ -p cilium-687868 sudo containerd config dump │ cilium-687868 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ │
│ ssh │ -p cilium-687868 sudo systemctl status crio --all --full --no-pager │ cilium-687868 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ │
│ ssh │ -p cilium-687868 sudo systemctl cat crio --no-pager │ cilium-687868 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ │
│ ssh │ -p cilium-687868 sudo find /etc/crio -type f -exec sh -c 'echo {}; cat {}' \; │ cilium-687868 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ │
│ ssh │ -p cilium-687868 sudo crio config │ cilium-687868 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ │
│ delete │ -p cilium-687868 │ cilium-687868 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ 22 Nov 25 00:19 UTC │
│ start │ -p old-k8s-version-462319 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.28.0 │ old-k8s-version-462319 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ 22 Nov 25 00:20 UTC │
│ ssh │ -p NoKubernetes-714059 sudo systemctl is-active --quiet service kubelet │ NoKubernetes-714059 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ │
│ start │ -p cert-expiration-427330 --memory=3072 --cert-expiration=8760h --driver=docker --container-runtime=containerd │ cert-expiration-427330 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ 22 Nov 25 00:19 UTC │
│ delete │ -p cert-expiration-427330 │ cert-expiration-427330 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ 22 Nov 25 00:19 UTC │
│ start │ -p no-preload-781232 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.34.1 │ no-preload-781232 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ 22 Nov 25 00:20 UTC │
│ stop │ -p NoKubernetes-714059 │ NoKubernetes-714059 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ 22 Nov 25 00:19 UTC │
│ start │ -p NoKubernetes-714059 --driver=docker --container-runtime=containerd │ NoKubernetes-714059 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ 22 Nov 25 00:19 UTC │
│ ssh │ -p NoKubernetes-714059 sudo systemctl is-active --quiet service kubelet │ NoKubernetes-714059 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ │
│ delete │ -p NoKubernetes-714059 │ NoKubernetes-714059 │ jenkins │ v1.37.0 │ 22 Nov 25 00:19 UTC │ 22 Nov 25 00:20 UTC │
│ start │ -p embed-certs-491677 --memory=3072 --alsologtostderr --wait=true --embed-certs --driver=docker --container-runtime=containerd --kubernetes-version=v1.34.1 │ embed-certs-491677 │ jenkins │ v1.37.0 │ 22 Nov 25 00:20 UTC │ │
└─────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴────────────────────────┴─────────┴─────────┴─────────────────────┴──────────────
───────┘
==> Last Start <==
Log file created at: 2025/11/22 00:20:01
Running on machine: ubuntu-20-agent-6
Binary: Built with gc go1.25.3 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1122 00:20:01.497017 260527 out.go:360] Setting OutFile to fd 1 ...
I1122 00:20:01.497324 260527 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1122 00:20:01.497336 260527 out.go:374] Setting ErrFile to fd 2...
I1122 00:20:01.497340 260527 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1122 00:20:01.497588 260527 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21934-9059/.minikube/bin
I1122 00:20:01.498054 260527 out.go:368] Setting JSON to false
I1122 00:20:01.499443 260527 start.go:133] hostinfo: {"hostname":"ubuntu-20-agent-6","uptime":3740,"bootTime":1763767061,"procs":385,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1044-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I1122 00:20:01.499503 260527 start.go:143] virtualization: kvm guest
I1122 00:20:01.501458 260527 out.go:179] * [embed-certs-491677] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
I1122 00:20:01.503562 260527 notify.go:221] Checking for updates...
I1122 00:20:01.503572 260527 out.go:179] - MINIKUBE_LOCATION=21934
I1122 00:20:01.505088 260527 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1122 00:20:01.506758 260527 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21934-9059/kubeconfig
I1122 00:20:01.508287 260527 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21934-9059/.minikube
I1122 00:20:01.509699 260527 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64
I1122 00:20:01.511183 260527 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1122 00:20:01.513382 260527 config.go:182] Loaded profile config "kubernetes-upgrade-882262": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1122 00:20:01.513541 260527 config.go:182] Loaded profile config "no-preload-781232": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1122 00:20:01.513638 260527 config.go:182] Loaded profile config "old-k8s-version-462319": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1122 00:20:01.513752 260527 driver.go:422] Setting default libvirt URI to qemu:///system
I1122 00:20:01.545401 260527 docker.go:124] docker version: linux-29.0.2:Docker Engine - Community
I1122 00:20:01.545504 260527 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1122 00:20:01.611105 260527 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:3 ContainersRunning:3 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:63 OomKillDisable:false NGoroutines:75 SystemTime:2025-11-22 00:20:01.601298329 +0000 UTC LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-1044-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652076544 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-6 Labels:[] ExperimentalBuild:false ServerVersion:29.0.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:fcd43222d6b07379a4be9786bda52438f0dd16a1 Expected:} RuncCommit:{ID:v1.3.3-0-gd842d771 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.30.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.40.3] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v1.0.1] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1122 00:20:01.611234 260527 docker.go:319] overlay module found
I1122 00:20:01.613226 260527 out.go:179] * Using the docker driver based on user configuration
I1122 00:20:01.614649 260527 start.go:309] selected driver: docker
I1122 00:20:01.614666 260527 start.go:930] validating driver "docker" against <nil>
I1122 00:20:01.614677 260527 start.go:941] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1122 00:20:01.615350 260527 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1122 00:20:01.674666 260527 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:3 ContainersRunning:3 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:64 OomKillDisable:false NGoroutines:76 SystemTime:2025-11-22 00:20:01.664354692 +0000 UTC LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-1044-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652076544 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-6 Labels:[] ExperimentalBuild:false ServerVersion:29.0.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:fcd43222d6b07379a4be9786bda52438f0dd16a1 Expected:} RuncCommit:{ID:v1.3.3-0-gd842d771 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.30.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.40.3] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v1.0.1] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1122 00:20:01.674876 260527 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I1122 00:20:01.675176 260527 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1122 00:20:01.676975 260527 out.go:179] * Using Docker driver with root privileges
I1122 00:20:01.678251 260527 cni.go:84] Creating CNI manager for ""
I1122 00:20:01.678367 260527 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1122 00:20:01.678383 260527 start_flags.go:336] Found "CNI" CNI - setting NetworkPlugin=cni
I1122 00:20:01.678481 260527 start.go:353] cluster config:
{Name:embed-certs-491677 KeepContext:false EmbedCerts:true MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763588073-21934@sha256:19d3da0413e1bfa354cbb88004c6796f8e9772a083e0230b0f6e50212ee04c7e Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:embed-certs-491677 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1122 00:20:01.680036 260527 out.go:179] * Starting "embed-certs-491677" primary control-plane node in "embed-certs-491677" cluster
I1122 00:20:01.683810 260527 cache.go:134] Beginning downloading kic base image for docker with containerd
I1122 00:20:01.685242 260527 out.go:179] * Pulling base image v0.0.48-1763588073-21934 ...
I1122 00:20:01.686680 260527 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
I1122 00:20:01.686729 260527 preload.go:203] Found local preload: /home/jenkins/minikube-integration/21934-9059/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-containerd-overlay2-amd64.tar.lz4
I1122 00:20:01.686743 260527 cache.go:65] Caching tarball of preloaded images
I1122 00:20:01.686775 260527 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763588073-21934@sha256:19d3da0413e1bfa354cbb88004c6796f8e9772a083e0230b0f6e50212ee04c7e in local docker daemon
I1122 00:20:01.686916 260527 preload.go:238] Found /home/jenkins/minikube-integration/21934-9059/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-containerd-overlay2-amd64.tar.lz4 in cache, skipping download
I1122 00:20:01.686942 260527 cache.go:68] Finished verifying existence of preloaded tar for v1.34.1 on containerd
I1122 00:20:01.687116 260527 profile.go:143] Saving config to /home/jenkins/minikube-integration/21934-9059/.minikube/profiles/embed-certs-491677/config.json ...
I1122 00:20:01.687148 260527 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21934-9059/.minikube/profiles/embed-certs-491677/config.json: {Name:mkf02d672882aad1c3b94e79745f8cf62e3f5b13 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1122 00:20:01.708872 260527 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763588073-21934@sha256:19d3da0413e1bfa354cbb88004c6796f8e9772a083e0230b0f6e50212ee04c7e in local docker daemon, skipping pull
I1122 00:20:01.708897 260527 cache.go:158] gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763588073-21934@sha256:19d3da0413e1bfa354cbb88004c6796f8e9772a083e0230b0f6e50212ee04c7e exists in daemon, skipping load
I1122 00:20:01.708914 260527 cache.go:243] Successfully downloaded all kic artifacts
I1122 00:20:01.708943 260527 start.go:360] acquireMachinesLock for embed-certs-491677: {Name:mkbe59d49caffedca862a9ecb177d8d82196efdb Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1122 00:20:01.709044 260527 start.go:364] duration metric: took 84.98µs to acquireMachinesLock for "embed-certs-491677"
I1122 00:20:01.709067 260527 start.go:93] Provisioning new machine with config: &{Name:embed-certs-491677 KeepContext:false EmbedCerts:true MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763588073-21934@sha256:19d3da0413e1bfa354cbb88004c6796f8e9772a083e0230b0f6e50212ee04c7e Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:embed-certs-491677 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1122 00:20:01.709131 260527 start.go:125] createHost starting for "" (driver="docker")
I1122 00:19:58.829298 218693 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1122 00:19:58.829759 218693 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1122 00:19:58.829815 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1122 00:19:58.829864 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1122 00:19:58.856999 218693 cri.go:89] found id: "031d1abe53b88560dbac18645a9d04e01621af0ebc4bdacf93ba7cd987bdbc7d"
I1122 00:19:58.857027 218693 cri.go:89] found id: "2e3aaa0d96c2cc9d110b994e3df108e0a78b3e80dae0dc52febf87cbdd528587"
I1122 00:19:58.857033 218693 cri.go:89] found id: ""
I1122 00:19:58.857044 218693 logs.go:282] 2 containers: [031d1abe53b88560dbac18645a9d04e01621af0ebc4bdacf93ba7cd987bdbc7d 2e3aaa0d96c2cc9d110b994e3df108e0a78b3e80dae0dc52febf87cbdd528587]
I1122 00:19:58.857093 218693 ssh_runner.go:195] Run: which crictl
I1122 00:19:58.861107 218693 ssh_runner.go:195] Run: which crictl
I1122 00:19:58.865268 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1122 00:19:58.865337 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1122 00:19:58.892542 218693 cri.go:89] found id: "ce556a5394180410f0cc434955d664ca0f25f8999150ddb0c902378b8f0ec7b7"
I1122 00:19:58.892564 218693 cri.go:89] found id: ""
I1122 00:19:58.892572 218693 logs.go:282] 1 containers: [ce556a5394180410f0cc434955d664ca0f25f8999150ddb0c902378b8f0ec7b7]
I1122 00:19:58.892626 218693 ssh_runner.go:195] Run: which crictl
I1122 00:19:58.896771 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1122 00:19:58.896846 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1122 00:19:58.925628 218693 cri.go:89] found id: ""
I1122 00:19:58.925652 218693 logs.go:282] 0 containers: []
W1122 00:19:58.925660 218693 logs.go:284] No container was found matching "coredns"
I1122 00:19:58.925666 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1122 00:19:58.925724 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1122 00:19:58.955304 218693 cri.go:89] found id: "8cb86218489f4e4ea496e1edbb81a5bfa9657517fe1f841ab1f262169880ef78"
I1122 00:19:58.955326 218693 cri.go:89] found id: "b072fb61e5e8d9fb0c450f6123a51d4ba86ee5162b4c0378a606893fb26410b2"
I1122 00:19:58.955332 218693 cri.go:89] found id: ""
I1122 00:19:58.955340 218693 logs.go:282] 2 containers: [8cb86218489f4e4ea496e1edbb81a5bfa9657517fe1f841ab1f262169880ef78 b072fb61e5e8d9fb0c450f6123a51d4ba86ee5162b4c0378a606893fb26410b2]
I1122 00:19:58.955397 218693 ssh_runner.go:195] Run: which crictl
I1122 00:19:58.959396 218693 ssh_runner.go:195] Run: which crictl
I1122 00:19:58.963562 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1122 00:19:58.963626 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1122 00:19:58.991860 218693 cri.go:89] found id: ""
I1122 00:19:58.991883 218693 logs.go:282] 0 containers: []
W1122 00:19:58.991890 218693 logs.go:284] No container was found matching "kube-proxy"
I1122 00:19:58.991895 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1122 00:19:58.991949 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1122 00:19:59.020457 218693 cri.go:89] found id: "91989ea7d1eb87264ea639688db06633fb66749e41f18e88a4bd9a185ac7a68a"
I1122 00:19:59.020483 218693 cri.go:89] found id: "13facf83677f37e3b97292f0f2dc164096fcd8cff5e71fdcf0ad085e3602f216"
I1122 00:19:59.020489 218693 cri.go:89] found id: ""
I1122 00:19:59.020502 218693 logs.go:282] 2 containers: [91989ea7d1eb87264ea639688db06633fb66749e41f18e88a4bd9a185ac7a68a 13facf83677f37e3b97292f0f2dc164096fcd8cff5e71fdcf0ad085e3602f216]
I1122 00:19:59.020550 218693 ssh_runner.go:195] Run: which crictl
I1122 00:19:59.024967 218693 ssh_runner.go:195] Run: which crictl
I1122 00:19:59.031778 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1122 00:19:59.031854 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1122 00:19:59.061726 218693 cri.go:89] found id: ""
I1122 00:19:59.061752 218693 logs.go:282] 0 containers: []
W1122 00:19:59.061763 218693 logs.go:284] No container was found matching "kindnet"
I1122 00:19:59.061771 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1122 00:19:59.061831 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1122 00:19:59.089141 218693 cri.go:89] found id: ""
I1122 00:19:59.089164 218693 logs.go:282] 0 containers: []
W1122 00:19:59.089174 218693 logs.go:284] No container was found matching "storage-provisioner"
I1122 00:19:59.089185 218693 logs.go:123] Gathering logs for kubelet ...
I1122 00:19:59.089198 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1122 00:19:59.186417 218693 logs.go:123] Gathering logs for dmesg ...
I1122 00:19:59.186452 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1122 00:19:59.201060 218693 logs.go:123] Gathering logs for describe nodes ...
I1122 00:19:59.201095 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1122 00:19:59.264254 218693 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1122 00:19:59.264297 218693 logs.go:123] Gathering logs for kube-apiserver [031d1abe53b88560dbac18645a9d04e01621af0ebc4bdacf93ba7cd987bdbc7d] ...
I1122 00:19:59.264313 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 031d1abe53b88560dbac18645a9d04e01621af0ebc4bdacf93ba7cd987bdbc7d"
I1122 00:19:59.303605 218693 logs.go:123] Gathering logs for kube-scheduler [8cb86218489f4e4ea496e1edbb81a5bfa9657517fe1f841ab1f262169880ef78] ...
I1122 00:19:59.303643 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 8cb86218489f4e4ea496e1edbb81a5bfa9657517fe1f841ab1f262169880ef78"
I1122 00:19:59.358382 218693 logs.go:123] Gathering logs for kube-controller-manager [13facf83677f37e3b97292f0f2dc164096fcd8cff5e71fdcf0ad085e3602f216] ...
I1122 00:19:59.358425 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 13facf83677f37e3b97292f0f2dc164096fcd8cff5e71fdcf0ad085e3602f216"
I1122 00:19:59.398629 218693 logs.go:123] Gathering logs for containerd ...
I1122 00:19:59.398669 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1122 00:19:59.449463 218693 logs.go:123] Gathering logs for kube-apiserver [2e3aaa0d96c2cc9d110b994e3df108e0a78b3e80dae0dc52febf87cbdd528587] ...
I1122 00:19:59.449505 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 2e3aaa0d96c2cc9d110b994e3df108e0a78b3e80dae0dc52febf87cbdd528587"
I1122 00:19:59.487365 218693 logs.go:123] Gathering logs for etcd [ce556a5394180410f0cc434955d664ca0f25f8999150ddb0c902378b8f0ec7b7] ...
I1122 00:19:59.487403 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 ce556a5394180410f0cc434955d664ca0f25f8999150ddb0c902378b8f0ec7b7"
I1122 00:19:59.526046 218693 logs.go:123] Gathering logs for kube-scheduler [b072fb61e5e8d9fb0c450f6123a51d4ba86ee5162b4c0378a606893fb26410b2] ...
I1122 00:19:59.526080 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 b072fb61e5e8d9fb0c450f6123a51d4ba86ee5162b4c0378a606893fb26410b2"
I1122 00:19:59.562812 218693 logs.go:123] Gathering logs for kube-controller-manager [91989ea7d1eb87264ea639688db06633fb66749e41f18e88a4bd9a185ac7a68a] ...
I1122 00:19:59.562843 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 91989ea7d1eb87264ea639688db06633fb66749e41f18e88a4bd9a185ac7a68a"
I1122 00:19:59.594191 218693 logs.go:123] Gathering logs for container status ...
I1122 00:19:59.594230 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1122 00:20:02.129372 218693 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1122 00:20:02.129923 218693 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1122 00:20:02.130004 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1122 00:20:02.130071 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1122 00:20:02.161455 218693 cri.go:89] found id: "031d1abe53b88560dbac18645a9d04e01621af0ebc4bdacf93ba7cd987bdbc7d"
I1122 00:20:02.161484 218693 cri.go:89] found id: "2e3aaa0d96c2cc9d110b994e3df108e0a78b3e80dae0dc52febf87cbdd528587"
I1122 00:20:02.161490 218693 cri.go:89] found id: ""
I1122 00:20:02.161501 218693 logs.go:282] 2 containers: [031d1abe53b88560dbac18645a9d04e01621af0ebc4bdacf93ba7cd987bdbc7d 2e3aaa0d96c2cc9d110b994e3df108e0a78b3e80dae0dc52febf87cbdd528587]
I1122 00:20:02.161563 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:02.165824 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:02.170451 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1122 00:20:02.170522 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1122 00:19:58.029853 251199 kapi.go:214] "coredns" deployment in "kube-system" namespace and "no-preload-781232" context rescaled to 1 replicas
W1122 00:19:59.529847 251199 node_ready.go:57] node "no-preload-781232" has "Ready":"False" status (will retry)
W1122 00:20:01.530493 251199 node_ready.go:57] node "no-preload-781232" has "Ready":"False" status (will retry)
W1122 00:20:00.520224 247021 node_ready.go:57] node "old-k8s-version-462319" has "Ready":"False" status (will retry)
I1122 00:20:01.019651 247021 node_ready.go:49] node "old-k8s-version-462319" is "Ready"
I1122 00:20:01.019681 247021 node_ready.go:38] duration metric: took 14.003330086s for node "old-k8s-version-462319" to be "Ready" ...
I1122 00:20:01.019696 247021 api_server.go:52] waiting for apiserver process to appear ...
I1122 00:20:01.019743 247021 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1122 00:20:01.032926 247021 api_server.go:72] duration metric: took 14.481952557s to wait for apiserver process to appear ...
I1122 00:20:01.032954 247021 api_server.go:88] waiting for apiserver healthz status ...
I1122 00:20:01.032973 247021 api_server.go:253] Checking apiserver healthz at https://192.168.103.2:8443/healthz ...
I1122 00:20:01.039899 247021 api_server.go:279] https://192.168.103.2:8443/healthz returned 200:
ok
I1122 00:20:01.041146 247021 api_server.go:141] control plane version: v1.28.0
I1122 00:20:01.041172 247021 api_server.go:131] duration metric: took 8.212119ms to wait for apiserver health ...
I1122 00:20:01.041191 247021 system_pods.go:43] waiting for kube-system pods to appear ...
I1122 00:20:01.044815 247021 system_pods.go:59] 8 kube-system pods found
I1122 00:20:01.044853 247021 system_pods.go:61] "coredns-5dd5756b68-pqbfp" [44750e8d-5eeb-4845-9029-a58cbf976b62] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1122 00:20:01.044862 247021 system_pods.go:61] "etcd-old-k8s-version-462319" [9580468b-aa0f-4d73-9c35-f9cc4c817cdd] Running
I1122 00:20:01.044874 247021 system_pods.go:61] "kindnet-ldtd8" [6bf161d2-c442-466d-98b8-c313a127bf22] Running
I1122 00:20:01.044879 247021 system_pods.go:61] "kube-apiserver-old-k8s-version-462319" [2f4b6fd0-2929-448d-820c-aabf2a9d4744] Running
I1122 00:20:01.044888 247021 system_pods.go:61] "kube-controller-manager-old-k8s-version-462319" [83b4a291-8bac-4581-b4a6-80471e7228eb] Running
I1122 00:20:01.044897 247021 system_pods.go:61] "kube-proxy-kqrng" [643cd348-4af3-4720-af0d-e931f184742c] Running
I1122 00:20:01.044901 247021 system_pods.go:61] "kube-scheduler-old-k8s-version-462319" [c1dc982d-cc79-4df6-bdc4-7e47f5d5236c] Running
I1122 00:20:01.044909 247021 system_pods.go:61] "storage-provisioner" [fc0f2774-324d-4c1a-97b7-d3e3d30ea8b2] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1122 00:20:01.044918 247021 system_pods.go:74] duration metric: took 3.718269ms to wait for pod list to return data ...
I1122 00:20:01.044929 247021 default_sa.go:34] waiting for default service account to be created ...
I1122 00:20:01.047150 247021 default_sa.go:45] found service account: "default"
I1122 00:20:01.047173 247021 default_sa.go:55] duration metric: took 2.236156ms for default service account to be created ...
I1122 00:20:01.047182 247021 system_pods.go:116] waiting for k8s-apps to be running ...
I1122 00:20:01.050474 247021 system_pods.go:86] 8 kube-system pods found
I1122 00:20:01.050506 247021 system_pods.go:89] "coredns-5dd5756b68-pqbfp" [44750e8d-5eeb-4845-9029-a58cbf976b62] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1122 00:20:01.050514 247021 system_pods.go:89] "etcd-old-k8s-version-462319" [9580468b-aa0f-4d73-9c35-f9cc4c817cdd] Running
I1122 00:20:01.050523 247021 system_pods.go:89] "kindnet-ldtd8" [6bf161d2-c442-466d-98b8-c313a127bf22] Running
I1122 00:20:01.050528 247021 system_pods.go:89] "kube-apiserver-old-k8s-version-462319" [2f4b6fd0-2929-448d-820c-aabf2a9d4744] Running
I1122 00:20:01.050533 247021 system_pods.go:89] "kube-controller-manager-old-k8s-version-462319" [83b4a291-8bac-4581-b4a6-80471e7228eb] Running
I1122 00:20:01.050539 247021 system_pods.go:89] "kube-proxy-kqrng" [643cd348-4af3-4720-af0d-e931f184742c] Running
I1122 00:20:01.050544 247021 system_pods.go:89] "kube-scheduler-old-k8s-version-462319" [c1dc982d-cc79-4df6-bdc4-7e47f5d5236c] Running
I1122 00:20:01.050551 247021 system_pods.go:89] "storage-provisioner" [fc0f2774-324d-4c1a-97b7-d3e3d30ea8b2] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1122 00:20:01.050577 247021 retry.go:31] will retry after 205.575764ms: missing components: kube-dns
I1122 00:20:01.261814 247021 system_pods.go:86] 8 kube-system pods found
I1122 00:20:01.261847 247021 system_pods.go:89] "coredns-5dd5756b68-pqbfp" [44750e8d-5eeb-4845-9029-a58cbf976b62] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1122 00:20:01.261859 247021 system_pods.go:89] "etcd-old-k8s-version-462319" [9580468b-aa0f-4d73-9c35-f9cc4c817cdd] Running
I1122 00:20:01.261865 247021 system_pods.go:89] "kindnet-ldtd8" [6bf161d2-c442-466d-98b8-c313a127bf22] Running
I1122 00:20:01.261869 247021 system_pods.go:89] "kube-apiserver-old-k8s-version-462319" [2f4b6fd0-2929-448d-820c-aabf2a9d4744] Running
I1122 00:20:01.261873 247021 system_pods.go:89] "kube-controller-manager-old-k8s-version-462319" [83b4a291-8bac-4581-b4a6-80471e7228eb] Running
I1122 00:20:01.261877 247021 system_pods.go:89] "kube-proxy-kqrng" [643cd348-4af3-4720-af0d-e931f184742c] Running
I1122 00:20:01.261879 247021 system_pods.go:89] "kube-scheduler-old-k8s-version-462319" [c1dc982d-cc79-4df6-bdc4-7e47f5d5236c] Running
I1122 00:20:01.261884 247021 system_pods.go:89] "storage-provisioner" [fc0f2774-324d-4c1a-97b7-d3e3d30ea8b2] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1122 00:20:01.261900 247021 retry.go:31] will retry after 236.21482ms: missing components: kube-dns
I1122 00:20:01.502877 247021 system_pods.go:86] 8 kube-system pods found
I1122 00:20:01.502913 247021 system_pods.go:89] "coredns-5dd5756b68-pqbfp" [44750e8d-5eeb-4845-9029-a58cbf976b62] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1122 00:20:01.502921 247021 system_pods.go:89] "etcd-old-k8s-version-462319" [9580468b-aa0f-4d73-9c35-f9cc4c817cdd] Running
I1122 00:20:01.502929 247021 system_pods.go:89] "kindnet-ldtd8" [6bf161d2-c442-466d-98b8-c313a127bf22] Running
I1122 00:20:01.502935 247021 system_pods.go:89] "kube-apiserver-old-k8s-version-462319" [2f4b6fd0-2929-448d-820c-aabf2a9d4744] Running
I1122 00:20:01.502952 247021 system_pods.go:89] "kube-controller-manager-old-k8s-version-462319" [83b4a291-8bac-4581-b4a6-80471e7228eb] Running
I1122 00:20:01.502957 247021 system_pods.go:89] "kube-proxy-kqrng" [643cd348-4af3-4720-af0d-e931f184742c] Running
I1122 00:20:01.502962 247021 system_pods.go:89] "kube-scheduler-old-k8s-version-462319" [c1dc982d-cc79-4df6-bdc4-7e47f5d5236c] Running
I1122 00:20:01.502984 247021 system_pods.go:89] "storage-provisioner" [fc0f2774-324d-4c1a-97b7-d3e3d30ea8b2] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1122 00:20:01.503005 247021 retry.go:31] will retry after 442.873739ms: missing components: kube-dns
I1122 00:20:01.950449 247021 system_pods.go:86] 8 kube-system pods found
I1122 00:20:01.950483 247021 system_pods.go:89] "coredns-5dd5756b68-pqbfp" [44750e8d-5eeb-4845-9029-a58cbf976b62] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1122 00:20:01.950492 247021 system_pods.go:89] "etcd-old-k8s-version-462319" [9580468b-aa0f-4d73-9c35-f9cc4c817cdd] Running
I1122 00:20:01.950500 247021 system_pods.go:89] "kindnet-ldtd8" [6bf161d2-c442-466d-98b8-c313a127bf22] Running
I1122 00:20:01.950505 247021 system_pods.go:89] "kube-apiserver-old-k8s-version-462319" [2f4b6fd0-2929-448d-820c-aabf2a9d4744] Running
I1122 00:20:01.950516 247021 system_pods.go:89] "kube-controller-manager-old-k8s-version-462319" [83b4a291-8bac-4581-b4a6-80471e7228eb] Running
I1122 00:20:01.950521 247021 system_pods.go:89] "kube-proxy-kqrng" [643cd348-4af3-4720-af0d-e931f184742c] Running
I1122 00:20:01.950526 247021 system_pods.go:89] "kube-scheduler-old-k8s-version-462319" [c1dc982d-cc79-4df6-bdc4-7e47f5d5236c] Running
I1122 00:20:01.950530 247021 system_pods.go:89] "storage-provisioner" [fc0f2774-324d-4c1a-97b7-d3e3d30ea8b2] Running
I1122 00:20:01.950541 247021 system_pods.go:126] duration metric: took 903.352039ms to wait for k8s-apps to be running ...
I1122 00:20:01.950553 247021 system_svc.go:44] waiting for kubelet service to be running ....
I1122 00:20:01.950602 247021 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1122 00:20:01.964580 247021 system_svc.go:56] duration metric: took 14.015441ms WaitForService to wait for kubelet
I1122 00:20:01.964612 247021 kubeadm.go:587] duration metric: took 15.413644993s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1122 00:20:01.964634 247021 node_conditions.go:102] verifying NodePressure condition ...
I1122 00:20:01.968157 247021 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I1122 00:20:01.968185 247021 node_conditions.go:123] node cpu capacity is 8
I1122 00:20:01.968205 247021 node_conditions.go:105] duration metric: took 3.565831ms to run NodePressure ...
I1122 00:20:01.968227 247021 start.go:242] waiting for startup goroutines ...
I1122 00:20:01.968237 247021 start.go:247] waiting for cluster config update ...
I1122 00:20:01.968254 247021 start.go:256] writing updated cluster config ...
I1122 00:20:01.968545 247021 ssh_runner.go:195] Run: rm -f paused
I1122 00:20:01.972712 247021 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1122 00:20:01.976920 247021 pod_ready.go:83] waiting for pod "coredns-5dd5756b68-pqbfp" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:02.983354 247021 pod_ready.go:94] pod "coredns-5dd5756b68-pqbfp" is "Ready"
I1122 00:20:02.983385 247021 pod_ready.go:86] duration metric: took 1.00643947s for pod "coredns-5dd5756b68-pqbfp" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:02.987209 247021 pod_ready.go:83] waiting for pod "etcd-old-k8s-version-462319" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:02.992024 247021 pod_ready.go:94] pod "etcd-old-k8s-version-462319" is "Ready"
I1122 00:20:02.992053 247021 pod_ready.go:86] duration metric: took 4.821819ms for pod "etcd-old-k8s-version-462319" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:02.994875 247021 pod_ready.go:83] waiting for pod "kube-apiserver-old-k8s-version-462319" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:02.998765 247021 pod_ready.go:94] pod "kube-apiserver-old-k8s-version-462319" is "Ready"
I1122 00:20:02.998789 247021 pod_ready.go:86] duration metric: took 3.892836ms for pod "kube-apiserver-old-k8s-version-462319" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:03.001798 247021 pod_ready.go:83] waiting for pod "kube-controller-manager-old-k8s-version-462319" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:03.181579 247021 pod_ready.go:94] pod "kube-controller-manager-old-k8s-version-462319" is "Ready"
I1122 00:20:03.181611 247021 pod_ready.go:86] duration metric: took 179.791243ms for pod "kube-controller-manager-old-k8s-version-462319" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:03.381883 247021 pod_ready.go:83] waiting for pod "kube-proxy-kqrng" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:03.781562 247021 pod_ready.go:94] pod "kube-proxy-kqrng" is "Ready"
I1122 00:20:03.781594 247021 pod_ready.go:86] duration metric: took 399.684082ms for pod "kube-proxy-kqrng" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:03.981736 247021 pod_ready.go:83] waiting for pod "kube-scheduler-old-k8s-version-462319" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:04.381559 247021 pod_ready.go:94] pod "kube-scheduler-old-k8s-version-462319" is "Ready"
I1122 00:20:04.381590 247021 pod_ready.go:86] duration metric: took 399.825883ms for pod "kube-scheduler-old-k8s-version-462319" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:04.381604 247021 pod_ready.go:40] duration metric: took 2.408861294s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1122 00:20:04.431804 247021 start.go:628] kubectl: 1.34.2, cluster: 1.28.0 (minor skew: 6)
I1122 00:20:04.435233 247021 out.go:203]
W1122 00:20:04.436473 247021 out.go:285] ! /usr/local/bin/kubectl is version 1.34.2, which may have incompatibilities with Kubernetes 1.28.0.
I1122 00:20:04.437863 247021 out.go:179] - Want kubectl v1.28.0? Try 'minikube kubectl -- get pods -A'
I1122 00:20:04.439555 247021 out.go:179] * Done! kubectl is now configured to use "old-k8s-version-462319" cluster and "default" namespace by default
I1122 00:20:01.711315 260527 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I1122 00:20:01.711555 260527 start.go:159] libmachine.API.Create for "embed-certs-491677" (driver="docker")
I1122 00:20:01.711610 260527 client.go:173] LocalClient.Create starting
I1122 00:20:01.711685 260527 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21934-9059/.minikube/certs/ca.pem
I1122 00:20:01.711719 260527 main.go:143] libmachine: Decoding PEM data...
I1122 00:20:01.711737 260527 main.go:143] libmachine: Parsing certificate...
I1122 00:20:01.711816 260527 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21934-9059/.minikube/certs/cert.pem
I1122 00:20:01.711837 260527 main.go:143] libmachine: Decoding PEM data...
I1122 00:20:01.711846 260527 main.go:143] libmachine: Parsing certificate...
I1122 00:20:01.712184 260527 cli_runner.go:164] Run: docker network inspect embed-certs-491677 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1122 00:20:01.730686 260527 cli_runner.go:211] docker network inspect embed-certs-491677 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1122 00:20:01.730752 260527 network_create.go:284] running [docker network inspect embed-certs-491677] to gather additional debugging logs...
I1122 00:20:01.730771 260527 cli_runner.go:164] Run: docker network inspect embed-certs-491677
W1122 00:20:01.749708 260527 cli_runner.go:211] docker network inspect embed-certs-491677 returned with exit code 1
I1122 00:20:01.749739 260527 network_create.go:287] error running [docker network inspect embed-certs-491677]: docker network inspect embed-certs-491677: exit status 1
stdout:
[]
stderr:
Error response from daemon: network embed-certs-491677 not found
I1122 00:20:01.749755 260527 network_create.go:289] output of [docker network inspect embed-certs-491677]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network embed-certs-491677 not found
** /stderr **
I1122 00:20:01.749902 260527 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1122 00:20:01.769006 260527 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-1df6c22ede91 IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:5a:c7:f4:a5:24:54} reservation:<nil>}
I1122 00:20:01.769731 260527 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-7d48551462a8 IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:ca:3b:0e:74:ee:57} reservation:<nil>}
I1122 00:20:01.770416 260527 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-c50004b7f5b6 IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:56:73:1e:0d:b7:11} reservation:<nil>}
I1122 00:20:01.771113 260527 network.go:211] skipping subnet 192.168.76.0/24 that is taken: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName:br-166d2f324fb5 IfaceIPv4:192.168.76.1 IfaceMTU:1500 IfaceMAC:02:da:99:1e:87:6f} reservation:<nil>}
I1122 00:20:01.771891 260527 network.go:206] using free private subnet 192.168.85.0/24: &{IP:192.168.85.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.85.0/24 Gateway:192.168.85.1 ClientMin:192.168.85.2 ClientMax:192.168.85.254 Broadcast:192.168.85.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001ebca10}
I1122 00:20:01.771919 260527 network_create.go:124] attempt to create docker network embed-certs-491677 192.168.85.0/24 with gateway 192.168.85.1 and MTU of 1500 ...
I1122 00:20:01.771970 260527 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.85.0/24 --gateway=192.168.85.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=embed-certs-491677 embed-certs-491677
I1122 00:20:01.823460 260527 network_create.go:108] docker network embed-certs-491677 192.168.85.0/24 created
I1122 00:20:01.823495 260527 kic.go:121] calculated static IP "192.168.85.2" for the "embed-certs-491677" container
I1122 00:20:01.823677 260527 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1122 00:20:01.843300 260527 cli_runner.go:164] Run: docker volume create embed-certs-491677 --label name.minikube.sigs.k8s.io=embed-certs-491677 --label created_by.minikube.sigs.k8s.io=true
I1122 00:20:01.863723 260527 oci.go:103] Successfully created a docker volume embed-certs-491677
I1122 00:20:01.863797 260527 cli_runner.go:164] Run: docker run --rm --name embed-certs-491677-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=embed-certs-491677 --entrypoint /usr/bin/test -v embed-certs-491677:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763588073-21934@sha256:19d3da0413e1bfa354cbb88004c6796f8e9772a083e0230b0f6e50212ee04c7e -d /var/lib
I1122 00:20:02.270865 260527 oci.go:107] Successfully prepared a docker volume embed-certs-491677
I1122 00:20:02.270965 260527 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
I1122 00:20:02.270986 260527 kic.go:194] Starting extracting preloaded images to volume ...
I1122 00:20:02.271058 260527 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21934-9059/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v embed-certs-491677:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763588073-21934@sha256:19d3da0413e1bfa354cbb88004c6796f8e9772a083e0230b0f6e50212ee04c7e -I lz4 -xf /preloaded.tar -C /extractDir
I1122 00:20:02.204729 218693 cri.go:89] found id: "ce556a5394180410f0cc434955d664ca0f25f8999150ddb0c902378b8f0ec7b7"
I1122 00:20:02.204756 218693 cri.go:89] found id: ""
I1122 00:20:02.204766 218693 logs.go:282] 1 containers: [ce556a5394180410f0cc434955d664ca0f25f8999150ddb0c902378b8f0ec7b7]
I1122 00:20:02.204829 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:02.209535 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1122 00:20:02.209603 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1122 00:20:02.247383 218693 cri.go:89] found id: ""
I1122 00:20:02.247408 218693 logs.go:282] 0 containers: []
W1122 00:20:02.247416 218693 logs.go:284] No container was found matching "coredns"
I1122 00:20:02.247422 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1122 00:20:02.247484 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1122 00:20:02.277440 218693 cri.go:89] found id: "8cb86218489f4e4ea496e1edbb81a5bfa9657517fe1f841ab1f262169880ef78"
I1122 00:20:02.277466 218693 cri.go:89] found id: "b072fb61e5e8d9fb0c450f6123a51d4ba86ee5162b4c0378a606893fb26410b2"
I1122 00:20:02.277473 218693 cri.go:89] found id: ""
I1122 00:20:02.277483 218693 logs.go:282] 2 containers: [8cb86218489f4e4ea496e1edbb81a5bfa9657517fe1f841ab1f262169880ef78 b072fb61e5e8d9fb0c450f6123a51d4ba86ee5162b4c0378a606893fb26410b2]
I1122 00:20:02.277545 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:02.282049 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:02.286514 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1122 00:20:02.286581 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1122 00:20:02.316706 218693 cri.go:89] found id: ""
I1122 00:20:02.316733 218693 logs.go:282] 0 containers: []
W1122 00:20:02.316744 218693 logs.go:284] No container was found matching "kube-proxy"
I1122 00:20:02.316753 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1122 00:20:02.316813 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1122 00:20:02.347451 218693 cri.go:89] found id: "91989ea7d1eb87264ea639688db06633fb66749e41f18e88a4bd9a185ac7a68a"
I1122 00:20:02.347471 218693 cri.go:89] found id: "13facf83677f37e3b97292f0f2dc164096fcd8cff5e71fdcf0ad085e3602f216"
I1122 00:20:02.347476 218693 cri.go:89] found id: ""
I1122 00:20:02.347486 218693 logs.go:282] 2 containers: [91989ea7d1eb87264ea639688db06633fb66749e41f18e88a4bd9a185ac7a68a 13facf83677f37e3b97292f0f2dc164096fcd8cff5e71fdcf0ad085e3602f216]
I1122 00:20:02.347542 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:02.352378 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:02.356502 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1122 00:20:02.356561 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1122 00:20:02.384778 218693 cri.go:89] found id: ""
I1122 00:20:02.384802 218693 logs.go:282] 0 containers: []
W1122 00:20:02.384814 218693 logs.go:284] No container was found matching "kindnet"
I1122 00:20:02.384825 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1122 00:20:02.384887 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1122 00:20:02.421102 218693 cri.go:89] found id: ""
I1122 00:20:02.421131 218693 logs.go:282] 0 containers: []
W1122 00:20:02.421143 218693 logs.go:284] No container was found matching "storage-provisioner"
I1122 00:20:02.421156 218693 logs.go:123] Gathering logs for containerd ...
I1122 00:20:02.421171 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1122 00:20:02.477880 218693 logs.go:123] Gathering logs for kubelet ...
I1122 00:20:02.477924 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1122 00:20:02.574856 218693 logs.go:123] Gathering logs for describe nodes ...
I1122 00:20:02.574892 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1122 00:20:02.641120 218693 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1122 00:20:02.641142 218693 logs.go:123] Gathering logs for kube-apiserver [031d1abe53b88560dbac18645a9d04e01621af0ebc4bdacf93ba7cd987bdbc7d] ...
I1122 00:20:02.641154 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 031d1abe53b88560dbac18645a9d04e01621af0ebc4bdacf93ba7cd987bdbc7d"
I1122 00:20:02.681648 218693 logs.go:123] Gathering logs for kube-scheduler [8cb86218489f4e4ea496e1edbb81a5bfa9657517fe1f841ab1f262169880ef78] ...
I1122 00:20:02.681686 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 8cb86218489f4e4ea496e1edbb81a5bfa9657517fe1f841ab1f262169880ef78"
I1122 00:20:02.739093 218693 logs.go:123] Gathering logs for kube-scheduler [b072fb61e5e8d9fb0c450f6123a51d4ba86ee5162b4c0378a606893fb26410b2] ...
I1122 00:20:02.739128 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 b072fb61e5e8d9fb0c450f6123a51d4ba86ee5162b4c0378a606893fb26410b2"
I1122 00:20:02.774358 218693 logs.go:123] Gathering logs for kube-controller-manager [13facf83677f37e3b97292f0f2dc164096fcd8cff5e71fdcf0ad085e3602f216] ...
I1122 00:20:02.774395 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 13facf83677f37e3b97292f0f2dc164096fcd8cff5e71fdcf0ad085e3602f216"
I1122 00:20:02.810272 218693 logs.go:123] Gathering logs for container status ...
I1122 00:20:02.810310 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1122 00:20:02.842900 218693 logs.go:123] Gathering logs for dmesg ...
I1122 00:20:02.842942 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1122 00:20:02.857743 218693 logs.go:123] Gathering logs for kube-apiserver [2e3aaa0d96c2cc9d110b994e3df108e0a78b3e80dae0dc52febf87cbdd528587] ...
I1122 00:20:02.857784 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 2e3aaa0d96c2cc9d110b994e3df108e0a78b3e80dae0dc52febf87cbdd528587"
I1122 00:20:02.894229 218693 logs.go:123] Gathering logs for etcd [ce556a5394180410f0cc434955d664ca0f25f8999150ddb0c902378b8f0ec7b7] ...
I1122 00:20:02.894272 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 ce556a5394180410f0cc434955d664ca0f25f8999150ddb0c902378b8f0ec7b7"
I1122 00:20:02.929523 218693 logs.go:123] Gathering logs for kube-controller-manager [91989ea7d1eb87264ea639688db06633fb66749e41f18e88a4bd9a185ac7a68a] ...
I1122 00:20:02.929555 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 91989ea7d1eb87264ea639688db06633fb66749e41f18e88a4bd9a185ac7a68a"
I1122 00:20:05.459958 218693 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1122 00:20:05.460532 218693 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1122 00:20:05.460597 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1122 00:20:05.460676 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1122 00:20:05.488636 218693 cri.go:89] found id: "031d1abe53b88560dbac18645a9d04e01621af0ebc4bdacf93ba7cd987bdbc7d"
I1122 00:20:05.488658 218693 cri.go:89] found id: "2e3aaa0d96c2cc9d110b994e3df108e0a78b3e80dae0dc52febf87cbdd528587"
I1122 00:20:05.488662 218693 cri.go:89] found id: ""
I1122 00:20:05.488670 218693 logs.go:282] 2 containers: [031d1abe53b88560dbac18645a9d04e01621af0ebc4bdacf93ba7cd987bdbc7d 2e3aaa0d96c2cc9d110b994e3df108e0a78b3e80dae0dc52febf87cbdd528587]
I1122 00:20:05.488715 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:05.492971 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:05.496804 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1122 00:20:05.496876 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1122 00:20:05.524856 218693 cri.go:89] found id: "ce556a5394180410f0cc434955d664ca0f25f8999150ddb0c902378b8f0ec7b7"
I1122 00:20:05.524883 218693 cri.go:89] found id: ""
I1122 00:20:05.524902 218693 logs.go:282] 1 containers: [ce556a5394180410f0cc434955d664ca0f25f8999150ddb0c902378b8f0ec7b7]
I1122 00:20:05.524962 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:05.529434 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1122 00:20:05.529521 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1122 00:20:05.557780 218693 cri.go:89] found id: ""
I1122 00:20:05.557805 218693 logs.go:282] 0 containers: []
W1122 00:20:05.557819 218693 logs.go:284] No container was found matching "coredns"
I1122 00:20:05.557828 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1122 00:20:05.557885 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1122 00:20:05.586142 218693 cri.go:89] found id: "8cb86218489f4e4ea496e1edbb81a5bfa9657517fe1f841ab1f262169880ef78"
I1122 00:20:05.586166 218693 cri.go:89] found id: "b072fb61e5e8d9fb0c450f6123a51d4ba86ee5162b4c0378a606893fb26410b2"
I1122 00:20:05.586173 218693 cri.go:89] found id: ""
I1122 00:20:05.586184 218693 logs.go:282] 2 containers: [8cb86218489f4e4ea496e1edbb81a5bfa9657517fe1f841ab1f262169880ef78 b072fb61e5e8d9fb0c450f6123a51d4ba86ee5162b4c0378a606893fb26410b2]
I1122 00:20:05.586248 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:05.590458 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:05.594671 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1122 00:20:05.594752 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1122 00:20:05.623542 218693 cri.go:89] found id: ""
I1122 00:20:05.623565 218693 logs.go:282] 0 containers: []
W1122 00:20:05.623575 218693 logs.go:284] No container was found matching "kube-proxy"
I1122 00:20:05.623585 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1122 00:20:05.623653 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1122 00:20:05.651642 218693 cri.go:89] found id: "91989ea7d1eb87264ea639688db06633fb66749e41f18e88a4bd9a185ac7a68a"
I1122 00:20:05.651663 218693 cri.go:89] found id: "13facf83677f37e3b97292f0f2dc164096fcd8cff5e71fdcf0ad085e3602f216"
I1122 00:20:05.651666 218693 cri.go:89] found id: ""
I1122 00:20:05.651674 218693 logs.go:282] 2 containers: [91989ea7d1eb87264ea639688db06633fb66749e41f18e88a4bd9a185ac7a68a 13facf83677f37e3b97292f0f2dc164096fcd8cff5e71fdcf0ad085e3602f216]
I1122 00:20:05.651724 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:05.655785 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:05.659668 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1122 00:20:05.659743 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1122 00:20:05.687725 218693 cri.go:89] found id: ""
I1122 00:20:05.687748 218693 logs.go:282] 0 containers: []
W1122 00:20:05.687756 218693 logs.go:284] No container was found matching "kindnet"
I1122 00:20:05.687762 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1122 00:20:05.687810 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1122 00:20:05.714403 218693 cri.go:89] found id: ""
I1122 00:20:05.714432 218693 logs.go:282] 0 containers: []
W1122 00:20:05.714444 218693 logs.go:284] No container was found matching "storage-provisioner"
I1122 00:20:05.714457 218693 logs.go:123] Gathering logs for kube-apiserver [2e3aaa0d96c2cc9d110b994e3df108e0a78b3e80dae0dc52febf87cbdd528587] ...
I1122 00:20:05.714472 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 2e3aaa0d96c2cc9d110b994e3df108e0a78b3e80dae0dc52febf87cbdd528587"
I1122 00:20:05.748851 218693 logs.go:123] Gathering logs for kube-scheduler [b072fb61e5e8d9fb0c450f6123a51d4ba86ee5162b4c0378a606893fb26410b2] ...
I1122 00:20:05.748901 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 b072fb61e5e8d9fb0c450f6123a51d4ba86ee5162b4c0378a606893fb26410b2"
I1122 00:20:05.784862 218693 logs.go:123] Gathering logs for kube-controller-manager [91989ea7d1eb87264ea639688db06633fb66749e41f18e88a4bd9a185ac7a68a] ...
I1122 00:20:05.784899 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 91989ea7d1eb87264ea639688db06633fb66749e41f18e88a4bd9a185ac7a68a"
I1122 00:20:05.813532 218693 logs.go:123] Gathering logs for kube-controller-manager [13facf83677f37e3b97292f0f2dc164096fcd8cff5e71fdcf0ad085e3602f216] ...
I1122 00:20:05.813569 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 13facf83677f37e3b97292f0f2dc164096fcd8cff5e71fdcf0ad085e3602f216"
I1122 00:20:05.844930 218693 logs.go:123] Gathering logs for containerd ...
I1122 00:20:05.844965 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1122 00:20:05.897273 218693 logs.go:123] Gathering logs for container status ...
I1122 00:20:05.897337 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1122 00:20:05.935381 218693 logs.go:123] Gathering logs for kubelet ...
I1122 00:20:05.935417 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1122 00:20:06.025566 218693 logs.go:123] Gathering logs for dmesg ...
I1122 00:20:06.025612 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1122 00:20:06.040810 218693 logs.go:123] Gathering logs for describe nodes ...
I1122 00:20:06.040843 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1122 00:20:06.102006 218693 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1122 00:20:06.102032 218693 logs.go:123] Gathering logs for etcd [ce556a5394180410f0cc434955d664ca0f25f8999150ddb0c902378b8f0ec7b7] ...
I1122 00:20:06.102050 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 ce556a5394180410f0cc434955d664ca0f25f8999150ddb0c902378b8f0ec7b7"
I1122 00:20:06.136887 218693 logs.go:123] Gathering logs for kube-scheduler [8cb86218489f4e4ea496e1edbb81a5bfa9657517fe1f841ab1f262169880ef78] ...
I1122 00:20:06.136937 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 8cb86218489f4e4ea496e1edbb81a5bfa9657517fe1f841ab1f262169880ef78"
I1122 00:20:06.192634 218693 logs.go:123] Gathering logs for kube-apiserver [031d1abe53b88560dbac18645a9d04e01621af0ebc4bdacf93ba7cd987bdbc7d] ...
I1122 00:20:06.192674 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 031d1abe53b88560dbac18645a9d04e01621af0ebc4bdacf93ba7cd987bdbc7d"
W1122 00:20:04.029159 251199 node_ready.go:57] node "no-preload-781232" has "Ready":"False" status (will retry)
W1122 00:20:06.067087 251199 node_ready.go:57] node "no-preload-781232" has "Ready":"False" status (will retry)
I1122 00:20:06.722373 260527 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21934-9059/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v embed-certs-491677:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763588073-21934@sha256:19d3da0413e1bfa354cbb88004c6796f8e9772a083e0230b0f6e50212ee04c7e -I lz4 -xf /preloaded.tar -C /extractDir: (4.451238931s)
I1122 00:20:06.722412 260527 kic.go:203] duration metric: took 4.451422839s to extract preloaded images to volume ...
W1122 00:20:06.722533 260527 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
W1122 00:20:06.722570 260527 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.
I1122 00:20:06.722615 260527 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1122 00:20:06.782296 260527 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname embed-certs-491677 --name embed-certs-491677 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=embed-certs-491677 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=embed-certs-491677 --network embed-certs-491677 --ip 192.168.85.2 --volume embed-certs-491677:/var --security-opt apparmor=unconfined --memory=3072mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763588073-21934@sha256:19d3da0413e1bfa354cbb88004c6796f8e9772a083e0230b0f6e50212ee04c7e
I1122 00:20:07.109552 260527 cli_runner.go:164] Run: docker container inspect embed-certs-491677 --format={{.State.Running}}
I1122 00:20:07.129178 260527 cli_runner.go:164] Run: docker container inspect embed-certs-491677 --format={{.State.Status}}
I1122 00:20:07.148399 260527 cli_runner.go:164] Run: docker exec embed-certs-491677 stat /var/lib/dpkg/alternatives/iptables
I1122 00:20:07.196229 260527 oci.go:144] the created container "embed-certs-491677" has a running status.
I1122 00:20:07.196362 260527 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21934-9059/.minikube/machines/embed-certs-491677/id_rsa...
I1122 00:20:07.257446 260527 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21934-9059/.minikube/machines/embed-certs-491677/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1122 00:20:07.289218 260527 cli_runner.go:164] Run: docker container inspect embed-certs-491677 --format={{.State.Status}}
I1122 00:20:07.310559 260527 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1122 00:20:07.310578 260527 kic_runner.go:114] Args: [docker exec --privileged embed-certs-491677 chown docker:docker /home/docker/.ssh/authorized_keys]
I1122 00:20:07.351585 260527 cli_runner.go:164] Run: docker container inspect embed-certs-491677 --format={{.State.Status}}
I1122 00:20:07.374469 260527 machine.go:94] provisionDockerMachine start ...
I1122 00:20:07.374754 260527 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-491677
I1122 00:20:07.397641 260527 main.go:143] libmachine: Using SSH client type: native
I1122 00:20:07.397885 260527 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33073 <nil> <nil>}
I1122 00:20:07.397902 260527 main.go:143] libmachine: About to run SSH command:
hostname
I1122 00:20:07.398578 260527 main.go:143] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:36770->127.0.0.1:33073: read: connection reset by peer
I1122 00:20:10.523553 260527 main.go:143] libmachine: SSH cmd err, output: <nil>: embed-certs-491677
I1122 00:20:10.523587 260527 ubuntu.go:182] provisioning hostname "embed-certs-491677"
I1122 00:20:10.523652 260527 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-491677
I1122 00:20:10.544251 260527 main.go:143] libmachine: Using SSH client type: native
I1122 00:20:10.544519 260527 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33073 <nil> <nil>}
I1122 00:20:10.544536 260527 main.go:143] libmachine: About to run SSH command:
sudo hostname embed-certs-491677 && echo "embed-certs-491677" | sudo tee /etc/hostname
I1122 00:20:10.679747 260527 main.go:143] libmachine: SSH cmd err, output: <nil>: embed-certs-491677
I1122 00:20:10.679832 260527 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-491677
I1122 00:20:10.700586 260527 main.go:143] libmachine: Using SSH client type: native
I1122 00:20:10.700833 260527 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33073 <nil> <nil>}
I1122 00:20:10.700858 260527 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sembed-certs-491677' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 embed-certs-491677/g' /etc/hosts;
else
echo '127.0.1.1 embed-certs-491677' | sudo tee -a /etc/hosts;
fi
fi
I1122 00:20:10.825289 260527 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1122 00:20:10.825326 260527 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21934-9059/.minikube CaCertPath:/home/jenkins/minikube-integration/21934-9059/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21934-9059/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21934-9059/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21934-9059/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21934-9059/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21934-9059/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21934-9059/.minikube}
I1122 00:20:10.825375 260527 ubuntu.go:190] setting up certificates
I1122 00:20:10.825411 260527 provision.go:84] configureAuth start
I1122 00:20:10.825489 260527 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" embed-certs-491677
I1122 00:20:10.844220 260527 provision.go:143] copyHostCerts
I1122 00:20:10.844298 260527 exec_runner.go:144] found /home/jenkins/minikube-integration/21934-9059/.minikube/ca.pem, removing ...
I1122 00:20:10.844307 260527 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21934-9059/.minikube/ca.pem
I1122 00:20:10.844403 260527 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21934-9059/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21934-9059/.minikube/ca.pem (1082 bytes)
I1122 00:20:10.844496 260527 exec_runner.go:144] found /home/jenkins/minikube-integration/21934-9059/.minikube/cert.pem, removing ...
I1122 00:20:10.844506 260527 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21934-9059/.minikube/cert.pem
I1122 00:20:10.844532 260527 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21934-9059/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21934-9059/.minikube/cert.pem (1123 bytes)
I1122 00:20:10.844590 260527 exec_runner.go:144] found /home/jenkins/minikube-integration/21934-9059/.minikube/key.pem, removing ...
I1122 00:20:10.844598 260527 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21934-9059/.minikube/key.pem
I1122 00:20:10.844620 260527 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21934-9059/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21934-9059/.minikube/key.pem (1679 bytes)
I1122 00:20:10.844669 260527 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21934-9059/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21934-9059/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21934-9059/.minikube/certs/ca-key.pem org=jenkins.embed-certs-491677 san=[127.0.0.1 192.168.85.2 embed-certs-491677 localhost minikube]
I1122 00:20:10.881095 260527 provision.go:177] copyRemoteCerts
I1122 00:20:10.881150 260527 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1122 00:20:10.881198 260527 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-491677
I1122 00:20:10.899974 260527 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33073 SSHKeyPath:/home/jenkins/minikube-integration/21934-9059/.minikube/machines/embed-certs-491677/id_rsa Username:docker}
I1122 00:20:10.993091 260527 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21934-9059/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1122 00:20:11.014763 260527 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21934-9059/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1122 00:20:11.034702 260527 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21934-9059/.minikube/machines/server.pem --> /etc/docker/server.pem (1220 bytes)
I1122 00:20:11.053678 260527 provision.go:87] duration metric: took 228.246896ms to configureAuth
I1122 00:20:11.053708 260527 ubuntu.go:206] setting minikube options for container-runtime
I1122 00:20:11.053892 260527 config.go:182] Loaded profile config "embed-certs-491677": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1122 00:20:11.053909 260527 machine.go:97] duration metric: took 3.67941396s to provisionDockerMachine
I1122 00:20:11.053917 260527 client.go:176] duration metric: took 9.342299036s to LocalClient.Create
I1122 00:20:11.053943 260527 start.go:167] duration metric: took 9.342388491s to libmachine.API.Create "embed-certs-491677"
I1122 00:20:11.053956 260527 start.go:293] postStartSetup for "embed-certs-491677" (driver="docker")
I1122 00:20:11.053984 260527 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1122 00:20:11.054052 260527 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1122 00:20:11.054103 260527 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-491677
I1122 00:20:11.073167 260527 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33073 SSHKeyPath:/home/jenkins/minikube-integration/21934-9059/.minikube/machines/embed-certs-491677/id_rsa Username:docker}
I1122 00:20:11.168158 260527 ssh_runner.go:195] Run: cat /etc/os-release
I1122 00:20:11.172076 260527 main.go:143] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1122 00:20:11.172422 260527 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1122 00:20:11.172459 260527 filesync.go:126] Scanning /home/jenkins/minikube-integration/21934-9059/.minikube/addons for local assets ...
I1122 00:20:11.172556 260527 filesync.go:126] Scanning /home/jenkins/minikube-integration/21934-9059/.minikube/files for local assets ...
I1122 00:20:11.172675 260527 filesync.go:149] local asset: /home/jenkins/minikube-integration/21934-9059/.minikube/files/etc/ssl/certs/145302.pem -> 145302.pem in /etc/ssl/certs
I1122 00:20:11.172811 260527 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1122 00:20:11.182207 260527 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21934-9059/.minikube/files/etc/ssl/certs/145302.pem --> /etc/ssl/certs/145302.pem (1708 bytes)
I1122 00:20:11.203784 260527 start.go:296] duration metric: took 149.811059ms for postStartSetup
I1122 00:20:11.204173 260527 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" embed-certs-491677
I1122 00:20:11.222954 260527 profile.go:143] Saving config to /home/jenkins/minikube-integration/21934-9059/.minikube/profiles/embed-certs-491677/config.json ...
I1122 00:20:11.223305 260527 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1122 00:20:11.223354 260527 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-491677
I1122 00:20:11.242018 260527 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33073 SSHKeyPath:/home/jenkins/minikube-integration/21934-9059/.minikube/machines/embed-certs-491677/id_rsa Username:docker}
I1122 00:20:11.333726 260527 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1122 00:20:11.338527 260527 start.go:128] duration metric: took 9.62936097s to createHost
I1122 00:20:11.338558 260527 start.go:83] releasing machines lock for "embed-certs-491677", held for 9.629502399s
I1122 00:20:11.338631 260527 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" embed-certs-491677
I1122 00:20:11.357563 260527 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1122 00:20:11.357634 260527 ssh_runner.go:195] Run: cat /version.json
I1122 00:20:11.357684 260527 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-491677
I1122 00:20:11.357690 260527 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" embed-certs-491677
I1122 00:20:11.377098 260527 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33073 SSHKeyPath:/home/jenkins/minikube-integration/21934-9059/.minikube/machines/embed-certs-491677/id_rsa Username:docker}
I1122 00:20:11.378067 260527 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33073 SSHKeyPath:/home/jenkins/minikube-integration/21934-9059/.minikube/machines/embed-certs-491677/id_rsa Username:docker}
I1122 00:20:08.727161 218693 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1122 00:20:08.727652 218693 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1122 00:20:08.727710 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1122 00:20:08.727762 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1122 00:20:08.754498 218693 cri.go:89] found id: "031d1abe53b88560dbac18645a9d04e01621af0ebc4bdacf93ba7cd987bdbc7d"
I1122 00:20:08.754522 218693 cri.go:89] found id: "2e3aaa0d96c2cc9d110b994e3df108e0a78b3e80dae0dc52febf87cbdd528587"
I1122 00:20:08.754527 218693 cri.go:89] found id: ""
I1122 00:20:08.754535 218693 logs.go:282] 2 containers: [031d1abe53b88560dbac18645a9d04e01621af0ebc4bdacf93ba7cd987bdbc7d 2e3aaa0d96c2cc9d110b994e3df108e0a78b3e80dae0dc52febf87cbdd528587]
I1122 00:20:08.754583 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:08.758867 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:08.762449 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1122 00:20:08.762501 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1122 00:20:08.788422 218693 cri.go:89] found id: "ce556a5394180410f0cc434955d664ca0f25f8999150ddb0c902378b8f0ec7b7"
I1122 00:20:08.788444 218693 cri.go:89] found id: ""
I1122 00:20:08.788455 218693 logs.go:282] 1 containers: [ce556a5394180410f0cc434955d664ca0f25f8999150ddb0c902378b8f0ec7b7]
I1122 00:20:08.788512 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:08.792603 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1122 00:20:08.792668 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1122 00:20:08.820677 218693 cri.go:89] found id: ""
I1122 00:20:08.820703 218693 logs.go:282] 0 containers: []
W1122 00:20:08.820711 218693 logs.go:284] No container was found matching "coredns"
I1122 00:20:08.820717 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1122 00:20:08.820769 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1122 00:20:08.848396 218693 cri.go:89] found id: "8cb86218489f4e4ea496e1edbb81a5bfa9657517fe1f841ab1f262169880ef78"
I1122 00:20:08.848418 218693 cri.go:89] found id: "b072fb61e5e8d9fb0c450f6123a51d4ba86ee5162b4c0378a606893fb26410b2"
I1122 00:20:08.848422 218693 cri.go:89] found id: ""
I1122 00:20:08.848429 218693 logs.go:282] 2 containers: [8cb86218489f4e4ea496e1edbb81a5bfa9657517fe1f841ab1f262169880ef78 b072fb61e5e8d9fb0c450f6123a51d4ba86ee5162b4c0378a606893fb26410b2]
I1122 00:20:08.848485 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:08.852633 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:08.856393 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1122 00:20:08.856469 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1122 00:20:08.884423 218693 cri.go:89] found id: ""
I1122 00:20:08.884454 218693 logs.go:282] 0 containers: []
W1122 00:20:08.884467 218693 logs.go:284] No container was found matching "kube-proxy"
I1122 00:20:08.884476 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1122 00:20:08.884529 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1122 00:20:08.911898 218693 cri.go:89] found id: "91989ea7d1eb87264ea639688db06633fb66749e41f18e88a4bd9a185ac7a68a"
I1122 00:20:08.911917 218693 cri.go:89] found id: "13facf83677f37e3b97292f0f2dc164096fcd8cff5e71fdcf0ad085e3602f216"
I1122 00:20:08.911921 218693 cri.go:89] found id: ""
I1122 00:20:08.911928 218693 logs.go:282] 2 containers: [91989ea7d1eb87264ea639688db06633fb66749e41f18e88a4bd9a185ac7a68a 13facf83677f37e3b97292f0f2dc164096fcd8cff5e71fdcf0ad085e3602f216]
I1122 00:20:08.912000 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:08.916097 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:08.919808 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1122 00:20:08.919868 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1122 00:20:08.945704 218693 cri.go:89] found id: ""
I1122 00:20:08.945731 218693 logs.go:282] 0 containers: []
W1122 00:20:08.945742 218693 logs.go:284] No container was found matching "kindnet"
I1122 00:20:08.945750 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1122 00:20:08.945811 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1122 00:20:08.971599 218693 cri.go:89] found id: ""
I1122 00:20:08.971630 218693 logs.go:282] 0 containers: []
W1122 00:20:08.971642 218693 logs.go:284] No container was found matching "storage-provisioner"
I1122 00:20:08.971658 218693 logs.go:123] Gathering logs for dmesg ...
I1122 00:20:08.971686 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1122 00:20:08.985779 218693 logs.go:123] Gathering logs for etcd [ce556a5394180410f0cc434955d664ca0f25f8999150ddb0c902378b8f0ec7b7] ...
I1122 00:20:08.985806 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 ce556a5394180410f0cc434955d664ca0f25f8999150ddb0c902378b8f0ec7b7"
I1122 00:20:09.018373 218693 logs.go:123] Gathering logs for kube-scheduler [b072fb61e5e8d9fb0c450f6123a51d4ba86ee5162b4c0378a606893fb26410b2] ...
I1122 00:20:09.018407 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 b072fb61e5e8d9fb0c450f6123a51d4ba86ee5162b4c0378a606893fb26410b2"
I1122 00:20:09.055328 218693 logs.go:123] Gathering logs for containerd ...
I1122 00:20:09.055359 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1122 00:20:09.098567 218693 logs.go:123] Gathering logs for kubelet ...
I1122 00:20:09.098608 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1122 00:20:09.183392 218693 logs.go:123] Gathering logs for describe nodes ...
I1122 00:20:09.183433 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1122 00:20:09.242636 218693 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1122 00:20:09.242654 218693 logs.go:123] Gathering logs for kube-apiserver [031d1abe53b88560dbac18645a9d04e01621af0ebc4bdacf93ba7cd987bdbc7d] ...
I1122 00:20:09.242666 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 031d1abe53b88560dbac18645a9d04e01621af0ebc4bdacf93ba7cd987bdbc7d"
I1122 00:20:09.276133 218693 logs.go:123] Gathering logs for kube-apiserver [2e3aaa0d96c2cc9d110b994e3df108e0a78b3e80dae0dc52febf87cbdd528587] ...
I1122 00:20:09.276179 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 2e3aaa0d96c2cc9d110b994e3df108e0a78b3e80dae0dc52febf87cbdd528587"
I1122 00:20:09.310731 218693 logs.go:123] Gathering logs for kube-scheduler [8cb86218489f4e4ea496e1edbb81a5bfa9657517fe1f841ab1f262169880ef78] ...
I1122 00:20:09.310769 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 8cb86218489f4e4ea496e1edbb81a5bfa9657517fe1f841ab1f262169880ef78"
I1122 00:20:09.362187 218693 logs.go:123] Gathering logs for kube-controller-manager [91989ea7d1eb87264ea639688db06633fb66749e41f18e88a4bd9a185ac7a68a] ...
I1122 00:20:09.362226 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 91989ea7d1eb87264ea639688db06633fb66749e41f18e88a4bd9a185ac7a68a"
I1122 00:20:09.391737 218693 logs.go:123] Gathering logs for kube-controller-manager [13facf83677f37e3b97292f0f2dc164096fcd8cff5e71fdcf0ad085e3602f216] ...
I1122 00:20:09.391763 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 13facf83677f37e3b97292f0f2dc164096fcd8cff5e71fdcf0ad085e3602f216"
I1122 00:20:09.425753 218693 logs.go:123] Gathering logs for container status ...
I1122 00:20:09.425787 218693 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1122 00:20:11.959328 218693 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1122 00:20:11.959805 218693 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1122 00:20:11.959868 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1122 00:20:11.959935 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1122 00:20:11.993113 218693 cri.go:89] found id: "031d1abe53b88560dbac18645a9d04e01621af0ebc4bdacf93ba7cd987bdbc7d"
I1122 00:20:11.993137 218693 cri.go:89] found id: "2e3aaa0d96c2cc9d110b994e3df108e0a78b3e80dae0dc52febf87cbdd528587"
I1122 00:20:11.993143 218693 cri.go:89] found id: ""
I1122 00:20:11.993153 218693 logs.go:282] 2 containers: [031d1abe53b88560dbac18645a9d04e01621af0ebc4bdacf93ba7cd987bdbc7d 2e3aaa0d96c2cc9d110b994e3df108e0a78b3e80dae0dc52febf87cbdd528587]
I1122 00:20:11.993213 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:11.997946 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:12.002616 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1122 00:20:12.002741 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1122 00:20:12.040113 218693 cri.go:89] found id: "ce556a5394180410f0cc434955d664ca0f25f8999150ddb0c902378b8f0ec7b7"
I1122 00:20:12.040150 218693 cri.go:89] found id: ""
I1122 00:20:12.040160 218693 logs.go:282] 1 containers: [ce556a5394180410f0cc434955d664ca0f25f8999150ddb0c902378b8f0ec7b7]
I1122 00:20:12.040220 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:12.045665 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1122 00:20:12.045732 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1122 00:20:12.081343 218693 cri.go:89] found id: ""
I1122 00:20:12.081375 218693 logs.go:282] 0 containers: []
W1122 00:20:12.081384 218693 logs.go:284] No container was found matching "coredns"
I1122 00:20:12.081389 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1122 00:20:12.081449 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1122 00:20:12.116486 218693 cri.go:89] found id: "8cb86218489f4e4ea496e1edbb81a5bfa9657517fe1f841ab1f262169880ef78"
I1122 00:20:12.117024 218693 cri.go:89] found id: "b072fb61e5e8d9fb0c450f6123a51d4ba86ee5162b4c0378a606893fb26410b2"
I1122 00:20:12.117045 218693 cri.go:89] found id: ""
I1122 00:20:12.117055 218693 logs.go:282] 2 containers: [8cb86218489f4e4ea496e1edbb81a5bfa9657517fe1f841ab1f262169880ef78 b072fb61e5e8d9fb0c450f6123a51d4ba86ee5162b4c0378a606893fb26410b2]
I1122 00:20:12.117115 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:12.121469 218693 ssh_runner.go:195] Run: which crictl
I1122 00:20:12.125453 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1122 00:20:12.125520 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1122 00:20:12.159076 218693 cri.go:89] found id: ""
I1122 00:20:12.159108 218693 logs.go:282] 0 containers: []
W1122 00:20:12.159121 218693 logs.go:284] No container was found matching "kube-proxy"
I1122 00:20:12.159130 218693 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1122 00:20:12.159191 218693 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1122 00:20:11.523900 260527 ssh_runner.go:195] Run: systemctl --version
I1122 00:20:11.531084 260527 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1122 00:20:11.536010 260527 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1122 00:20:11.536130 260527 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1122 00:20:11.563766 260527 cni.go:262] disabled [/etc/cni/net.d/10-crio-bridge.conflist.disabled, /etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
I1122 00:20:11.563792 260527 start.go:496] detecting cgroup driver to use...
I1122 00:20:11.563830 260527 detect.go:190] detected "systemd" cgroup driver on host os
I1122 00:20:11.563873 260527 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1122 00:20:11.579543 260527 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1122 00:20:11.593598 260527 docker.go:218] disabling cri-docker service (if available) ...
I1122 00:20:11.593666 260527 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1122 00:20:11.610889 260527 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1122 00:20:11.629723 260527 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1122 00:20:11.730670 260527 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1122 00:20:11.819921 260527 docker.go:234] disabling docker service ...
I1122 00:20:11.819985 260527 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1122 00:20:11.839159 260527 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1122 00:20:11.854142 260527 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1122 00:20:11.943699 260527 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1122 00:20:12.053855 260527 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1122 00:20:12.073171 260527 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1122 00:20:12.089999 260527 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I1122 00:20:12.105012 260527 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1122 00:20:12.117591 260527 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I1122 00:20:12.117652 260527 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I1122 00:20:12.128817 260527 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1122 00:20:12.142147 260527 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1122 00:20:12.154635 260527 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1122 00:20:12.169029 260527 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1122 00:20:12.181631 260527 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1122 00:20:12.194568 260527 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1122 00:20:12.207294 260527 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1122 00:20:12.218684 260527 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1122 00:20:12.228679 260527 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1122 00:20:12.241707 260527 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1122 00:20:12.337447 260527 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1122 00:20:12.443801 260527 start.go:543] Will wait 60s for socket path /run/containerd/containerd.sock
I1122 00:20:12.443870 260527 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I1122 00:20:12.448114 260527 start.go:564] Will wait 60s for crictl version
I1122 00:20:12.448178 260527 ssh_runner.go:195] Run: which crictl
I1122 00:20:12.452113 260527 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1122 00:20:12.481619 260527 start.go:580] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: v2.1.5
RuntimeApiVersion: v1
I1122 00:20:12.481687 260527 ssh_runner.go:195] Run: containerd --version
I1122 00:20:12.506954 260527 ssh_runner.go:195] Run: containerd --version
I1122 00:20:12.537127 260527 out.go:179] * Preparing Kubernetes v1.34.1 on containerd 2.1.5 ...
W1122 00:20:08.528688 251199 node_ready.go:57] node "no-preload-781232" has "Ready":"False" status (will retry)
W1122 00:20:10.529626 251199 node_ready.go:57] node "no-preload-781232" has "Ready":"False" status (will retry)
I1122 00:20:12.029744 251199 node_ready.go:49] node "no-preload-781232" is "Ready"
I1122 00:20:12.029782 251199 node_ready.go:38] duration metric: took 14.503754974s for node "no-preload-781232" to be "Ready" ...
I1122 00:20:12.029799 251199 api_server.go:52] waiting for apiserver process to appear ...
I1122 00:20:12.029867 251199 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1122 00:20:12.049755 251199 api_server.go:72] duration metric: took 14.826557708s to wait for apiserver process to appear ...
I1122 00:20:12.049782 251199 api_server.go:88] waiting for apiserver healthz status ...
I1122 00:20:12.049803 251199 api_server.go:253] Checking apiserver healthz at https://192.168.94.2:8443/healthz ...
I1122 00:20:12.055733 251199 api_server.go:279] https://192.168.94.2:8443/healthz returned 200:
ok
I1122 00:20:12.057374 251199 api_server.go:141] control plane version: v1.34.1
I1122 00:20:12.057405 251199 api_server.go:131] duration metric: took 7.61544ms to wait for apiserver health ...
I1122 00:20:12.057416 251199 system_pods.go:43] waiting for kube-system pods to appear ...
I1122 00:20:12.062154 251199 system_pods.go:59] 8 kube-system pods found
I1122 00:20:12.062190 251199 system_pods.go:61] "coredns-66bc5c9577-9wcct" [67b97cc5-016b-44d1-8119-dd6aa4932f83] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1122 00:20:12.062199 251199 system_pods.go:61] "etcd-no-preload-781232" [85c9627b-3102-439d-83e4-9ee3353591c1] Running
I1122 00:20:12.062207 251199 system_pods.go:61] "kindnet-llcnc" [fcdd9f25-4804-47c2-8f09-b6a2d688a8bc] Running
I1122 00:20:12.062212 251199 system_pods.go:61] "kube-apiserver-no-preload-781232" [4a4b5bf8-8262-46c5-9aa8-5a0bb0af364c] Running
I1122 00:20:12.062218 251199 system_pods.go:61] "kube-controller-manager-no-preload-781232" [0c4fed80-9ce3-4b0d-99dd-ae11fc92104e] Running
I1122 00:20:12.062223 251199 system_pods.go:61] "kube-proxy-685jg" [33a2d2c1-e364-4ec8-a9a0-69ba9146625f] Running
I1122 00:20:12.062228 251199 system_pods.go:61] "kube-scheduler-no-preload-781232" [ec2ea83e-6638-4945-b4e4-ef3142f30481] Running
I1122 00:20:12.062237 251199 system_pods.go:61] "storage-provisioner" [904bdf70-7728-45c5-a9ae-487aed28e6fc] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1122 00:20:12.062245 251199 system_pods.go:74] duration metric: took 4.821603ms to wait for pod list to return data ...
I1122 00:20:12.062254 251199 default_sa.go:34] waiting for default service account to be created ...
I1122 00:20:12.065112 251199 default_sa.go:45] found service account: "default"
I1122 00:20:12.065138 251199 default_sa.go:55] duration metric: took 2.848928ms for default service account to be created ...
I1122 00:20:12.065149 251199 system_pods.go:116] waiting for k8s-apps to be running ...
I1122 00:20:12.069582 251199 system_pods.go:86] 8 kube-system pods found
I1122 00:20:12.069625 251199 system_pods.go:89] "coredns-66bc5c9577-9wcct" [67b97cc5-016b-44d1-8119-dd6aa4932f83] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1122 00:20:12.069633 251199 system_pods.go:89] "etcd-no-preload-781232" [85c9627b-3102-439d-83e4-9ee3353591c1] Running
I1122 00:20:12.069648 251199 system_pods.go:89] "kindnet-llcnc" [fcdd9f25-4804-47c2-8f09-b6a2d688a8bc] Running
I1122 00:20:12.069655 251199 system_pods.go:89] "kube-apiserver-no-preload-781232" [4a4b5bf8-8262-46c5-9aa8-5a0bb0af364c] Running
I1122 00:20:12.069661 251199 system_pods.go:89] "kube-controller-manager-no-preload-781232" [0c4fed80-9ce3-4b0d-99dd-ae11fc92104e] Running
I1122 00:20:12.069666 251199 system_pods.go:89] "kube-proxy-685jg" [33a2d2c1-e364-4ec8-a9a0-69ba9146625f] Running
I1122 00:20:12.069670 251199 system_pods.go:89] "kube-scheduler-no-preload-781232" [ec2ea83e-6638-4945-b4e4-ef3142f30481] Running
I1122 00:20:12.069676 251199 system_pods.go:89] "storage-provisioner" [904bdf70-7728-45c5-a9ae-487aed28e6fc] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1122 00:20:12.069728 251199 retry.go:31] will retry after 227.269849ms: missing components: kube-dns
I1122 00:20:12.301834 251199 system_pods.go:86] 8 kube-system pods found
I1122 00:20:12.301869 251199 system_pods.go:89] "coredns-66bc5c9577-9wcct" [67b97cc5-016b-44d1-8119-dd6aa4932f83] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1122 00:20:12.301877 251199 system_pods.go:89] "etcd-no-preload-781232" [85c9627b-3102-439d-83e4-9ee3353591c1] Running
I1122 00:20:12.301886 251199 system_pods.go:89] "kindnet-llcnc" [fcdd9f25-4804-47c2-8f09-b6a2d688a8bc] Running
I1122 00:20:12.301892 251199 system_pods.go:89] "kube-apiserver-no-preload-781232" [4a4b5bf8-8262-46c5-9aa8-5a0bb0af364c] Running
I1122 00:20:12.301898 251199 system_pods.go:89] "kube-controller-manager-no-preload-781232" [0c4fed80-9ce3-4b0d-99dd-ae11fc92104e] Running
I1122 00:20:12.301903 251199 system_pods.go:89] "kube-proxy-685jg" [33a2d2c1-e364-4ec8-a9a0-69ba9146625f] Running
I1122 00:20:12.301910 251199 system_pods.go:89] "kube-scheduler-no-preload-781232" [ec2ea83e-6638-4945-b4e4-ef3142f30481] Running
I1122 00:20:12.301917 251199 system_pods.go:89] "storage-provisioner" [904bdf70-7728-45c5-a9ae-487aed28e6fc] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1122 00:20:12.301938 251199 retry.go:31] will retry after 387.887736ms: missing components: kube-dns
I1122 00:20:12.694992 251199 system_pods.go:86] 8 kube-system pods found
I1122 00:20:12.695026 251199 system_pods.go:89] "coredns-66bc5c9577-9wcct" [67b97cc5-016b-44d1-8119-dd6aa4932f83] Running
I1122 00:20:12.695035 251199 system_pods.go:89] "etcd-no-preload-781232" [85c9627b-3102-439d-83e4-9ee3353591c1] Running
I1122 00:20:12.695041 251199 system_pods.go:89] "kindnet-llcnc" [fcdd9f25-4804-47c2-8f09-b6a2d688a8bc] Running
I1122 00:20:12.695047 251199 system_pods.go:89] "kube-apiserver-no-preload-781232" [4a4b5bf8-8262-46c5-9aa8-5a0bb0af364c] Running
I1122 00:20:12.695052 251199 system_pods.go:89] "kube-controller-manager-no-preload-781232" [0c4fed80-9ce3-4b0d-99dd-ae11fc92104e] Running
I1122 00:20:12.695060 251199 system_pods.go:89] "kube-proxy-685jg" [33a2d2c1-e364-4ec8-a9a0-69ba9146625f] Running
I1122 00:20:12.695065 251199 system_pods.go:89] "kube-scheduler-no-preload-781232" [ec2ea83e-6638-4945-b4e4-ef3142f30481] Running
I1122 00:20:12.695070 251199 system_pods.go:89] "storage-provisioner" [904bdf70-7728-45c5-a9ae-487aed28e6fc] Running
I1122 00:20:12.695080 251199 system_pods.go:126] duration metric: took 629.924123ms to wait for k8s-apps to be running ...
I1122 00:20:12.695093 251199 system_svc.go:44] waiting for kubelet service to be running ....
I1122 00:20:12.695144 251199 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1122 00:20:12.708823 251199 system_svc.go:56] duration metric: took 13.721013ms WaitForService to wait for kubelet
I1122 00:20:12.708855 251199 kubeadm.go:587] duration metric: took 15.485663176s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1122 00:20:12.708874 251199 node_conditions.go:102] verifying NodePressure condition ...
I1122 00:20:12.712345 251199 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I1122 00:20:12.712376 251199 node_conditions.go:123] node cpu capacity is 8
I1122 00:20:12.712396 251199 node_conditions.go:105] duration metric: took 3.516354ms to run NodePressure ...
I1122 00:20:12.712412 251199 start.go:242] waiting for startup goroutines ...
I1122 00:20:12.712423 251199 start.go:247] waiting for cluster config update ...
I1122 00:20:12.712441 251199 start.go:256] writing updated cluster config ...
I1122 00:20:12.712733 251199 ssh_runner.go:195] Run: rm -f paused
I1122 00:20:12.717390 251199 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1122 00:20:12.721696 251199 pod_ready.go:83] waiting for pod "coredns-66bc5c9577-9wcct" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:12.726947 251199 pod_ready.go:94] pod "coredns-66bc5c9577-9wcct" is "Ready"
I1122 00:20:12.726976 251199 pod_ready.go:86] duration metric: took 5.255643ms for pod "coredns-66bc5c9577-9wcct" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:12.729559 251199 pod_ready.go:83] waiting for pod "etcd-no-preload-781232" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:12.734425 251199 pod_ready.go:94] pod "etcd-no-preload-781232" is "Ready"
I1122 00:20:12.734455 251199 pod_ready.go:86] duration metric: took 4.86467ms for pod "etcd-no-preload-781232" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:12.736916 251199 pod_ready.go:83] waiting for pod "kube-apiserver-no-preload-781232" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:12.741485 251199 pod_ready.go:94] pod "kube-apiserver-no-preload-781232" is "Ready"
I1122 00:20:12.741515 251199 pod_ready.go:86] duration metric: took 4.574913ms for pod "kube-apiserver-no-preload-781232" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:12.743848 251199 pod_ready.go:83] waiting for pod "kube-controller-manager-no-preload-781232" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:13.121924 251199 pod_ready.go:94] pod "kube-controller-manager-no-preload-781232" is "Ready"
I1122 00:20:13.121957 251199 pod_ready.go:86] duration metric: took 378.084436ms for pod "kube-controller-manager-no-preload-781232" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:13.322463 251199 pod_ready.go:83] waiting for pod "kube-proxy-685jg" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:13.721973 251199 pod_ready.go:94] pod "kube-proxy-685jg" is "Ready"
I1122 00:20:13.722003 251199 pod_ready.go:86] duration metric: took 399.513258ms for pod "kube-proxy-685jg" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:13.922497 251199 pod_ready.go:83] waiting for pod "kube-scheduler-no-preload-781232" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:14.322798 251199 pod_ready.go:94] pod "kube-scheduler-no-preload-781232" is "Ready"
I1122 00:20:14.322835 251199 pod_ready.go:86] duration metric: took 400.307889ms for pod "kube-scheduler-no-preload-781232" in "kube-system" namespace to be "Ready" or be gone ...
I1122 00:20:14.322851 251199 pod_ready.go:40] duration metric: took 1.605427799s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1122 00:20:14.392629 251199 start.go:628] kubectl: 1.34.2, cluster: 1.34.1 (minor skew: 0)
I1122 00:20:14.394856 251199 out.go:179] * Done! kubectl is now configured to use "no-preload-781232" cluster and "default" namespace by default
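Note: the readiness checks logged above can be reproduced by hand against the same cluster; a minimal sketch, assuming the no-preload-781232 context from this run is still present in the kubeconfig:

  # poll the apiserver health endpoint the harness was checking
  kubectl --context no-preload-781232 get --raw /healthz
  # approximate the label-based "Ready" wait performed for kube-dns
  kubectl --context no-preload-781232 -n kube-system wait pod -l k8s-app=kube-dns --for=condition=Ready --timeout=4m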
==> container status <==
CONTAINER       IMAGE           CREATED          STATE     NAME                       ATTEMPT   POD ID          POD                                                NAMESPACE
d01de905a2d07   56cc512116c8f   8 seconds ago    Running   busybox                    0         e511b813570c1   busybox                                            default
f7527a8afc668   ead0a4a53df89   15 seconds ago   Running   coredns                    0         b00fa05a6c375   coredns-5dd5756b68-pqbfp                           kube-system
f2a1ec178c227   6e38f40d628db   15 seconds ago   Running   storage-provisioner        0         a3bbedf747991   storage-provisioner                                kube-system
abad042f2a4ad   409467f978b4a   26 seconds ago   Running   kindnet-cni                0         721fcd34a44d6   kindnet-ldtd8                                      kube-system
5119ee9a69fb3   ea1030da44aa1   29 seconds ago   Running   kube-proxy                 0         be780c30602ce   kube-proxy-kqrng                                   kube-system
4c35680ab2dd6   73deb9a3f7025   49 seconds ago   Running   etcd                       0         adbbfe9941b27   etcd-old-k8s-version-462319                        kube-system
1863b35aae093   f6f496300a2ae   49 seconds ago   Running   kube-scheduler             0         45afb7772f575   kube-scheduler-old-k8s-version-462319              kube-system
e398c42ad8188   bb5e0dde9054c   49 seconds ago   Running   kube-apiserver             0         0ce7c78109ce7   kube-apiserver-old-k8s-version-462319              kube-system
355ecffe75a3f   4be79c38a4bab   49 seconds ago   Running   kube-controller-manager    0         5dfd6ffd80d1f   kube-controller-manager-old-k8s-version-462319     kube-system
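Note: the listing above is the CRI view captured at post-mortem time; a roughly equivalent snapshot can be taken directly on the node while the old-k8s-version-462319 profile is still running (the command itself is standard tooling, not part of the harness output):

  minikube ssh -p old-k8s-version-462319 "sudo crictl ps -a"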
==> containerd <==
Nov 22 00:20:01 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:01.327237013Z" level=info msg="connecting to shim f2a1ec178c227617bd32e678c94e3d44e606683f0b10ccdbc182dec6d6d5c9e9" address="unix:///run/containerd/s/62835cccd20d8437bb636df9ea457fe2506fdd9387d47f5e31a45c75f852a444" protocol=ttrpc version=3
Nov 22 00:20:01 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:01.328631129Z" level=info msg="CreateContainer within sandbox \"b00fa05a6c375cb07b56b89e739f90401ad7f950dedcb886ca1774eba46a4293\" for container &ContainerMetadata{Name:coredns,Attempt:0,}"
Nov 22 00:20:01 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:01.336790890Z" level=info msg="Container f7527a8afc6683a9935b781bf3006cc9c368a534f3eafba3501b6509659a437b: CDI devices from CRI Config.CDIDevices: []"
Nov 22 00:20:01 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:01.343474448Z" level=info msg="CreateContainer within sandbox \"b00fa05a6c375cb07b56b89e739f90401ad7f950dedcb886ca1774eba46a4293\" for &ContainerMetadata{Name:coredns,Attempt:0,} returns container id \"f7527a8afc6683a9935b781bf3006cc9c368a534f3eafba3501b6509659a437b\""
Nov 22 00:20:01 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:01.344107519Z" level=info msg="StartContainer for \"f7527a8afc6683a9935b781bf3006cc9c368a534f3eafba3501b6509659a437b\""
Nov 22 00:20:01 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:01.345166179Z" level=info msg="connecting to shim f7527a8afc6683a9935b781bf3006cc9c368a534f3eafba3501b6509659a437b" address="unix:///run/containerd/s/39593751a6c9fe87428291df6153bccdab6c22a754601ae94cfc40e697ece6ec" protocol=ttrpc version=3
Nov 22 00:20:01 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:01.389133316Z" level=info msg="StartContainer for \"f2a1ec178c227617bd32e678c94e3d44e606683f0b10ccdbc182dec6d6d5c9e9\" returns successfully"
Nov 22 00:20:01 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:01.404040136Z" level=info msg="StartContainer for \"f7527a8afc6683a9935b781bf3006cc9c368a534f3eafba3501b6509659a437b\" returns successfully"
Nov 22 00:20:05 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:05.083706178Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:89dd9411-148d-4a8e-98d3-a51a8eab9d35,Namespace:default,Attempt:0,}"
Nov 22 00:20:05 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:05.877683353Z" level=info msg="connecting to shim e511b813570c19e1d5c5c2002304caba5cc1bac5847092a53135ba9cb1b1dd7c" address="unix:///run/containerd/s/b045fc79abfabe20fc9affb730c643e7c442531994f349b7904cd7f34ab0272a" namespace=k8s.io protocol=ttrpc version=3
Nov 22 00:20:06 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:06.066243350Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:89dd9411-148d-4a8e-98d3-a51a8eab9d35,Namespace:default,Attempt:0,} returns sandbox id \"e511b813570c19e1d5c5c2002304caba5cc1bac5847092a53135ba9cb1b1dd7c\""
Nov 22 00:20:06 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:06.068244404Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\""
Nov 22 00:20:08 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:08.300595484Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 22 00:20:08 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:08.301398927Z" level=info msg="stop pulling image gcr.io/k8s-minikube/busybox:1.28.4-glibc: active requests=0, bytes read=2396644"
Nov 22 00:20:08 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:08.302750252Z" level=info msg="ImageCreate event name:\"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 22 00:20:08 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:08.304853958Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 22 00:20:08 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:08.305213907Z" level=info msg="Pulled image \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" with image id \"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\", repo tag \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\", repo digest \"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\", size \"2395207\" in 2.236905893s"
Nov 22 00:20:08 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:08.305247082Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" returns image reference \"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\""
Nov 22 00:20:08 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:08.306892429Z" level=info msg="CreateContainer within sandbox \"e511b813570c19e1d5c5c2002304caba5cc1bac5847092a53135ba9cb1b1dd7c\" for container &ContainerMetadata{Name:busybox,Attempt:0,}"
Nov 22 00:20:08 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:08.314973197Z" level=info msg="Container d01de905a2d0700ad9691d5a73cf41f69bb587ec67e218858862ae31fcd53485: CDI devices from CRI Config.CDIDevices: []"
Nov 22 00:20:08 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:08.321465429Z" level=info msg="CreateContainer within sandbox \"e511b813570c19e1d5c5c2002304caba5cc1bac5847092a53135ba9cb1b1dd7c\" for &ContainerMetadata{Name:busybox,Attempt:0,} returns container id \"d01de905a2d0700ad9691d5a73cf41f69bb587ec67e218858862ae31fcd53485\""
Nov 22 00:20:08 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:08.322134703Z" level=info msg="StartContainer for \"d01de905a2d0700ad9691d5a73cf41f69bb587ec67e218858862ae31fcd53485\""
Nov 22 00:20:08 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:08.323141205Z" level=info msg="connecting to shim d01de905a2d0700ad9691d5a73cf41f69bb587ec67e218858862ae31fcd53485" address="unix:///run/containerd/s/b045fc79abfabe20fc9affb730c643e7c442531994f349b7904cd7f34ab0272a" protocol=ttrpc version=3
Nov 22 00:20:08 old-k8s-version-462319 containerd[666]: time="2025-11-22T00:20:08.376916692Z" level=info msg="StartContainer for \"d01de905a2d0700ad9691d5a73cf41f69bb587ec67e218858862ae31fcd53485\" returns successfully"
Nov 22 00:20:13 old-k8s-version-462319 containerd[666]: E1122 00:20:13.803924 666 websocket.go:100] "Unhandled Error" err="unable to upgrade websocket connection: websocket server finished before becoming ready" logger="UnhandledError"
==> coredns [f7527a8afc6683a9935b781bf3006cc9c368a534f3eafba3501b6509659a437b] <==
[INFO] plugin/ready: Still waiting on: "kubernetes"
.:53
[INFO] plugin/reload: Running configuration SHA512 = 25cf5af2951e282c4b0e961a02fb5d3e57c974501832fee92eec17b5135b9ec9d9e87d2ac94e6d117a5ed3dd54e8800aa7b4479706eb54497145ccdb80397d1b
CoreDNS-1.10.1
linux/amd64, go1.20, 055b2c3
[INFO] 127.0.0.1:60216 - 50495 "HINFO IN 8122801349455611517.3511563579879947437. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.074291599s
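Note: CoreDNS reports itself serving here; a quick in-cluster resolution check through the test's busybox pod (assuming it is still present in the default namespace) would be:

  kubectl --context old-k8s-version-462319 exec busybox -- nslookup kubernetes.default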
==> describe nodes <==
Name: old-k8s-version-462319
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=old-k8s-version-462319
kubernetes.io/os=linux
minikube.k8s.io/commit=299bbe887a12c40541707cc636234f35f4ff1785
minikube.k8s.io/name=old-k8s-version-462319
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_11_22T00_19_34_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Sat, 22 Nov 2025 00:19:29 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: old-k8s-version-462319
AcquireTime: <unset>
RenewTime: Sat, 22 Nov 2025 00:20:14 +0000
Conditions:
  Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
  ----             ------  -----------------                 ------------------                ------                       -------
  MemoryPressure   False   Sat, 22 Nov 2025 00:20:04 +0000   Sat, 22 Nov 2025 00:19:28 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
  DiskPressure     False   Sat, 22 Nov 2025 00:20:04 +0000   Sat, 22 Nov 2025 00:19:28 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
  PIDPressure      False   Sat, 22 Nov 2025 00:20:04 +0000   Sat, 22 Nov 2025 00:19:28 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
  Ready            True    Sat, 22 Nov 2025 00:20:04 +0000   Sat, 22 Nov 2025 00:20:00 +0000   KubeletReady                 kubelet is posting ready status
Addresses:
InternalIP: 192.168.103.2
Hostname: old-k8s-version-462319
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863356Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863356Ki
pods: 110
System Info:
Machine ID: 5665009e93b91d39dc05718b691e3875
System UUID: 1a763c28-0497-45f3-b9e8-458b8b4eb589
Boot ID: 725aae03-f893-4e0b-b029-cbd3b00ccfdd
Kernel Version: 6.8.0-1044-gcp
OS Image: Debian GNU/Linux 12 (bookworm)
Operating System: linux
Architecture: amd64
Container Runtime Version: containerd://2.1.5
Kubelet Version: v1.28.0
Kube-Proxy Version: v1.28.0
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (9 in total)
  Namespace     Name                                              CPU Requests   CPU Limits   Memory Requests   Memory Limits   Age
  ---------     ----                                              ------------   ----------   ---------------   -------------   ---
  default       busybox                                           0 (0%)         0 (0%)       0 (0%)            0 (0%)          13s
  kube-system   coredns-5dd5756b68-pqbfp                          100m (1%)      0 (0%)       70Mi (0%)         170Mi (0%)      31s
  kube-system   etcd-old-k8s-version-462319                       100m (1%)      0 (0%)       100Mi (0%)        0 (0%)          44s
  kube-system   kindnet-ldtd8                                     100m (1%)      100m (1%)    50Mi (0%)         50Mi (0%)       31s
  kube-system   kube-apiserver-old-k8s-version-462319             250m (3%)      0 (0%)       0 (0%)            0 (0%)          44s
  kube-system   kube-controller-manager-old-k8s-version-462319    200m (2%)      0 (0%)       0 (0%)            0 (0%)          46s
  kube-system   kube-proxy-kqrng                                  0 (0%)         0 (0%)       0 (0%)            0 (0%)          31s
  kube-system   kube-scheduler-old-k8s-version-462319             100m (1%)      0 (0%)       0 (0%)            0 (0%)          44s
  kube-system   storage-provisioner                               0 (0%)         0 (0%)       0 (0%)            0 (0%)          30s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
  Resource           Requests     Limits
  --------           --------     ------
  cpu                850m (10%)   100m (1%)
  memory             220Mi (0%)   220Mi (0%)
  ephemeral-storage  0 (0%)       0 (0%)
  hugepages-1Gi      0 (0%)       0 (0%)
  hugepages-2Mi      0 (0%)       0 (0%)
Events:
  Type    Reason                   Age   From             Message
  ----    ------                   ----  ----             -------
  Normal  Starting                 29s   kube-proxy
  Normal  Starting                 44s   kubelet          Starting kubelet.
  Normal  NodeAllocatableEnforced  44s   kubelet          Updated Node Allocatable limit across pods
  Normal  NodeHasSufficientMemory  44s   kubelet          Node old-k8s-version-462319 status is now: NodeHasSufficientMemory
  Normal  NodeHasNoDiskPressure    44s   kubelet          Node old-k8s-version-462319 status is now: NodeHasNoDiskPressure
  Normal  NodeHasSufficientPID     44s   kubelet          Node old-k8s-version-462319 status is now: NodeHasSufficientPID
  Normal  RegisteredNode           32s   node-controller  Node old-k8s-version-462319 event: Registered Node old-k8s-version-462319 in Controller
  Normal  NodeReady                17s   kubelet          Node old-k8s-version-462319 status is now: NodeReady
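Note: the node description above can be regenerated for comparison while the cluster is still up:

  kubectl --context old-k8s-version-462319 describe node old-k8s-version-462319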
==> dmesg <==
[Nov21 23:17] MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.
[ +0.000865] TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.
[ +0.001000] MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.
[ +0.087013] acpi PNP0A03:00: fail to add MMCONFIG information, can't access extended configuration space under this bridge
[ +0.410276] i8042: Warning: Keylock active
[ +0.014947] device-mapper: core: CONFIG_IMA_DISABLE_HTABLE is disabled. Duplicate IMA measurements will not be recorded in the IMA log.
[ +0.495836] block sda: the capability attribute has been deprecated.
[ +0.091740] systemd[1]: Configuration file /run/systemd/system/netplan-ovs-cleanup.service is marked world-inaccessible. This has no effect as configuration data is accessible via APIs without restrictions. Proceeding anyway.
[ +0.024333] systemd[1]: /lib/systemd/system/snapd.service:23: Unknown key name 'RestartMode' in section 'Service', ignoring.
[ +5.452540] kauditd_printk_skb: 47 callbacks suppressed
==> etcd [4c35680ab2dd6966de549749b29af9a5a8bccb172d03360ef57391e45ea9f885] <==
{"level":"info","ts":"2025-11-22T00:19:28.060277Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"f23060b075c4c089 became leader at term 2"}
{"level":"info","ts":"2025-11-22T00:19:28.060288Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: f23060b075c4c089 elected leader f23060b075c4c089 at term 2"}
{"level":"info","ts":"2025-11-22T00:19:28.061026Z","caller":"etcdserver/server.go:2571","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-22T00:19:28.061614Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-22T00:19:28.061614Z","caller":"etcdserver/server.go:2062","msg":"published local member to cluster through raft","local-member-id":"f23060b075c4c089","local-member-attributes":"{Name:old-k8s-version-462319 ClientURLs:[https://192.168.103.2:2379]}","request-path":"/0/members/f23060b075c4c089/attributes","cluster-id":"3336683c081d149d","publish-timeout":"7s"}
{"level":"info","ts":"2025-11-22T00:19:28.061648Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-22T00:19:28.06183Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"3336683c081d149d","local-member-id":"f23060b075c4c089","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-22T00:19:28.0621Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-22T00:19:28.062388Z","caller":"etcdserver/server.go:2595","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-22T00:19:28.062242Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2025-11-22T00:19:28.062743Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2025-11-22T00:19:28.064288Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.103.2:2379"}
{"level":"info","ts":"2025-11-22T00:19:28.064366Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2025-11-22T00:19:32.633697Z","caller":"traceutil/trace.go:171","msg":"trace[64928526] transaction","detail":"{read_only:false; response_revision:210; number_of_response:1; }","duration":"260.007025ms","start":"2025-11-22T00:19:32.373672Z","end":"2025-11-22T00:19:32.633679Z","steps":["trace[64928526] 'process raft request' (duration: 259.898405ms)"],"step_count":1}
{"level":"warn","ts":"2025-11-22T00:19:33.081079Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"335.74286ms","expected-duration":"100ms","prefix":"","request":"header:<ID:13873790177431359743 username:\"kube-apiserver-etcd-client\" auth_revision:1 > txn:<compare:<target:MOD key:\"/registry/secrets/kube-system/bootstrap-token-vumgow\" mod_revision:0 > success:<request_put:<key:\"/registry/secrets/kube-system/bootstrap-token-vumgow\" value_size:617 >> failure:<>>","response":"size:16"}
{"level":"info","ts":"2025-11-22T00:19:33.081182Z","caller":"traceutil/trace.go:171","msg":"trace[454440905] transaction","detail":"{read_only:false; response_revision:211; number_of_response:1; }","duration":"441.168552ms","start":"2025-11-22T00:19:32.639997Z","end":"2025-11-22T00:19:33.081166Z","steps":["trace[454440905] 'process raft request' (duration: 104.950033ms)","trace[454440905] 'compare' (duration: 335.635432ms)"],"step_count":2}
{"level":"warn","ts":"2025-11-22T00:19:33.081293Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2025-11-22T00:19:32.63998Z","time spent":"441.252908ms","remote":"127.0.0.1:42828","response type":"/etcdserverpb.KV/Txn","request count":1,"request size":677,"response count":0,"response size":39,"request content":"compare:<target:MOD key:\"/registry/secrets/kube-system/bootstrap-token-vumgow\" mod_revision:0 > success:<request_put:<key:\"/registry/secrets/kube-system/bootstrap-token-vumgow\" value_size:617 >> failure:<>"}
{"level":"warn","ts":"2025-11-22T00:19:44.266771Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"130.299403ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/serviceaccounts/kube-system/bootstrap-signer\" ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2025-11-22T00:19:44.266864Z","caller":"traceutil/trace.go:171","msg":"trace[842289003] range","detail":"{range_begin:/registry/serviceaccounts/kube-system/bootstrap-signer; range_end:; response_count:0; response_revision:282; }","duration":"130.453771ms","start":"2025-11-22T00:19:44.136394Z","end":"2025-11-22T00:19:44.266847Z","steps":["trace[842289003] 'range keys from in-memory index tree' (duration: 130.216573ms)"],"step_count":1}
{"level":"info","ts":"2025-11-22T00:19:44.386458Z","caller":"traceutil/trace.go:171","msg":"trace[490276607] linearizableReadLoop","detail":"{readStateIndex:296; appliedIndex:295; }","duration":"101.94453ms","start":"2025-11-22T00:19:44.284493Z","end":"2025-11-22T00:19:44.386437Z","steps":["trace[490276607] 'read index received' (duration: 101.776407ms)","trace[490276607] 'applied index is now lower than readState.Index' (duration: 167.67µs)"],"step_count":2}
{"level":"info","ts":"2025-11-22T00:19:44.386547Z","caller":"traceutil/trace.go:171","msg":"trace[1514742623] transaction","detail":"{read_only:false; response_revision:283; number_of_response:1; }","duration":"114.786396ms","start":"2025-11-22T00:19:44.271741Z","end":"2025-11-22T00:19:44.386527Z","steps":["trace[1514742623] 'process raft request' (duration: 114.589176ms)"],"step_count":1}
{"level":"warn","ts":"2025-11-22T00:19:44.386605Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"102.121151ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/health\" ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2025-11-22T00:19:44.386631Z","caller":"traceutil/trace.go:171","msg":"trace[800592602] range","detail":"{range_begin:/registry/health; range_end:; response_count:0; response_revision:283; }","duration":"102.163591ms","start":"2025-11-22T00:19:44.284459Z","end":"2025-11-22T00:19:44.386622Z","steps":["trace[800592602] 'agreement among raft nodes before linearized reading' (duration: 102.059746ms)"],"step_count":1}
{"level":"warn","ts":"2025-11-22T00:20:06.401485Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"116.691938ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/health\" ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2025-11-22T00:20:06.401571Z","caller":"traceutil/trace.go:171","msg":"trace[919203119] range","detail":"{range_begin:/registry/health; range_end:; response_count:0; response_revision:425; }","duration":"116.801997ms","start":"2025-11-22T00:20:06.284749Z","end":"2025-11-22T00:20:06.401551Z","steps":["trace[919203119] 'range keys from in-memory index tree' (duration: 116.607287ms)"],"step_count":1}
==> kernel <==
00:20:17 up 1:02, 0 user, load average: 6.48, 3.76, 2.29
Linux old-k8s-version-462319 6.8.0-1044-gcp #47~22.04.1-Ubuntu SMP Thu Oct 23 21:07:54 UTC 2025 x86_64 GNU/Linux
PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"
==> kindnet [abad042f2a4adf0bb5a1e42eb6090d0433dbd093e2502e0a0763cd88008fa485] <==
I1122 00:19:50.358053 1 main.go:109] connected to apiserver: https://10.96.0.1:443
I1122 00:19:50.379516 1 main.go:139] hostIP = 192.168.103.2
podIP = 192.168.103.2
I1122 00:19:50.379673 1 main.go:148] setting mtu 1500 for CNI
I1122 00:19:50.379699 1 main.go:178] kindnetd IP family: "ipv4"
I1122 00:19:50.379728 1 main.go:182] noMask IPv4 subnets: [10.244.0.0/16]
time="2025-11-22T00:19:50Z" level=info msg="Created plugin 10-kube-network-policies (kindnetd, handles RunPodSandbox,RemovePodSandbox)"
I1122 00:19:50.657926 1 controller.go:377] "Starting controller" name="kube-network-policies"
I1122 00:19:50.657947 1 controller.go:381] "Waiting for informer caches to sync"
I1122 00:19:50.657972 1 shared_informer.go:350] "Waiting for caches to sync" controller="kube-network-policies"
I1122 00:19:50.658082 1 controller.go:390] nri plugin exited: failed to connect to NRI service: dial unix /var/run/nri/nri.sock: connect: no such file or directory
I1122 00:19:50.980378 1 shared_informer.go:357] "Caches are synced" controller="kube-network-policies"
I1122 00:19:50.980413 1 metrics.go:72] Registering metrics
I1122 00:19:50.980477 1 controller.go:711] "Syncing nftables rules"
I1122 00:20:00.663360 1 main.go:297] Handling node with IPs: map[192.168.103.2:{}]
I1122 00:20:00.663424 1 main.go:301] handling current node
I1122 00:20:10.657535 1 main.go:297] Handling node with IPs: map[192.168.103.2:{}]
I1122 00:20:10.657598 1 main.go:301] handling current node
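Note: kindnet is handling routes for the single node; to confirm the pod CIDR it operates on matches the one assigned by the API server (10.244.0.0/24 in the node description above), one could run:

  kubectl --context old-k8s-version-462319 get node old-k8s-version-462319 -o jsonpath='{.spec.podCIDR}'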
==> kube-apiserver [e398c42ad8188a2a96d101f089a0968d374f75b6827a154f004bd956b9155274] <==
I1122 00:19:29.739253 1 apf_controller.go:380] Running API Priority and Fairness periodic rebalancing process
I1122 00:19:29.739494 1 cache.go:39] Caches are synced for APIServiceRegistrationController controller
I1122 00:19:29.739756 1 shared_informer.go:318] Caches are synced for crd-autoregister
I1122 00:19:29.739791 1 aggregator.go:166] initial CRD sync complete...
I1122 00:19:29.739800 1 autoregister_controller.go:141] Starting autoregister controller
I1122 00:19:29.739807 1 cache.go:32] Waiting for caches to sync for autoregister controller
I1122 00:19:29.739814 1 cache.go:39] Caches are synced for autoregister controller
I1122 00:19:29.740221 1 controller.go:624] quota admission added evaluator for: namespaces
I1122 00:19:29.740304 1 shared_informer.go:318] Caches are synced for configmaps
I1122 00:19:29.936021 1 controller.go:624] quota admission added evaluator for: leases.coordination.k8s.io
I1122 00:19:30.645531 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I1122 00:19:30.649522 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I1122 00:19:30.649546 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I1122 00:19:31.151928 1 controller.go:624] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1122 00:19:31.192786 1 controller.go:624] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1122 00:19:31.249628 1 alloc.go:330] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W1122 00:19:31.255812 1 lease.go:263] Resetting endpoints for master service "kubernetes" to [192.168.103.2]
I1122 00:19:31.257056 1 controller.go:624] quota admission added evaluator for: endpoints
I1122 00:19:31.261743 1 controller.go:624] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1122 00:19:31.700612 1 controller.go:624] quota admission added evaluator for: serviceaccounts
I1122 00:19:33.349558 1 controller.go:624] quota admission added evaluator for: deployments.apps
I1122 00:19:33.363593 1 alloc.go:330] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I1122 00:19:33.376299 1 controller.go:624] quota admission added evaluator for: daemonsets.apps
I1122 00:19:46.344730 1 controller.go:624] quota admission added evaluator for: controllerrevisions.apps
I1122 00:19:46.397570 1 controller.go:624] quota admission added evaluator for: replicasets.apps
==> kube-controller-manager [355ecffe75a3ff0874bfe775cd91a06b9bfff9f2dc65c709c3da1adca76e11c1] <==
I1122 00:19:45.646325 1 shared_informer.go:318] Caches are synced for resource quota
I1122 00:19:45.687399 1 shared_informer.go:318] Caches are synced for disruption
I1122 00:19:45.693911 1 shared_informer.go:318] Caches are synced for resource quota
I1122 00:19:46.009572 1 shared_informer.go:318] Caches are synced for garbage collector
I1122 00:19:46.084787 1 shared_informer.go:318] Caches are synced for garbage collector
I1122 00:19:46.084820 1 garbagecollector.go:166] "All resource monitors have synced. Proceeding to collect garbage"
I1122 00:19:46.355549 1 event.go:307] "Event occurred" object="kube-system/kube-proxy" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-kqrng"
I1122 00:19:46.357410 1 event.go:307] "Event occurred" object="kube-system/kindnet" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kindnet-ldtd8"
I1122 00:19:46.402945 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set coredns-5dd5756b68 to 2"
I1122 00:19:46.497513 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-pqbfp"
I1122 00:19:46.505494 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-bjgv6"
I1122 00:19:46.515365 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="112.69029ms"
I1122 00:19:46.537252 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="21.812757ms"
I1122 00:19:46.537541 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="177.843µs"
I1122 00:19:47.048823 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set coredns-5dd5756b68 to 1 from 2"
I1122 00:19:47.070179 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: coredns-5dd5756b68-bjgv6"
I1122 00:19:47.078565 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="31.08623ms"
I1122 00:19:47.085902 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="7.261706ms"
I1122 00:19:47.086048 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="90.581µs"
I1122 00:20:00.892386 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="138.286µs"
I1122 00:20:00.912888 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="84.033µs"
I1122 00:20:01.551233 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="124.993µs"
I1122 00:20:02.562092 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="7.330757ms"
I1122 00:20:02.562207 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="69.9µs"
I1122 00:20:05.541105 1 node_lifecycle_controller.go:1048] "Controller detected that some Nodes are Ready. Exiting master disruption mode"
==> kube-proxy [5119ee9a69fb309c6fe6c40bfdf7853c1d5fd0390280d45b28a695bd3259a0c0] <==
I1122 00:19:47.043350 1 server_others.go:69] "Using iptables proxy"
I1122 00:19:47.061630 1 node.go:141] Successfully retrieved node IP: 192.168.103.2
I1122 00:19:47.101193 1 server.go:632] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I1122 00:19:47.103704 1 server_others.go:152] "Using iptables Proxier"
I1122 00:19:47.103745 1 server_others.go:421] "Detect-local-mode set to ClusterCIDR, but no cluster CIDR for family" ipFamily="IPv6"
I1122 00:19:47.103755 1 server_others.go:438] "Defaulting to no-op detect-local"
I1122 00:19:47.103806 1 proxier.go:251] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
I1122 00:19:47.104104 1 server.go:846] "Version info" version="v1.28.0"
I1122 00:19:47.104124 1 server.go:848] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1122 00:19:47.104828 1 config.go:188] "Starting service config controller"
I1122 00:19:47.104867 1 shared_informer.go:311] Waiting for caches to sync for service config
I1122 00:19:47.104926 1 config.go:97] "Starting endpoint slice config controller"
I1122 00:19:47.104932 1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
I1122 00:19:47.105174 1 config.go:315] "Starting node config controller"
I1122 00:19:47.105210 1 shared_informer.go:311] Waiting for caches to sync for node config
I1122 00:19:47.205514 1 shared_informer.go:318] Caches are synced for endpoint slice config
I1122 00:19:47.205516 1 shared_informer.go:318] Caches are synced for service config
I1122 00:19:47.205561 1 shared_informer.go:318] Caches are synced for node config
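Note: kube-proxy is running in iptables mode; the NAT chains it programs can be inspected on the node while the profile is up (the KUBE-SERVICES chain name is the standard kube-proxy chain, not taken from this log):

  minikube ssh -p old-k8s-version-462319 "sudo iptables -t nat -S KUBE-SERVICES | head -n 20"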
==> kube-scheduler [1863b35aae093f7c8f897de1e1301f7582ed68975578bf5d2f19a845b5bbb715] <==
W1122 00:19:29.717451 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E1122 00:19:29.717478 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
W1122 00:19:29.717458 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E1122 00:19:29.717515 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
W1122 00:19:29.717553 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E1122 00:19:29.717616 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
W1122 00:19:29.717652 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E1122 00:19:29.717675 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
W1122 00:19:30.562109 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E1122 00:19:30.562139 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
W1122 00:19:30.586044 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E1122 00:19:30.586087 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
W1122 00:19:30.770112 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E1122 00:19:30.770162 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
W1122 00:19:30.772555 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E1122 00:19:30.772599 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
W1122 00:19:30.781374 1 reflector.go:535] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E1122 00:19:30.781431 1 reflector.go:147] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
W1122 00:19:30.807504 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E1122 00:19:30.807533 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
W1122 00:19:30.845180 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E1122 00:19:30.845236 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
W1122 00:19:30.871051 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E1122 00:19:30.871090 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
I1122 00:19:33.910375 1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Nov 22 00:19:45 old-k8s-version-462319 kubelet[1521]: I1122 00:19:45.613796 1521 kubelet_network.go:61] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24"
Nov 22 00:19:46 old-k8s-version-462319 kubelet[1521]: I1122 00:19:46.364926 1521 topology_manager.go:215] "Topology Admit Handler" podUID="643cd348-4af3-4720-af0d-e931f184742c" podNamespace="kube-system" podName="kube-proxy-kqrng"
Nov 22 00:19:46 old-k8s-version-462319 kubelet[1521]: I1122 00:19:46.365817 1521 topology_manager.go:215] "Topology Admit Handler" podUID="6bf161d2-c442-466d-98b8-c313a127bf22" podNamespace="kube-system" podName="kindnet-ldtd8"
Nov 22 00:19:46 old-k8s-version-462319 kubelet[1521]: I1122 00:19:46.396776 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-295rj\" (UniqueName: \"kubernetes.io/projected/643cd348-4af3-4720-af0d-e931f184742c-kube-api-access-295rj\") pod \"kube-proxy-kqrng\" (UID: \"643cd348-4af3-4720-af0d-e931f184742c\") " pod="kube-system/kube-proxy-kqrng"
Nov 22 00:19:46 old-k8s-version-462319 kubelet[1521]: I1122 00:19:46.398874 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/643cd348-4af3-4720-af0d-e931f184742c-lib-modules\") pod \"kube-proxy-kqrng\" (UID: \"643cd348-4af3-4720-af0d-e931f184742c\") " pod="kube-system/kube-proxy-kqrng"
Nov 22 00:19:46 old-k8s-version-462319 kubelet[1521]: I1122 00:19:46.398955 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-cfg\" (UniqueName: \"kubernetes.io/host-path/6bf161d2-c442-466d-98b8-c313a127bf22-cni-cfg\") pod \"kindnet-ldtd8\" (UID: \"6bf161d2-c442-466d-98b8-c313a127bf22\") " pod="kube-system/kindnet-ldtd8"
Nov 22 00:19:46 old-k8s-version-462319 kubelet[1521]: I1122 00:19:46.398980 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/6bf161d2-c442-466d-98b8-c313a127bf22-xtables-lock\") pod \"kindnet-ldtd8\" (UID: \"6bf161d2-c442-466d-98b8-c313a127bf22\") " pod="kube-system/kindnet-ldtd8"
Nov 22 00:19:46 old-k8s-version-462319 kubelet[1521]: I1122 00:19:46.399025 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/6bf161d2-c442-466d-98b8-c313a127bf22-lib-modules\") pod \"kindnet-ldtd8\" (UID: \"6bf161d2-c442-466d-98b8-c313a127bf22\") " pod="kube-system/kindnet-ldtd8"
Nov 22 00:19:46 old-k8s-version-462319 kubelet[1521]: I1122 00:19:46.399054 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/643cd348-4af3-4720-af0d-e931f184742c-kube-proxy\") pod \"kube-proxy-kqrng\" (UID: \"643cd348-4af3-4720-af0d-e931f184742c\") " pod="kube-system/kube-proxy-kqrng"
Nov 22 00:19:46 old-k8s-version-462319 kubelet[1521]: I1122 00:19:46.399082 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/643cd348-4af3-4720-af0d-e931f184742c-xtables-lock\") pod \"kube-proxy-kqrng\" (UID: \"643cd348-4af3-4720-af0d-e931f184742c\") " pod="kube-system/kube-proxy-kqrng"
Nov 22 00:19:46 old-k8s-version-462319 kubelet[1521]: I1122 00:19:46.399117 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xwtxn\" (UniqueName: \"kubernetes.io/projected/6bf161d2-c442-466d-98b8-c313a127bf22-kube-api-access-xwtxn\") pod \"kindnet-ldtd8\" (UID: \"6bf161d2-c442-466d-98b8-c313a127bf22\") " pod="kube-system/kindnet-ldtd8"
Nov 22 00:19:47 old-k8s-version-462319 kubelet[1521]: I1122 00:19:47.509109 1521 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kube-proxy-kqrng" podStartSLOduration=1.509057216 podCreationTimestamp="2025-11-22 00:19:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 00:19:47.50894671 +0000 UTC m=+14.188238544" watchObservedRunningTime="2025-11-22 00:19:47.509057216 +0000 UTC m=+14.188349048"
Nov 22 00:19:50 old-k8s-version-462319 kubelet[1521]: I1122 00:19:50.516088 1521 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kindnet-ldtd8" podStartSLOduration=1.666002271 podCreationTimestamp="2025-11-22 00:19:46 +0000 UTC" firstStartedPulling="2025-11-22 00:19:47.157978554 +0000 UTC m=+13.837270379" lastFinishedPulling="2025-11-22 00:19:50.007957975 +0000 UTC m=+16.687249802" observedRunningTime="2025-11-22 00:19:50.515675934 +0000 UTC m=+17.194967778" watchObservedRunningTime="2025-11-22 00:19:50.515981694 +0000 UTC m=+17.195273528"
Nov 22 00:20:00 old-k8s-version-462319 kubelet[1521]: I1122 00:20:00.709466 1521 kubelet_node_status.go:493] "Fast updating node status as it just became ready"
Nov 22 00:20:00 old-k8s-version-462319 kubelet[1521]: I1122 00:20:00.889924 1521 topology_manager.go:215] "Topology Admit Handler" podUID="fc0f2774-324d-4c1a-97b7-d3e3d30ea8b2" podNamespace="kube-system" podName="storage-provisioner"
Nov 22 00:20:00 old-k8s-version-462319 kubelet[1521]: I1122 00:20:00.892871 1521 topology_manager.go:215] "Topology Admit Handler" podUID="44750e8d-5eeb-4845-9029-a58cbf976b62" podNamespace="kube-system" podName="coredns-5dd5756b68-pqbfp"
Nov 22 00:20:00 old-k8s-version-462319 kubelet[1521]: I1122 00:20:00.993531 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/44750e8d-5eeb-4845-9029-a58cbf976b62-config-volume\") pod \"coredns-5dd5756b68-pqbfp\" (UID: \"44750e8d-5eeb-4845-9029-a58cbf976b62\") " pod="kube-system/coredns-5dd5756b68-pqbfp"
Nov 22 00:20:00 old-k8s-version-462319 kubelet[1521]: I1122 00:20:00.993597 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/fc0f2774-324d-4c1a-97b7-d3e3d30ea8b2-tmp\") pod \"storage-provisioner\" (UID: \"fc0f2774-324d-4c1a-97b7-d3e3d30ea8b2\") " pod="kube-system/storage-provisioner"
Nov 22 00:20:00 old-k8s-version-462319 kubelet[1521]: I1122 00:20:00.993637 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pfnhk\" (UniqueName: \"kubernetes.io/projected/44750e8d-5eeb-4845-9029-a58cbf976b62-kube-api-access-pfnhk\") pod \"coredns-5dd5756b68-pqbfp\" (UID: \"44750e8d-5eeb-4845-9029-a58cbf976b62\") " pod="kube-system/coredns-5dd5756b68-pqbfp"
Nov 22 00:20:00 old-k8s-version-462319 kubelet[1521]: I1122 00:20:00.993669 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rj2fz\" (UniqueName: \"kubernetes.io/projected/fc0f2774-324d-4c1a-97b7-d3e3d30ea8b2-kube-api-access-rj2fz\") pod \"storage-provisioner\" (UID: \"fc0f2774-324d-4c1a-97b7-d3e3d30ea8b2\") " pod="kube-system/storage-provisioner"
Nov 22 00:20:01 old-k8s-version-462319 kubelet[1521]: I1122 00:20:01.564512 1521 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=14.564413938 podCreationTimestamp="2025-11-22 00:19:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 00:20:01.564333027 +0000 UTC m=+28.243624860" watchObservedRunningTime="2025-11-22 00:20:01.564413938 +0000 UTC m=+28.243705771"
Nov 22 00:20:01 old-k8s-version-462319 kubelet[1521]: I1122 00:20:01.564659 1521 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/coredns-5dd5756b68-pqbfp" podStartSLOduration=15.564629833 podCreationTimestamp="2025-11-22 00:19:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 00:20:01.551555332 +0000 UTC m=+28.230847165" watchObservedRunningTime="2025-11-22 00:20:01.564629833 +0000 UTC m=+28.243921660"
Nov 22 00:20:04 old-k8s-version-462319 kubelet[1521]: I1122 00:20:04.775067 1521 topology_manager.go:215] "Topology Admit Handler" podUID="89dd9411-148d-4a8e-98d3-a51a8eab9d35" podNamespace="default" podName="busybox"
Nov 22 00:20:04 old-k8s-version-462319 kubelet[1521]: I1122 00:20:04.915405 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l7gkx\" (UniqueName: \"kubernetes.io/projected/89dd9411-148d-4a8e-98d3-a51a8eab9d35-kube-api-access-l7gkx\") pod \"busybox\" (UID: \"89dd9411-148d-4a8e-98d3-a51a8eab9d35\") " pod="default/busybox"
Nov 22 00:20:08 old-k8s-version-462319 kubelet[1521]: I1122 00:20:08.563800 1521 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="default/busybox" podStartSLOduration=2.326082204 podCreationTimestamp="2025-11-22 00:20:04 +0000 UTC" firstStartedPulling="2025-11-22 00:20:06.067901148 +0000 UTC m=+32.747192973" lastFinishedPulling="2025-11-22 00:20:08.305570732 +0000 UTC m=+34.984862556" observedRunningTime="2025-11-22 00:20:08.563606355 +0000 UTC m=+35.242898188" watchObservedRunningTime="2025-11-22 00:20:08.563751787 +0000 UTC m=+35.243043620"
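Note: only a slice of the kubelet journal is captured above; if more context is needed, the full unit log can be pulled from the node:

  minikube ssh -p old-k8s-version-462319 "sudo journalctl -u kubelet --no-pager -n 200"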
==> storage-provisioner [f2a1ec178c227617bd32e678c94e3d44e606683f0b10ccdbc182dec6d6d5c9e9] <==
I1122 00:20:01.401220 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1122 00:20:01.412796 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1122 00:20:01.412842 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I1122 00:20:01.421489 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I1122 00:20:01.421683 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_old-k8s-version-462319_fbf5718a-3981-4828-8660-7b6ddab898c0!
I1122 00:20:01.421619 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"d8be93cf-82a7-4f20-a2ea-927b67416b8f", APIVersion:"v1", ResourceVersion:"405", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' old-k8s-version-462319_fbf5718a-3981-4828-8660-7b6ddab898c0 became leader
I1122 00:20:01.522750 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_old-k8s-version-462319_fbf5718a-3981-4828-8660-7b6ddab898c0!
-- /stdout --
helpers_test.go:262: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p old-k8s-version-462319 -n old-k8s-version-462319
helpers_test.go:269: (dbg) Run: kubectl --context old-k8s-version-462319 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:293: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: end of post-mortem logs <<<
helpers_test.go:294: ---------------------/post-mortem---------------------------------
--- FAIL: TestStartStop/group/old-k8s-version/serial/DeployApp (13.36s)
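Note: to iterate on this failure in isolation, the subtest can be re-run from the minikube source tree with go test's -run filter; harness-specific flags (for example, the path to the built minikube binary) are environment-dependent and omitted here:

  go test ./test/integration -run 'TestStartStop/group/old-k8s-version/serial/DeployApp' -timeout 60m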