=== RUN TestStartStop/group/old-k8s-version/serial/DeployApp
start_stop_delete_test.go:194: (dbg) Run: kubectl --context old-k8s-version-975700 create -f testdata/busybox.yaml
start_stop_delete_test.go:194: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:352: "busybox" [b49caea0-80e8-4473-ac1f-f9bd327c3754] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
helpers_test.go:352: "busybox" [b49caea0-80e8-4473-ac1f-f9bd327c3754] Running
start_stop_delete_test.go:194: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: integration-test=busybox healthy within 9.003206197s
start_stop_delete_test.go:194: (dbg) Run: kubectl --context old-k8s-version-975700 exec busybox -- /bin/sh -c "ulimit -n"
start_stop_delete_test.go:194: 'ulimit -n' returned 1024, expected 1048576
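The failed assertion above is the open-file-limit check: "ulimit -n" inside the busybox pod reports 1024 where the test expects 1048576. To localize where the limit is lowered, it helps to compare the pod's view with the kic node container itself; a diagnostic sketch using names from this run (not part of the test; the /proc path is standard Linux):

    # limit seen by the pod (the same command the test runs)
    kubectl --context old-k8s-version-975700 exec busybox -- /bin/sh -c "ulimit -n"
    # limit inside the minikube node container, for comparison
    docker exec old-k8s-version-975700 sh -c "ulimit -n"
    # RLIMIT_NOFILE of the node's init process (containerd's own limit may differ if its unit sets LimitNOFILE)
    docker exec old-k8s-version-975700 sh -c "grep 'Max open files' /proc/1/limits"

If the node container already shows 1024, the limit is lost at the docker level; if only the pod shows it, the runtime drops it when creating the pod sandbox.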
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: docker inspect <======
helpers_test.go:239: (dbg) Run: docker inspect old-k8s-version-975700
helpers_test.go:243: (dbg) docker inspect old-k8s-version-975700:
-- stdout --
[
    {
        "Id": "fa1d8405226b204ac72daac6f171881e88b0344b7533643e7e2243a0246fe4ca",
        "Created": "2025-11-19T22:19:38.284388499Z",
        "Path": "/usr/local/bin/entrypoint",
        "Args": [
            "/sbin/init"
        ],
        "State": {
            "Status": "running",
            "Running": true,
            "Paused": false,
            "Restarting": false,
            "OOMKilled": false,
            "Dead": false,
            "Pid": 244905,
            "ExitCode": 0,
            "Error": "",
            "StartedAt": "2025-11-19T22:19:38.321569291Z",
            "FinishedAt": "0001-01-01T00:00:00Z"
        },
        "Image": "sha256:da868a89527ea3b5fe65ed3ef232d132379e38c55dd4637db2e5af21a1522b2d",
        "ResolvConfPath": "/var/lib/docker/containers/fa1d8405226b204ac72daac6f171881e88b0344b7533643e7e2243a0246fe4ca/resolv.conf",
        "HostnamePath": "/var/lib/docker/containers/fa1d8405226b204ac72daac6f171881e88b0344b7533643e7e2243a0246fe4ca/hostname",
        "HostsPath": "/var/lib/docker/containers/fa1d8405226b204ac72daac6f171881e88b0344b7533643e7e2243a0246fe4ca/hosts",
        "LogPath": "/var/lib/docker/containers/fa1d8405226b204ac72daac6f171881e88b0344b7533643e7e2243a0246fe4ca/fa1d8405226b204ac72daac6f171881e88b0344b7533643e7e2243a0246fe4ca-json.log",
        "Name": "/old-k8s-version-975700",
        "RestartCount": 0,
        "Driver": "overlay2",
        "Platform": "linux",
        "MountLabel": "",
        "ProcessLabel": "",
        "AppArmorProfile": "unconfined",
        "ExecIDs": null,
        "HostConfig": {
            "Binds": [
                "/lib/modules:/lib/modules:ro",
                "old-k8s-version-975700:/var"
            ],
            "ContainerIDFile": "",
            "LogConfig": {
                "Type": "json-file",
                "Config": {
                    "max-size": "100m"
                }
            },
            "NetworkMode": "old-k8s-version-975700",
            "PortBindings": {
                "22/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": ""
                    }
                ],
                "2376/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": ""
                    }
                ],
                "32443/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": ""
                    }
                ],
                "5000/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": ""
                    }
                ],
                "8443/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": ""
                    }
                ]
            },
            "RestartPolicy": {
                "Name": "no",
                "MaximumRetryCount": 0
            },
            "AutoRemove": false,
            "VolumeDriver": "",
            "VolumesFrom": null,
            "ConsoleSize": [
                0,
                0
            ],
            "CapAdd": null,
            "CapDrop": null,
            "CgroupnsMode": "private",
            "Dns": null,
            "DnsOptions": [],
            "DnsSearch": [],
            "ExtraHosts": null,
            "GroupAdd": null,
            "IpcMode": "private",
            "Cgroup": "",
            "Links": null,
            "OomScoreAdj": 0,
            "PidMode": "",
            "Privileged": true,
            "PublishAllPorts": false,
            "ReadonlyRootfs": false,
            "SecurityOpt": [
                "seccomp=unconfined",
                "apparmor=unconfined",
                "label=disable"
            ],
            "Tmpfs": {
                "/run": "",
                "/tmp": ""
            },
            "UTSMode": "",
            "UsernsMode": "",
            "ShmSize": 67108864,
            "Runtime": "runc",
            "Isolation": "",
            "CpuShares": 0,
            "Memory": 3221225472,
            "NanoCpus": 0,
            "CgroupParent": "",
            "BlkioWeight": 0,
            "BlkioWeightDevice": [],
            "BlkioDeviceReadBps": [],
            "BlkioDeviceWriteBps": [],
            "BlkioDeviceReadIOps": [],
            "BlkioDeviceWriteIOps": [],
            "CpuPeriod": 0,
            "CpuQuota": 0,
            "CpuRealtimePeriod": 0,
            "CpuRealtimeRuntime": 0,
            "CpusetCpus": "",
            "CpusetMems": "",
            "Devices": [],
            "DeviceCgroupRules": null,
            "DeviceRequests": null,
            "MemoryReservation": 0,
            "MemorySwap": 6442450944,
            "MemorySwappiness": null,
            "OomKillDisable": null,
            "PidsLimit": null,
            "Ulimits": [],
            "CpuCount": 0,
            "CpuPercent": 0,
            "IOMaximumIOps": 0,
            "IOMaximumBandwidth": 0,
            "MaskedPaths": null,
            "ReadonlyPaths": null
        },
        "GraphDriver": {
            "Data": {
                "ID": "fa1d8405226b204ac72daac6f171881e88b0344b7533643e7e2243a0246fe4ca",
                "LowerDir": "/var/lib/docker/overlay2/82f9fc885f3a15658949bf3138691f10889fccea52145002efd1a4a56c392ddc-init/diff:/var/lib/docker/overlay2/b09480e350abbb2f4f48b19448dc8e9ddd0de679fdb8cd59ebc5b758a29b344e/diff",
                "MergedDir": "/var/lib/docker/overlay2/82f9fc885f3a15658949bf3138691f10889fccea52145002efd1a4a56c392ddc/merged",
                "UpperDir": "/var/lib/docker/overlay2/82f9fc885f3a15658949bf3138691f10889fccea52145002efd1a4a56c392ddc/diff",
                "WorkDir": "/var/lib/docker/overlay2/82f9fc885f3a15658949bf3138691f10889fccea52145002efd1a4a56c392ddc/work"
            },
            "Name": "overlay2"
        },
        "Mounts": [
            {
                "Type": "bind",
                "Source": "/lib/modules",
                "Destination": "/lib/modules",
                "Mode": "ro",
                "RW": false,
                "Propagation": "rprivate"
            },
            {
                "Type": "volume",
                "Name": "old-k8s-version-975700",
                "Source": "/var/lib/docker/volumes/old-k8s-version-975700/_data",
                "Destination": "/var",
                "Driver": "local",
                "Mode": "z",
                "RW": true,
                "Propagation": ""
            }
        ],
        "Config": {
            "Hostname": "old-k8s-version-975700",
            "Domainname": "",
            "User": "",
            "AttachStdin": false,
            "AttachStdout": false,
            "AttachStderr": false,
            "ExposedPorts": {
                "22/tcp": {},
                "2376/tcp": {},
                "32443/tcp": {},
                "5000/tcp": {},
                "8443/tcp": {}
            },
            "Tty": true,
            "OpenStdin": false,
            "StdinOnce": false,
            "Env": [
                "container=docker",
                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
            ],
            "Cmd": null,
            "Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865",
            "Volumes": null,
            "WorkingDir": "/",
            "Entrypoint": [
                "/usr/local/bin/entrypoint",
                "/sbin/init"
            ],
            "Labels": {
                "created_by.minikube.sigs.k8s.io": "true",
                "mode.minikube.sigs.k8s.io": "old-k8s-version-975700",
                "name.minikube.sigs.k8s.io": "old-k8s-version-975700",
                "role.minikube.sigs.k8s.io": ""
            },
            "StopSignal": "SIGRTMIN+3"
        },
        "NetworkSettings": {
            "SandboxID": "bdcc92270fe5f34f2b3211c596bcb03676f7d021d1ab19d1405cbc9ff65513fb",
            "SandboxKey": "/var/run/docker/netns/bdcc92270fe5",
            "Ports": {
                "22/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": "33058"
                    }
                ],
                "2376/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": "33059"
                    }
                ],
                "32443/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": "33062"
                    }
                ],
                "5000/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": "33060"
                    }
                ],
                "8443/tcp": [
                    {
                        "HostIp": "127.0.0.1",
                        "HostPort": "33061"
                    }
                ]
            },
            "Networks": {
                "old-k8s-version-975700": {
                    "IPAMConfig": {
                        "IPv4Address": "192.168.94.2",
                        "IPv6Address": ""
                    },
                    "Links": null,
                    "Aliases": null,
                    "DriverOpts": null,
                    "GwPriority": 0,
                    "NetworkID": "e025fa4e3e969ab94188de7ccce8cf41b046fa1de9b7b2485f5bcca1daedd849",
                    "EndpointID": "8cbfdb5bbf934780f84e734118116ddf815c2fea44670767c9e66317e265e4f4",
                    "Gateway": "192.168.94.1",
                    "IPAddress": "192.168.94.2",
                    "MacAddress": "e6:6b:48:9f:07:21",
                    "IPPrefixLen": 24,
                    "IPv6Gateway": "",
                    "GlobalIPv6Address": "",
                    "GlobalIPv6PrefixLen": 0,
                    "DNSNames": [
                        "old-k8s-version-975700",
                        "fa1d8405226b"
                    ]
                }
            }
        }
    }
]
-- /stdout --
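The inspect document above confirms every exposed port is published on an ephemeral 127.0.0.1 host port (22 -> 33058, 2376 -> 33059, 5000 -> 33060, 8443 -> 33061, 32443 -> 33062). A single mapping can be pulled without dumping the whole document via docker's Go-template format flag; a sketch against this run's container:

    docker inspect -f '{{(index (index .NetworkSettings.Ports "8443/tcp") 0).HostPort}}' old-k8s-version-975700
    # prints 33061 for the state captured above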
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p old-k8s-version-975700 -n old-k8s-version-975700
helpers_test.go:252: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-amd64 -p old-k8s-version-975700 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-amd64 -p old-k8s-version-975700 logs -n 25: (1.056627693s)
helpers_test.go:260: TestStartStop/group/old-k8s-version/serial/DeployApp logs:
-- stdout --
==> Audit <==
┌─────────┬────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬───────────────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼───────────────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ start │ -p NoKubernetes-836292 --no-kubernetes --memory=3072 --alsologtostderr -v=5 --driver=docker --container-runtime=containerd │ NoKubernetes-836292 │ jenkins │ v1.37.0 │ 19 Nov 25 22:18 UTC │ 19 Nov 25 22:19 UTC │
│ ssh │ -p cilium-904997 sudo systemctl status containerd --all --full --no-pager │ cilium-904997 │ jenkins │ v1.37.0 │ 19 Nov 25 22:18 UTC │ │
│ ssh │ -p cilium-904997 sudo systemctl cat containerd --no-pager │ cilium-904997 │ jenkins │ v1.37.0 │ 19 Nov 25 22:18 UTC │ │
│ ssh │ -p cilium-904997 sudo cat /lib/systemd/system/containerd.service │ cilium-904997 │ jenkins │ v1.37.0 │ 19 Nov 25 22:18 UTC │ │
│ ssh │ -p cilium-904997 sudo cat /etc/containerd/config.toml │ cilium-904997 │ jenkins │ v1.37.0 │ 19 Nov 25 22:18 UTC │ │
│ ssh │ -p cilium-904997 sudo containerd config dump │ cilium-904997 │ jenkins │ v1.37.0 │ 19 Nov 25 22:18 UTC │ │
│ ssh │ -p cilium-904997 sudo systemctl status crio --all --full --no-pager │ cilium-904997 │ jenkins │ v1.37.0 │ 19 Nov 25 22:18 UTC │ │
│ ssh │ -p cilium-904997 sudo systemctl cat crio --no-pager │ cilium-904997 │ jenkins │ v1.37.0 │ 19 Nov 25 22:18 UTC │ │
│ ssh │ -p cilium-904997 sudo find /etc/crio -type f -exec sh -c 'echo {}; cat {}' \; │ cilium-904997 │ jenkins │ v1.37.0 │ 19 Nov 25 22:18 UTC │ │
│ ssh │ -p cilium-904997 sudo crio config │ cilium-904997 │ jenkins │ v1.37.0 │ 19 Nov 25 22:18 UTC │ │
│ delete │ -p cilium-904997 │ cilium-904997 │ jenkins │ v1.37.0 │ 19 Nov 25 22:18 UTC │ 19 Nov 25 22:18 UTC │
│ start │ -p force-systemd-flag-635885 --memory=3072 --force-systemd --alsologtostderr -v=5 --driver=docker --container-runtime=containerd │ force-systemd-flag-635885 │ jenkins │ v1.37.0 │ 19 Nov 25 22:18 UTC │ 19 Nov 25 22:19 UTC │
│ ssh │ -p NoKubernetes-836292 sudo systemctl is-active --quiet service kubelet │ NoKubernetes-836292 │ jenkins │ v1.37.0 │ 19 Nov 25 22:19 UTC │ │
│ ssh │ force-systemd-flag-635885 ssh cat /etc/containerd/config.toml │ force-systemd-flag-635885 │ jenkins │ v1.37.0 │ 19 Nov 25 22:19 UTC │ 19 Nov 25 22:19 UTC │
│ delete │ -p force-systemd-flag-635885 │ force-systemd-flag-635885 │ jenkins │ v1.37.0 │ 19 Nov 25 22:19 UTC │ 19 Nov 25 22:19 UTC │
│ stop │ -p NoKubernetes-836292 │ NoKubernetes-836292 │ jenkins │ v1.37.0 │ 19 Nov 25 22:19 UTC │ 19 Nov 25 22:19 UTC │
│ start │ -p NoKubernetes-836292 --driver=docker --container-runtime=containerd │ NoKubernetes-836292 │ jenkins │ v1.37.0 │ 19 Nov 25 22:19 UTC │ 19 Nov 25 22:19 UTC │
│ start │ -p cert-options-071115 --memory=3072 --apiserver-ips=127.0.0.1 --apiserver-ips=192.168.15.15 --apiserver-names=localhost --apiserver-names=www.google.com --apiserver-port=8555 --driver=docker --container-runtime=containerd │ cert-options-071115 │ jenkins │ v1.37.0 │ 19 Nov 25 22:19 UTC │ 19 Nov 25 22:19 UTC │
│ ssh │ -p NoKubernetes-836292 sudo systemctl is-active --quiet service kubelet │ NoKubernetes-836292 │ jenkins │ v1.37.0 │ 19 Nov 25 22:19 UTC │ │
│ delete │ -p NoKubernetes-836292 │ NoKubernetes-836292 │ jenkins │ v1.37.0 │ 19 Nov 25 22:19 UTC │ 19 Nov 25 22:19 UTC │
│ start │ -p old-k8s-version-975700 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.28.0 │ old-k8s-version-975700 │ jenkins │ v1.37.0 │ 19 Nov 25 22:19 UTC │ 19 Nov 25 22:20 UTC │
│ ssh │ cert-options-071115 ssh openssl x509 -text -noout -in /var/lib/minikube/certs/apiserver.crt │ cert-options-071115 │ jenkins │ v1.37.0 │ 19 Nov 25 22:19 UTC │ 19 Nov 25 22:19 UTC │
│ ssh │ -p cert-options-071115 -- sudo cat /etc/kubernetes/admin.conf │ cert-options-071115 │ jenkins │ v1.37.0 │ 19 Nov 25 22:19 UTC │ 19 Nov 25 22:19 UTC │
│ delete │ -p cert-options-071115 │ cert-options-071115 │ jenkins │ v1.37.0 │ 19 Nov 25 22:19 UTC │ 19 Nov 25 22:19 UTC │
│ start │ -p no-preload-638439 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.34.1 │ no-preload-638439 │ jenkins │ v1.37.0 │ 19 Nov 25 22:19 UTC │ │
└─────────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴───────────────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
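The "Last Start" trace below interleaves output from several concurrent minikube processes; the number after the timestamp in each glog line is the PID (248121 is the no-preload-638439 start from the last audit row above, while 216336 and 244005 belong to other profiles still starting). Filtering on that column is the easiest way to follow one process; a sketch assuming the trace was saved to a file, here called last-start.log:

    grep ' 248121 ' last-start.log      # only the no-preload-638439 start
    grep ' 216336 ' last-start.log      # only the profile probing the 192.168.76.2 apiserver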
==> Last Start <==
Log file created at: 2025/11/19 22:19:48
Running on machine: ubuntu-20-agent-6
Binary: Built with gc go1.24.6 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1119 22:19:48.990275 248121 out.go:360] Setting OutFile to fd 1 ...
I1119 22:19:48.990406 248121 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1119 22:19:48.990419 248121 out.go:374] Setting ErrFile to fd 2...
I1119 22:19:48.990423 248121 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1119 22:19:48.990627 248121 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21918-9296/.minikube/bin
I1119 22:19:48.991193 248121 out.go:368] Setting JSON to false
I1119 22:19:48.992321 248121 start.go:133] hostinfo: {"hostname":"ubuntu-20-agent-6","uptime":3729,"bootTime":1763587060,"procs":293,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1043-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I1119 22:19:48.992426 248121 start.go:143] virtualization: kvm guest
I1119 22:19:48.994475 248121 out.go:179] * [no-preload-638439] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
I1119 22:19:48.995854 248121 out.go:179] - MINIKUBE_LOCATION=21918
I1119 22:19:48.995867 248121 notify.go:221] Checking for updates...
I1119 22:19:48.998724 248121 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1119 22:19:49.000141 248121 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21918-9296/kubeconfig
I1119 22:19:49.004556 248121 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21918-9296/.minikube
I1119 22:19:49.005782 248121 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64
I1119 22:19:49.006906 248121 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1119 22:19:49.008438 248121 config.go:182] Loaded profile config "cert-expiration-207460": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1119 22:19:49.008559 248121 config.go:182] Loaded profile config "kubernetes-upgrade-133839": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1119 22:19:49.008672 248121 config.go:182] Loaded profile config "old-k8s-version-975700": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1119 22:19:49.008773 248121 driver.go:422] Setting default libvirt URI to qemu:///system
I1119 22:19:49.032838 248121 docker.go:124] docker version: linux-29.0.2:Docker Engine - Community
I1119 22:19:49.032973 248121 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1119 22:19:49.090138 248121 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:3 ContainersRunning:3 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:65 OomKillDisable:false NGoroutines:76 SystemTime:2025-11-19 22:19:49.078907682 +0000 UTC LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-1043-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652076544 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-6 Labels:[] ExperimentalBuild:false ServerVersion:29.0.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:fcd43222d6b07379a4be9786bda52438f0dd16a1 Expected:} RuncCommit:{ID:v1.3.3-0-gd842d771 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.30.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.40.3] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v1.0.0] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1119 22:19:49.090254 248121 docker.go:319] overlay module found
I1119 22:19:49.091878 248121 out.go:179] * Using the docker driver based on user configuration
I1119 22:19:49.093038 248121 start.go:309] selected driver: docker
I1119 22:19:49.093053 248121 start.go:930] validating driver "docker" against <nil>
I1119 22:19:49.093064 248121 start.go:941] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1119 22:19:49.093625 248121 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1119 22:19:49.156775 248121 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:3 ContainersRunning:3 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:65 OomKillDisable:false NGoroutines:76 SystemTime:2025-11-19 22:19:49.145211302 +0000 UTC LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-1043-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652076544 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-6 Labels:[] ExperimentalBuild:false ServerVersion:29.0.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:fcd43222d6b07379a4be9786bda52438f0dd16a1 Expected:} RuncCommit:{ID:v1.3.3-0-gd842d771 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.30.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.40.3] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v1.0.0] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1119 22:19:49.157058 248121 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I1119 22:19:49.157439 248121 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1119 22:19:49.159270 248121 out.go:179] * Using Docker driver with root privileges
I1119 22:19:49.160689 248121 cni.go:84] Creating CNI manager for ""
I1119 22:19:49.160762 248121 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1119 22:19:49.160776 248121 start_flags.go:336] Found "CNI" CNI - setting NetworkPlugin=cni
I1119 22:19:49.160859 248121 start.go:353] cluster config:
{Name:no-preload-638439 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-638439 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1119 22:19:49.162538 248121 out.go:179] * Starting "no-preload-638439" primary control-plane node in "no-preload-638439" cluster
I1119 22:19:49.165506 248121 cache.go:134] Beginning downloading kic base image for docker with containerd
I1119 22:19:49.166733 248121 out.go:179] * Pulling base image v0.0.48-1763561786-21918 ...
I1119 22:19:49.168220 248121 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
I1119 22:19:49.168286 248121 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865 in local docker daemon
I1119 22:19:49.168353 248121 profile.go:143] Saving config to /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/config.json ...
I1119 22:19:49.168395 248121 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/config.json: {Name:mk80aa81bbdb1209c6edea855d376fb83f4d3158 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:19:49.168457 248121 cache.go:107] acquiring lock: {Name:mk3047e241e868539f7fa71732db2494bd5accac Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1119 22:19:49.168492 248121 cache.go:107] acquiring lock: {Name:mkfa0cff605af699ff39a13e0c5b50d01194658e Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1119 22:19:49.168527 248121 cache.go:107] acquiring lock: {Name:mk97f6c43b208e1a8e4ae123374c490c517b3f77 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1119 22:19:49.168548 248121 cache.go:115] /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 exists
I1119 22:19:49.168561 248121 cache.go:96] cache image "gcr.io/k8s-minikube/storage-provisioner:v5" -> "/home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5" took 131.881µs
I1119 22:19:49.168577 248121 cache.go:80] save to tar file gcr.io/k8s-minikube/storage-provisioner:v5 -> /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 succeeded
I1119 22:19:49.168586 248121 cache.go:107] acquiring lock: {Name:mk95307f4a2dfa9e7a1dbc92b6b01bf8659d9b13 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1119 22:19:49.168623 248121 image.go:138] retrieving image: registry.k8s.io/kube-proxy:v1.34.1
I1119 22:19:49.168652 248121 cache.go:107] acquiring lock: {Name:mk07d9df97c614ffb0affecc21609079d8bc04b5 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1119 22:19:49.168677 248121 image.go:138] retrieving image: registry.k8s.io/kube-controller-manager:v1.34.1
I1119 22:19:49.168687 248121 image.go:138] retrieving image: registry.k8s.io/kube-scheduler:v1.34.1
I1119 22:19:49.168749 248121 image.go:138] retrieving image: registry.k8s.io/pause:3.10.1
I1119 22:19:49.169004 248121 cache.go:107] acquiring lock: {Name:mk5d2dd3f2b18e53fa90921f4c0c75406a912168 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1119 22:19:49.169610 248121 image.go:138] retrieving image: registry.k8s.io/kube-apiserver:v1.34.1
I1119 22:19:49.169116 248121 cache.go:107] acquiring lock: {Name:mkabd0eddb0cd66931eabcbabac2ddbe82464607 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1119 22:19:49.170495 248121 image.go:138] retrieving image: registry.k8s.io/coredns/coredns:v1.12.1
I1119 22:19:49.169136 248121 cache.go:107] acquiring lock: {Name:mkc18e74e5d25fdb795ed308cf7ce3da142a9be0 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1119 22:19:49.170703 248121 image.go:138] retrieving image: registry.k8s.io/etcd:3.6.4-0
I1119 22:19:49.171552 248121 image.go:181] daemon lookup for registry.k8s.io/kube-scheduler:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-scheduler:v1.34.1
I1119 22:19:49.171558 248121 image.go:181] daemon lookup for registry.k8s.io/pause:3.10.1: Error response from daemon: No such image: registry.k8s.io/pause:3.10.1
I1119 22:19:49.171569 248121 image.go:181] daemon lookup for registry.k8s.io/kube-controller-manager:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-controller-manager:v1.34.1
I1119 22:19:49.171576 248121 image.go:181] daemon lookup for registry.k8s.io/kube-proxy:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-proxy:v1.34.1
I1119 22:19:49.172459 248121 image.go:181] daemon lookup for registry.k8s.io/kube-apiserver:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-apiserver:v1.34.1
I1119 22:19:49.172478 248121 image.go:181] daemon lookup for registry.k8s.io/etcd:3.6.4-0: Error response from daemon: No such image: registry.k8s.io/etcd:3.6.4-0
I1119 22:19:49.172507 248121 image.go:181] daemon lookup for registry.k8s.io/coredns/coredns:v1.12.1: Error response from daemon: No such image: registry.k8s.io/coredns/coredns:v1.12.1
I1119 22:19:49.200114 248121 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865 in local docker daemon, skipping pull
I1119 22:19:49.200187 248121 cache.go:158] gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865 exists in daemon, skipping load
I1119 22:19:49.200226 248121 cache.go:243] Successfully downloaded all kic artifacts
I1119 22:19:49.200265 248121 start.go:360] acquireMachinesLock for no-preload-638439: {Name:mk6b4dc7fd24c69d9288f594d61933b094ed5442 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1119 22:19:49.200436 248121 start.go:364] duration metric: took 142.192µs to acquireMachinesLock for "no-preload-638439"
I1119 22:19:49.200608 248121 start.go:93] Provisioning new machine with config: &{Name:no-preload-638439 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-638439 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1119 22:19:49.200727 248121 start.go:125] createHost starting for "" (driver="docker")
I1119 22:19:46.119049 216336 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1119 22:19:46.119476 216336 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1119 22:19:46.119522 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1119 22:19:46.119566 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1119 22:19:46.151572 216336 cri.go:89] found id: "0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0"
I1119 22:19:46.151601 216336 cri.go:89] found id: "b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42"
I1119 22:19:46.151607 216336 cri.go:89] found id: ""
I1119 22:19:46.151617 216336 logs.go:282] 2 containers: [0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0 b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42]
I1119 22:19:46.151687 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:46.155958 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:46.160473 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1119 22:19:46.160530 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1119 22:19:46.191589 216336 cri.go:89] found id: "4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc"
I1119 22:19:46.191612 216336 cri.go:89] found id: ""
I1119 22:19:46.191619 216336 logs.go:282] 1 containers: [4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc]
I1119 22:19:46.191670 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:46.196383 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1119 22:19:46.196437 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1119 22:19:46.225509 216336 cri.go:89] found id: ""
I1119 22:19:46.225529 216336 logs.go:282] 0 containers: []
W1119 22:19:46.225540 216336 logs.go:284] No container was found matching "coredns"
I1119 22:19:46.225546 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1119 22:19:46.225599 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1119 22:19:46.254866 216336 cri.go:89] found id: "599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0"
I1119 22:19:46.254913 216336 cri.go:89] found id: ""
I1119 22:19:46.254924 216336 logs.go:282] 1 containers: [599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0]
I1119 22:19:46.254979 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:46.259701 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1119 22:19:46.259765 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1119 22:19:46.292564 216336 cri.go:89] found id: ""
I1119 22:19:46.292591 216336 logs.go:282] 0 containers: []
W1119 22:19:46.292601 216336 logs.go:284] No container was found matching "kube-proxy"
I1119 22:19:46.292608 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1119 22:19:46.292667 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1119 22:19:46.329564 216336 cri.go:89] found id: "1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2"
I1119 22:19:46.329596 216336 cri.go:89] found id: ""
I1119 22:19:46.329606 216336 logs.go:282] 1 containers: [1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2]
I1119 22:19:46.329667 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:46.335222 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1119 22:19:46.335276 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1119 22:19:46.367004 216336 cri.go:89] found id: ""
I1119 22:19:46.367028 216336 logs.go:282] 0 containers: []
W1119 22:19:46.367039 216336 logs.go:284] No container was found matching "kindnet"
I1119 22:19:46.367047 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1119 22:19:46.367105 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1119 22:19:46.399927 216336 cri.go:89] found id: ""
I1119 22:19:46.399974 216336 logs.go:282] 0 containers: []
W1119 22:19:46.399984 216336 logs.go:284] No container was found matching "storage-provisioner"
I1119 22:19:46.400002 216336 logs.go:123] Gathering logs for describe nodes ...
I1119 22:19:46.400017 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1119 22:19:46.463044 216336 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1119 22:19:46.463068 216336 logs.go:123] Gathering logs for kube-apiserver [b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42] ...
I1119 22:19:46.463083 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42"
I1119 22:19:46.497691 216336 logs.go:123] Gathering logs for etcd [4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc] ...
I1119 22:19:46.497718 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc"
I1119 22:19:46.535424 216336 logs.go:123] Gathering logs for kube-scheduler [599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0] ...
I1119 22:19:46.535455 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0"
I1119 22:19:46.575124 216336 logs.go:123] Gathering logs for container status ...
I1119 22:19:46.575154 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1119 22:19:46.607742 216336 logs.go:123] Gathering logs for kubelet ...
I1119 22:19:46.607769 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1119 22:19:46.710299 216336 logs.go:123] Gathering logs for dmesg ...
I1119 22:19:46.710332 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1119 22:19:46.724051 216336 logs.go:123] Gathering logs for kube-apiserver [0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0] ...
I1119 22:19:46.724080 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0"
I1119 22:19:46.762457 216336 logs.go:123] Gathering logs for kube-controller-manager [1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2] ...
I1119 22:19:46.762489 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2"
I1119 22:19:46.803568 216336 logs.go:123] Gathering logs for containerd ...
I1119 22:19:46.803601 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1119 22:19:49.354660 216336 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1119 22:19:49.355043 216336 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1119 22:19:49.355109 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1119 22:19:49.355169 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1119 22:19:49.395681 216336 cri.go:89] found id: "0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0"
I1119 22:19:49.395705 216336 cri.go:89] found id: "b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42"
I1119 22:19:49.395709 216336 cri.go:89] found id: ""
I1119 22:19:49.395716 216336 logs.go:282] 2 containers: [0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0 b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42]
I1119 22:19:49.395781 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:49.403424 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:49.410799 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1119 22:19:49.410949 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1119 22:19:49.452918 216336 cri.go:89] found id: "4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc"
I1119 22:19:49.452941 216336 cri.go:89] found id: ""
I1119 22:19:49.452952 216336 logs.go:282] 1 containers: [4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc]
I1119 22:19:49.453011 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:49.458252 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1119 22:19:49.458323 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1119 22:19:49.497813 216336 cri.go:89] found id: ""
I1119 22:19:49.497837 216336 logs.go:282] 0 containers: []
W1119 22:19:49.497855 216336 logs.go:284] No container was found matching "coredns"
I1119 22:19:49.497863 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1119 22:19:49.497929 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1119 22:19:49.533334 216336 cri.go:89] found id: "599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0"
I1119 22:19:49.533350 216336 cri.go:89] found id: ""
I1119 22:19:49.533357 216336 logs.go:282] 1 containers: [599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0]
I1119 22:19:49.533399 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:49.537784 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1119 22:19:49.537858 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1119 22:19:49.568018 216336 cri.go:89] found id: ""
I1119 22:19:49.568044 216336 logs.go:282] 0 containers: []
W1119 22:19:49.568056 216336 logs.go:284] No container was found matching "kube-proxy"
I1119 22:19:49.568063 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1119 22:19:49.568119 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1119 22:19:49.609525 216336 cri.go:89] found id: "1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2"
I1119 22:19:49.609556 216336 cri.go:89] found id: ""
I1119 22:19:49.609566 216336 logs.go:282] 1 containers: [1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2]
I1119 22:19:49.609626 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:49.616140 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1119 22:19:49.616211 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1119 22:19:49.655231 216336 cri.go:89] found id: ""
I1119 22:19:49.655262 216336 logs.go:282] 0 containers: []
W1119 22:19:49.655272 216336 logs.go:284] No container was found matching "kindnet"
I1119 22:19:49.655279 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1119 22:19:49.655333 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1119 22:19:49.689095 216336 cri.go:89] found id: ""
I1119 22:19:49.689153 216336 logs.go:282] 0 containers: []
W1119 22:19:49.689165 216336 logs.go:284] No container was found matching "storage-provisioner"
I1119 22:19:49.689184 216336 logs.go:123] Gathering logs for kubelet ...
I1119 22:19:49.689221 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1119 22:19:49.810665 216336 logs.go:123] Gathering logs for describe nodes ...
I1119 22:19:49.810701 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1119 22:19:49.901949 216336 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1119 22:19:49.901999 216336 logs.go:123] Gathering logs for kube-apiserver [b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42] ...
I1119 22:19:49.902017 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42"
I1119 22:19:49.959095 216336 logs.go:123] Gathering logs for kube-scheduler [599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0] ...
I1119 22:19:49.959128 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0"
I1119 22:19:50.003553 216336 logs.go:123] Gathering logs for containerd ...
I1119 22:19:50.003592 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1119 22:19:50.058586 216336 logs.go:123] Gathering logs for dmesg ...
I1119 22:19:50.058623 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1119 22:19:50.074307 216336 logs.go:123] Gathering logs for kube-apiserver [0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0] ...
I1119 22:19:50.074340 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0"
I1119 22:19:50.111045 216336 logs.go:123] Gathering logs for etcd [4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc] ...
I1119 22:19:50.111081 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc"
I1119 22:19:50.150599 216336 logs.go:123] Gathering logs for kube-controller-manager [1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2] ...
I1119 22:19:50.150632 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2"
I1119 22:19:50.185189 216336 logs.go:123] Gathering logs for container status ...
I1119 22:19:50.185216 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1119 22:19:48.204748 244005 out.go:252] - Booting up control plane ...
I1119 22:19:48.204897 244005 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1119 22:19:48.205005 244005 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1119 22:19:48.206240 244005 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1119 22:19:48.231808 244005 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1119 22:19:48.232853 244005 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1119 22:19:48.232929 244005 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1119 22:19:48.338373 244005 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
I1119 22:19:49.203330 248121 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I1119 22:19:49.203668 248121 start.go:159] libmachine.API.Create for "no-preload-638439" (driver="docker")
I1119 22:19:49.203755 248121 client.go:173] LocalClient.Create starting
I1119 22:19:49.203905 248121 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21918-9296/.minikube/certs/ca.pem
I1119 22:19:49.203977 248121 main.go:143] libmachine: Decoding PEM data...
I1119 22:19:49.204016 248121 main.go:143] libmachine: Parsing certificate...
I1119 22:19:49.204103 248121 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21918-9296/.minikube/certs/cert.pem
I1119 22:19:49.204159 248121 main.go:143] libmachine: Decoding PEM data...
I1119 22:19:49.204190 248121 main.go:143] libmachine: Parsing certificate...
I1119 22:19:49.204684 248121 cli_runner.go:164] Run: docker network inspect no-preload-638439 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1119 22:19:49.233073 248121 cli_runner.go:211] docker network inspect no-preload-638439 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1119 22:19:49.233150 248121 network_create.go:284] running [docker network inspect no-preload-638439] to gather additional debugging logs...
I1119 22:19:49.233181 248121 cli_runner.go:164] Run: docker network inspect no-preload-638439
W1119 22:19:49.260692 248121 cli_runner.go:211] docker network inspect no-preload-638439 returned with exit code 1
I1119 22:19:49.260724 248121 network_create.go:287] error running [docker network inspect no-preload-638439]: docker network inspect no-preload-638439: exit status 1
stdout:
[]
stderr:
Error response from daemon: network no-preload-638439 not found
I1119 22:19:49.260740 248121 network_create.go:289] output of [docker network inspect no-preload-638439]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network no-preload-638439 not found
** /stderr **
I1119 22:19:49.260835 248121 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1119 22:19:49.281699 248121 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-02d9279961e7 IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:f2:f0:7b:99:dd:08} reservation:<nil>}
I1119 22:19:49.282496 248121 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-474134d72c89 IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:ee:14:41:ce:21:e4} reservation:<nil>}
I1119 22:19:49.283428 248121 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-527206f47d61 IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:66:ef:fd:4c:e4:1b} reservation:<nil>}
I1119 22:19:49.284394 248121 network.go:211] skipping subnet 192.168.76.0/24 that is taken: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName:br-ac16fd64007f IfaceIPv4:192.168.76.1 IfaceMTU:1500 IfaceMAC:16:dc:21:09:78:e5} reservation:<nil>}
I1119 22:19:49.285073 248121 network.go:211] skipping subnet 192.168.85.0/24 that is taken: &{IP:192.168.85.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.85.0/24 Gateway:192.168.85.1 ClientMin:192.168.85.2 ClientMax:192.168.85.254 Broadcast:192.168.85.255 IsPrivate:true Interface:{IfaceName:br-11547e9c7cf3 IfaceIPv4:192.168.85.1 IfaceMTU:1500 IfaceMAC:a2:49:21:10:91:74} reservation:<nil>}
I1119 22:19:49.286118 248121 network.go:211] skipping subnet 192.168.94.0/24 that is taken: &{IP:192.168.94.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.94.0/24 Gateway:192.168.94.1 ClientMin:192.168.94.2 ClientMax:192.168.94.254 Broadcast:192.168.94.255 IsPrivate:true Interface:{IfaceName:br-e025fa4e3e96 IfaceIPv4:192.168.94.1 IfaceMTU:1500 IfaceMAC:c2:19:71:ce:4a:3c} reservation:<nil>}
I1119 22:19:49.287275 248121 network.go:206] using free private subnet 192.168.103.0/24: &{IP:192.168.103.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.103.0/24 Gateway:192.168.103.1 ClientMin:192.168.103.2 ClientMax:192.168.103.254 Broadcast:192.168.103.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001e92190}
I1119 22:19:49.287353 248121 network_create.go:124] attempt to create docker network no-preload-638439 192.168.103.0/24 with gateway 192.168.103.1 and MTU of 1500 ...
I1119 22:19:49.287448 248121 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.103.0/24 --gateway=192.168.103.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=no-preload-638439 no-preload-638439
I1119 22:19:49.349621 248121 cache.go:162] opening: /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1
I1119 22:19:49.349748 248121 network_create.go:108] docker network no-preload-638439 192.168.103.0/24 created
I1119 22:19:49.349780 248121 kic.go:121] calculated static IP "192.168.103.2" for the "no-preload-638439" container
I1119 22:19:49.349859 248121 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1119 22:19:49.350149 248121 cache.go:162] opening: /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0
I1119 22:19:49.361305 248121 cache.go:162] opening: /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1
I1119 22:19:49.363150 248121 cache.go:162] opening: /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1
I1119 22:19:49.375619 248121 cli_runner.go:164] Run: docker volume create no-preload-638439 --label name.minikube.sigs.k8s.io=no-preload-638439 --label created_by.minikube.sigs.k8s.io=true
I1119 22:19:49.389385 248121 cache.go:162] opening: /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1
I1119 22:19:49.396358 248121 cache.go:162] opening: /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1
I1119 22:19:49.402036 248121 oci.go:103] Successfully created a docker volume no-preload-638439
I1119 22:19:49.402119 248121 cli_runner.go:164] Run: docker run --rm --name no-preload-638439-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=no-preload-638439 --entrypoint /usr/bin/test -v no-preload-638439:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865 -d /var/lib
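The "preload-sidecar" run above is a cheap probe: a throwaway container mounts the freshly created volume at /var, and its entrypoint is literally /usr/bin/test, so the container exits 0 only if /var/lib exists on the volume. A hedged os/exec sketch of the same probe (the image ref is copied from the log; the helper name is illustrative):

package main

import (
	"fmt"
	"os/exec"
)

// probeVolume runs a disposable container whose only job is to
// `test -d /var/lib` with the named volume mounted at /var.
func probeVolume(volume, image string) error {
	cmd := exec.Command("docker", "run", "--rm",
		"--entrypoint", "/usr/bin/test",
		"-v", volume+":/var", image, "-d", "/var/lib")
	out, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("volume probe failed: %v: %s", err, out)
	}
	return nil
}

func main() {
	err := probeVolume("no-preload-638439",
		"gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918")
	fmt.Println("probe err:", err)
}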
I1119 22:19:49.404338 248121 cache.go:162] opening: /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1
I1119 22:19:49.471774 248121 cache.go:157] /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 exists
I1119 22:19:49.471808 248121 cache.go:96] cache image "registry.k8s.io/pause:3.10.1" -> "/home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1" took 303.216742ms
I1119 22:19:49.471832 248121 cache.go:80] save to tar file registry.k8s.io/pause:3.10.1 -> /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 succeeded
I1119 22:19:49.854076 248121 cache.go:157] /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1 exists
I1119 22:19:49.854102 248121 cache.go:96] cache image "registry.k8s.io/kube-proxy:v1.34.1" -> "/home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1" took 685.635122ms
I1119 22:19:49.854114 248121 cache.go:80] save to tar file registry.k8s.io/kube-proxy:v1.34.1 -> /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1 succeeded
I1119 22:19:49.969965 248121 oci.go:107] Successfully prepared a docker volume no-preload-638439
I1119 22:19:49.970027 248121 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
W1119 22:19:49.970211 248121 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
W1119 22:19:49.970251 248121 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.
I1119 22:19:49.970298 248121 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1119 22:19:50.046746 248121 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname no-preload-638439 --name no-preload-638439 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=no-preload-638439 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=no-preload-638439 --network no-preload-638439 --ip 192.168.103.2 --volume no-preload-638439:/var --security-opt apparmor=unconfined --memory=3072mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865
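Note the publish flags in the run above: `--publish=127.0.0.1::22` leaves the host port empty, so Docker binds an ephemeral port on loopback. That is why the later `docker container inspect -f '{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'` calls exist: they recover the assigned port (33063 in this run). A small Go sketch of that recovery, using the same inspect template:

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// hostPortFor asks Docker which ephemeral host port was bound for a
// given container port, mirroring the inspect template in the log.
func hostPortFor(container, port string) (string, error) {
	tmpl := fmt.Sprintf(`{{(index (index .NetworkSettings.Ports "%s") 0).HostPort}}`, port)
	out, err := exec.Command("docker", "container", "inspect",
		"-f", tmpl, container).Output()
	if err != nil {
		return "", err
	}
	return strings.TrimSpace(string(out)), nil
}

func main() {
	// --publish=127.0.0.1::22 left the host port empty, so Docker
	// chose one (33063 in this run); recover it here.
	p, err := hostPortFor("no-preload-638439", "22/tcp")
	fmt.Println(p, err)
}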
I1119 22:19:50.374513 248121 cli_runner.go:164] Run: docker container inspect no-preload-638439 --format={{.State.Running}}
I1119 22:19:50.397354 248121 cli_runner.go:164] Run: docker container inspect no-preload-638439 --format={{.State.Status}}
I1119 22:19:50.420153 248121 cli_runner.go:164] Run: docker exec no-preload-638439 stat /var/lib/dpkg/alternatives/iptables
I1119 22:19:50.480826 248121 oci.go:144] the created container "no-preload-638439" has a running status.
I1119 22:19:50.480855 248121 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21918-9296/.minikube/machines/no-preload-638439/id_rsa...
I1119 22:19:50.741014 248121 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21918-9296/.minikube/machines/no-preload-638439/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1119 22:19:50.777653 248121 cli_runner.go:164] Run: docker container inspect no-preload-638439 --format={{.State.Status}}
I1119 22:19:50.805773 248121 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1119 22:19:50.805802 248121 kic_runner.go:114] Args: [docker exec --privileged no-preload-638439 chown docker:docker /home/docker/.ssh/authorized_keys]
I1119 22:19:50.864742 248121 cli_runner.go:164] Run: docker container inspect no-preload-638439 --format={{.State.Status}}
I1119 22:19:50.878812 248121 cache.go:157] /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1 exists
I1119 22:19:50.878846 248121 cache.go:96] cache image "registry.k8s.io/kube-apiserver:v1.34.1" -> "/home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1" took 1.709887948s
I1119 22:19:50.878866 248121 cache.go:80] save to tar file registry.k8s.io/kube-apiserver:v1.34.1 -> /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1 succeeded
I1119 22:19:50.883024 248121 cache.go:157] /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1 exists
I1119 22:19:50.883052 248121 cache.go:96] cache image "registry.k8s.io/kube-controller-manager:v1.34.1" -> "/home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1" took 1.714530905s
I1119 22:19:50.883067 248121 cache.go:80] save to tar file registry.k8s.io/kube-controller-manager:v1.34.1 -> /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1 succeeded
I1119 22:19:50.889090 248121 cache.go:157] /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1 exists
I1119 22:19:50.889119 248121 cache.go:96] cache image "registry.k8s.io/kube-scheduler:v1.34.1" -> "/home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1" took 1.72053761s
I1119 22:19:50.889134 248121 cache.go:80] save to tar file registry.k8s.io/kube-scheduler:v1.34.1 -> /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1 succeeded
I1119 22:19:50.890545 248121 machine.go:94] provisionDockerMachine start ...
I1119 22:19:50.890654 248121 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-638439
I1119 22:19:50.917029 248121 main.go:143] libmachine: Using SSH client type: native
I1119 22:19:50.917372 248121 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x8416e0] 0x8443c0 <nil> [] 0s} 127.0.0.1 33063 <nil> <nil>}
I1119 22:19:50.917394 248121 main.go:143] libmachine: About to run SSH command:
hostname
I1119 22:19:50.918143 248121 main.go:143] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:41082->127.0.0.1:33063: read: connection reset by peer
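The "connection reset by peer" above is expected on the first dial: sshd inside the just-started container is not accepting connections yet, and provisioning simply retries (the same command succeeds at 22:19:54 below). A minimal stdlib sketch of such a retry loop; the backoff policy here is illustrative, not minikube's actual one:

package main

import (
	"fmt"
	"net"
	"time"
)

// dialWithRetry keeps re-dialing until the SSH endpoint inside the
// container accepts connections; the first reset above is this window.
func dialWithRetry(addr string, attempts int) (net.Conn, error) {
	var lastErr error
	for i := 0; i < attempts; i++ {
		conn, err := net.DialTimeout("tcp", addr, 2*time.Second)
		if err == nil {
			return conn, nil
		}
		lastErr = err
		time.Sleep(time.Duration(i+1) * 500 * time.Millisecond) // linear backoff
	}
	return nil, fmt.Errorf("ssh endpoint never came up: %w", lastErr)
}

func main() {
	conn, err := dialWithRetry("127.0.0.1:33063", 10)
	if err == nil {
		conn.Close()
	}
	fmt.Println("err:", err)
}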
I1119 22:19:50.954753 248121 cache.go:157] /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1 exists
I1119 22:19:50.954786 248121 cache.go:96] cache image "registry.k8s.io/coredns/coredns:v1.12.1" -> "/home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1" took 1.785730546s
I1119 22:19:50.954801 248121 cache.go:80] save to tar file registry.k8s.io/coredns/coredns:v1.12.1 -> /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1 succeeded
I1119 22:19:51.295575 248121 cache.go:157] /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0 exists
I1119 22:19:51.295602 248121 cache.go:96] cache image "registry.k8s.io/etcd:3.6.4-0" -> "/home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0" took 2.126530323s
I1119 22:19:51.295614 248121 cache.go:80] save to tar file registry.k8s.io/etcd:3.6.4-0 -> /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0 succeeded
I1119 22:19:51.295629 248121 cache.go:87] Successfully saved all images to host disk.
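The cache.go lines above follow one pattern per image: open the tarball path under .minikube/cache/images/amd64, and if it already exists report a hit ("exists ... took ... succeeded"); only on a miss is the image pulled and re-saved. A hedged sketch of just the hit path, with the image-name-to-path mapping simplified from what the log shows:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"time"
)

// ensureCached reports a cache hit when the image tarball is already
// on disk; the download path is elided (minikube pulls and re-saves
// the image when the stat misses).
func ensureCached(cacheDir, image string) (string, error) {
	start := time.Now()
	// registry.k8s.io/pause:3.10.1 -> registry.k8s.io/pause_3.10.1
	rel := strings.ReplaceAll(image, ":", "_")
	path := filepath.Join(cacheDir, rel)
	if _, err := os.Stat(path); err == nil {
		fmt.Printf("cache image %q -> %q took %v\n", image, path, time.Since(start))
		return path, nil
	}
	return "", fmt.Errorf("cache miss for %s: download step omitted in this sketch", image)
}

func main() {
	p, err := ensureCached(".minikube/cache/images/amd64", "registry.k8s.io/pause:3.10.1")
	fmt.Println(p, err)
}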
I1119 22:19:53.340728 244005 kubeadm.go:319] [apiclient] All control plane components are healthy after 5.002509 seconds
I1119 22:19:53.340920 244005 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1119 22:19:53.353852 244005 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1119 22:19:53.877436 244005 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1119 22:19:53.877630 244005 kubeadm.go:319] [mark-control-plane] Marking the node old-k8s-version-975700 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1119 22:19:54.388156 244005 kubeadm.go:319] [bootstrap-token] Using token: cb0uuv.ole7whobrm4tnmeu
I1119 22:19:54.389814 244005 out.go:252] - Configuring RBAC rules ...
I1119 22:19:54.389996 244005 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1119 22:19:54.396226 244005 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1119 22:19:54.404040 244005 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1119 22:19:54.407336 244005 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1119 22:19:54.410095 244005 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1119 22:19:54.412761 244005 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1119 22:19:54.424912 244005 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1119 22:19:54.627091 244005 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1119 22:19:54.803149 244005 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1119 22:19:54.807538 244005 kubeadm.go:319]
I1119 22:19:54.807624 244005 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1119 22:19:54.807631 244005 kubeadm.go:319]
I1119 22:19:54.807719 244005 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1119 22:19:54.807724 244005 kubeadm.go:319]
I1119 22:19:54.807753 244005 kubeadm.go:319] mkdir -p $HOME/.kube
I1119 22:19:54.807821 244005 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1119 22:19:54.807898 244005 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1119 22:19:54.807905 244005 kubeadm.go:319]
I1119 22:19:54.807968 244005 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1119 22:19:54.807973 244005 kubeadm.go:319]
I1119 22:19:54.808037 244005 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1119 22:19:54.808042 244005 kubeadm.go:319]
I1119 22:19:54.808105 244005 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1119 22:19:54.808197 244005 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1119 22:19:54.808278 244005 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1119 22:19:54.808283 244005 kubeadm.go:319]
I1119 22:19:54.808378 244005 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1119 22:19:54.808482 244005 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1119 22:19:54.808488 244005 kubeadm.go:319]
I1119 22:19:54.808581 244005 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token cb0uuv.ole7whobrm4tnmeu \
I1119 22:19:54.808697 244005 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:6123875ff628fb9eedbd72f2253477865aa197083b84a1d60cb6c00de308bc63 \
I1119 22:19:54.808745 244005 kubeadm.go:319] --control-plane
I1119 22:19:54.808753 244005 kubeadm.go:319]
I1119 22:19:54.808860 244005 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1119 22:19:54.808867 244005 kubeadm.go:319]
I1119 22:19:54.808978 244005 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token cb0uuv.ole7whobrm4tnmeu \
I1119 22:19:54.809119 244005 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:6123875ff628fb9eedbd72f2253477865aa197083b84a1d60cb6c00de308bc63
I1119 22:19:54.812703 244005 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1043-gcp\n", err: exit status 1
I1119 22:19:54.812825 244005 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1119 22:19:54.812852 244005 cni.go:84] Creating CNI manager for ""
I1119 22:19:54.812906 244005 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1119 22:19:54.814910 244005 out.go:179] * Configuring CNI (Container Networking Interface) ...
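The cni.go:143 decision above keys off the driver/runtime pair: with the docker driver and a non-docker runtime (containerd here), minikube injects kindnet so pods get networking without a user-supplied CNI. A deliberately reduced sketch of that branch; the real logic in cni.go handles many more combinations:

package main

import "fmt"

// recommendCNI sketches the decision at cni.go:143: docker driver plus
// a non-docker runtime yields kindnet.
func recommendCNI(driver, runtime string) string {
	if driver == "docker" && runtime != "docker" {
		return "kindnet"
	}
	return "bridge" // simplification: the real logic has more cases
}

func main() {
	fmt.Println(recommendCNI("docker", "containerd"))
}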
I1119 22:19:52.733247 216336 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1119 22:19:52.733770 216336 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1119 22:19:52.733821 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1119 22:19:52.733900 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1119 22:19:52.766790 216336 cri.go:89] found id: "0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0"
I1119 22:19:52.766819 216336 cri.go:89] found id: "b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42"
I1119 22:19:52.766824 216336 cri.go:89] found id: ""
I1119 22:19:52.766834 216336 logs.go:282] 2 containers: [0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0 b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42]
I1119 22:19:52.766917 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:52.771725 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:52.776283 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1119 22:19:52.776357 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1119 22:19:52.808152 216336 cri.go:89] found id: "4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc"
I1119 22:19:52.808179 216336 cri.go:89] found id: ""
I1119 22:19:52.808190 216336 logs.go:282] 1 containers: [4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc]
I1119 22:19:52.808260 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:52.812851 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1119 22:19:52.812954 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1119 22:19:52.844459 216336 cri.go:89] found id: ""
I1119 22:19:52.844483 216336 logs.go:282] 0 containers: []
W1119 22:19:52.844492 216336 logs.go:284] No container was found matching "coredns"
I1119 22:19:52.844499 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1119 22:19:52.844560 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1119 22:19:52.875911 216336 cri.go:89] found id: "599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0"
I1119 22:19:52.875939 216336 cri.go:89] found id: ""
I1119 22:19:52.875948 216336 logs.go:282] 1 containers: [599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0]
I1119 22:19:52.876008 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:52.880449 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1119 22:19:52.880526 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1119 22:19:52.913101 216336 cri.go:89] found id: ""
I1119 22:19:52.913139 216336 logs.go:282] 0 containers: []
W1119 22:19:52.913150 216336 logs.go:284] No container was found matching "kube-proxy"
I1119 22:19:52.913158 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1119 22:19:52.913240 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1119 22:19:52.945143 216336 cri.go:89] found id: "1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2"
I1119 22:19:52.945172 216336 cri.go:89] found id: ""
I1119 22:19:52.945182 216336 logs.go:282] 1 containers: [1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2]
I1119 22:19:52.945240 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:52.949921 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1119 22:19:52.950006 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1119 22:19:52.984180 216336 cri.go:89] found id: ""
I1119 22:19:52.984214 216336 logs.go:282] 0 containers: []
W1119 22:19:52.984225 216336 logs.go:284] No container was found matching "kindnet"
I1119 22:19:52.984233 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1119 22:19:52.984296 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1119 22:19:53.016636 216336 cri.go:89] found id: ""
I1119 22:19:53.016661 216336 logs.go:282] 0 containers: []
W1119 22:19:53.016671 216336 logs.go:284] No container was found matching "storage-provisioner"
I1119 22:19:53.016691 216336 logs.go:123] Gathering logs for kube-apiserver [b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42] ...
I1119 22:19:53.016707 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42"
I1119 22:19:53.053700 216336 logs.go:123] Gathering logs for kube-scheduler [599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0] ...
I1119 22:19:53.053730 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0"
I1119 22:19:53.088889 216336 logs.go:123] Gathering logs for dmesg ...
I1119 22:19:53.088922 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1119 22:19:53.104350 216336 logs.go:123] Gathering logs for describe nodes ...
I1119 22:19:53.104378 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1119 22:19:53.165418 216336 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1119 22:19:53.165442 216336 logs.go:123] Gathering logs for kube-apiserver [0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0] ...
I1119 22:19:53.165460 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0"
I1119 22:19:53.197214 216336 logs.go:123] Gathering logs for etcd [4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc] ...
I1119 22:19:53.197252 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc"
I1119 22:19:53.228109 216336 logs.go:123] Gathering logs for kube-controller-manager [1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2] ...
I1119 22:19:53.228145 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2"
I1119 22:19:53.261694 216336 logs.go:123] Gathering logs for containerd ...
I1119 22:19:53.261727 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1119 22:19:53.302850 216336 logs.go:123] Gathering logs for container status ...
I1119 22:19:53.302891 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1119 22:19:53.333442 216336 logs.go:123] Gathering logs for kubelet ...
I1119 22:19:53.333466 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
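The interleaved 216336 process above is in a wait loop: it probes the apiserver's /healthz (api_server.go:253), gets "connection refused" because the apiserver container is restarting, and falls back to collecting container logs over SSH before retrying. A self-contained sketch of that probe; the InsecureSkipVerify here is a simplification to avoid wiring in the cluster CA:

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

// checkHealthz performs a probe like the one in the log: GET /healthz
// on the apiserver. A refused connection means it isn't up yet.
func checkHealthz(endpoint string) error {
	client := &http.Client{
		Timeout: 2 * time.Second,
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Get(endpoint + "/healthz")
	if err != nil {
		return fmt.Errorf("stopped: %w", err) // e.g. connect: connection refused
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("healthz returned %s", resp.Status)
	}
	return nil
}

func main() {
	fmt.Println(checkHealthz("https://192.168.76.2:8443"))
}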
I1119 22:19:54.046074 248121 main.go:143] libmachine: SSH cmd err, output: <nil>: no-preload-638439
I1119 22:19:54.046106 248121 ubuntu.go:182] provisioning hostname "no-preload-638439"
I1119 22:19:54.046172 248121 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-638439
I1119 22:19:54.065777 248121 main.go:143] libmachine: Using SSH client type: native
I1119 22:19:54.066044 248121 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x8416e0] 0x8443c0 <nil> [] 0s} 127.0.0.1 33063 <nil> <nil>}
I1119 22:19:54.066060 248121 main.go:143] libmachine: About to run SSH command:
sudo hostname no-preload-638439 && echo "no-preload-638439" | sudo tee /etc/hostname
I1119 22:19:54.204707 248121 main.go:143] libmachine: SSH cmd err, output: <nil>: no-preload-638439
I1119 22:19:54.204779 248121 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-638439
I1119 22:19:54.223401 248121 main.go:143] libmachine: Using SSH client type: native
I1119 22:19:54.223669 248121 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x8416e0] 0x8443c0 <nil> [] 0s} 127.0.0.1 33063 <nil> <nil>}
I1119 22:19:54.223696 248121 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sno-preload-638439' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 no-preload-638439/g' /etc/hosts;
else
echo '127.0.1.1 no-preload-638439' | sudo tee -a /etc/hosts;
fi
fi
I1119 22:19:54.352178 248121 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1119 22:19:54.352206 248121 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21918-9296/.minikube CaCertPath:/home/jenkins/minikube-integration/21918-9296/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21918-9296/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21918-9296/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21918-9296/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21918-9296/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21918-9296/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21918-9296/.minikube}
I1119 22:19:54.352222 248121 ubuntu.go:190] setting up certificates
I1119 22:19:54.352230 248121 provision.go:84] configureAuth start
I1119 22:19:54.352301 248121 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" no-preload-638439
I1119 22:19:54.371286 248121 provision.go:143] copyHostCerts
I1119 22:19:54.371354 248121 exec_runner.go:144] found /home/jenkins/minikube-integration/21918-9296/.minikube/ca.pem, removing ...
I1119 22:19:54.371370 248121 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21918-9296/.minikube/ca.pem
I1119 22:19:54.371451 248121 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21918-9296/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21918-9296/.minikube/ca.pem (1078 bytes)
I1119 22:19:54.371570 248121 exec_runner.go:144] found /home/jenkins/minikube-integration/21918-9296/.minikube/cert.pem, removing ...
I1119 22:19:54.371582 248121 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21918-9296/.minikube/cert.pem
I1119 22:19:54.371623 248121 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21918-9296/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21918-9296/.minikube/cert.pem (1123 bytes)
I1119 22:19:54.371701 248121 exec_runner.go:144] found /home/jenkins/minikube-integration/21918-9296/.minikube/key.pem, removing ...
I1119 22:19:54.371710 248121 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21918-9296/.minikube/key.pem
I1119 22:19:54.371748 248121 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21918-9296/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21918-9296/.minikube/key.pem (1679 bytes)
I1119 22:19:54.371818 248121 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21918-9296/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21918-9296/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21918-9296/.minikube/certs/ca-key.pem org=jenkins.no-preload-638439 san=[127.0.0.1 192.168.103.2 localhost minikube no-preload-638439]
I1119 22:19:54.471021 248121 provision.go:177] copyRemoteCerts
I1119 22:19:54.471092 248121 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1119 22:19:54.471126 248121 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-638439
I1119 22:19:54.492235 248121 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21918-9296/.minikube/machines/no-preload-638439/id_rsa Username:docker}
I1119 22:19:54.594331 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I1119 22:19:54.619378 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/machines/server.pem --> /etc/docker/server.pem (1220 bytes)
I1119 22:19:54.640347 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1119 22:19:54.663269 248121 provision.go:87] duration metric: took 311.007703ms to configureAuth
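configureAuth above generated server.pem with the SAN list from provision.go:117 (127.0.0.1, 192.168.103.2, localhost, minikube, no-preload-638439) and pushed it to /etc/docker on the node. A minimal crypto/x509 sketch of issuing such a cert; self-signing here is a simplification, since minikube actually signs server.pem with its ca.pem/ca-key.pem:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"math/big"
	"net"
	"os"
	"time"
)

func main() {
	// SANs mirror the provision.go line above.
	key, _ := rsa.GenerateKey(rand.Reader, 2048)
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{Organization: []string{"jenkins.no-preload-638439"}},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(26280 * time.Hour), // matches CertExpiration in the profile
		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		DNSNames:     []string{"localhost", "minikube", "no-preload-638439"},
		IPAddresses:  []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("192.168.103.2")},
	}
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		panic(err)
	}
	pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: der})
}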
I1119 22:19:54.663306 248121 ubuntu.go:206] setting minikube options for container-runtime
I1119 22:19:54.663514 248121 config.go:182] Loaded profile config "no-preload-638439": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1119 22:19:54.663528 248121 machine.go:97] duration metric: took 3.772952055s to provisionDockerMachine
I1119 22:19:54.663538 248121 client.go:176] duration metric: took 5.459757711s to LocalClient.Create
I1119 22:19:54.663558 248121 start.go:167] duration metric: took 5.459889493s to libmachine.API.Create "no-preload-638439"
I1119 22:19:54.663572 248121 start.go:293] postStartSetup for "no-preload-638439" (driver="docker")
I1119 22:19:54.663584 248121 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1119 22:19:54.663643 248121 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1119 22:19:54.663702 248121 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-638439
I1119 22:19:54.693309 248121 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21918-9296/.minikube/machines/no-preload-638439/id_rsa Username:docker}
I1119 22:19:54.794533 248121 ssh_runner.go:195] Run: cat /etc/os-release
I1119 22:19:54.799614 248121 main.go:143] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1119 22:19:54.799652 248121 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1119 22:19:54.799667 248121 filesync.go:126] Scanning /home/jenkins/minikube-integration/21918-9296/.minikube/addons for local assets ...
I1119 22:19:54.799750 248121 filesync.go:126] Scanning /home/jenkins/minikube-integration/21918-9296/.minikube/files for local assets ...
I1119 22:19:54.799853 248121 filesync.go:149] local asset: /home/jenkins/minikube-integration/21918-9296/.minikube/files/etc/ssl/certs/128212.pem -> 128212.pem in /etc/ssl/certs
I1119 22:19:54.800010 248121 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1119 22:19:54.811703 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/files/etc/ssl/certs/128212.pem --> /etc/ssl/certs/128212.pem (1708 bytes)
I1119 22:19:54.833815 248121 start.go:296] duration metric: took 170.228401ms for postStartSetup
I1119 22:19:54.834269 248121 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" no-preload-638439
I1119 22:19:54.855648 248121 profile.go:143] Saving config to /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/config.json ...
I1119 22:19:54.855997 248121 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1119 22:19:54.856065 248121 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-638439
I1119 22:19:54.875839 248121 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21918-9296/.minikube/machines/no-preload-638439/id_rsa Username:docker}
I1119 22:19:54.971298 248121 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1119 22:19:54.976558 248121 start.go:128] duration metric: took 5.775804384s to createHost
I1119 22:19:54.976584 248121 start.go:83] releasing machines lock for "no-preload-638439", held for 5.775996243s
I1119 22:19:54.976652 248121 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" no-preload-638439
I1119 22:19:54.996323 248121 ssh_runner.go:195] Run: cat /version.json
I1119 22:19:54.996379 248121 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-638439
I1119 22:19:54.996397 248121 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1119 22:19:54.996468 248121 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-638439
I1119 22:19:55.015498 248121 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21918-9296/.minikube/machines/no-preload-638439/id_rsa Username:docker}
I1119 22:19:55.015796 248121 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21918-9296/.minikube/machines/no-preload-638439/id_rsa Username:docker}
I1119 22:19:55.110222 248121 ssh_runner.go:195] Run: systemctl --version
I1119 22:19:55.167157 248121 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1119 22:19:55.172373 248121 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1119 22:19:55.172445 248121 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1119 22:19:55.200823 248121 cni.go:262] disabled [/etc/cni/net.d/10-crio-bridge.conflist.disabled, /etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
I1119 22:19:55.200849 248121 start.go:496] detecting cgroup driver to use...
I1119 22:19:55.200917 248121 detect.go:190] detected "systemd" cgroup driver on host os
I1119 22:19:55.200971 248121 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1119 22:19:55.216429 248121 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1119 22:19:55.230198 248121 docker.go:218] disabling cri-docker service (if available) ...
I1119 22:19:55.230259 248121 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1119 22:19:55.247760 248121 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1119 22:19:55.266193 248121 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1119 22:19:55.355176 248121 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1119 22:19:55.456550 248121 docker.go:234] disabling docker service ...
I1119 22:19:55.456609 248121 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1119 22:19:55.479653 248121 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1119 22:19:55.493533 248121 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1119 22:19:55.592560 248121 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1119 22:19:55.702080 248121 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1119 22:19:55.719351 248121 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1119 22:19:55.735307 248121 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I1119 22:19:55.748222 248121 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1119 22:19:55.759552 248121 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I1119 22:19:55.759604 248121 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I1119 22:19:55.771633 248121 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1119 22:19:55.782179 248121 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1119 22:19:55.791940 248121 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1119 22:19:55.801486 248121 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1119 22:19:55.810671 248121 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1119 22:19:55.820637 248121 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1119 22:19:55.830057 248121 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1119 22:19:55.839605 248121 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1119 22:19:55.847930 248121 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1119 22:19:55.856300 248121 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1119 22:19:55.943868 248121 ssh_runner.go:195] Run: sudo systemctl restart containerd
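The sed pipeline above rewrites /etc/containerd/config.toml in place (sandbox_image, SystemdCgroup = true to match the host's systemd cgroup driver, conf_dir, enable_unprivileged_ports) before the daemon-reload and restart. A Go regexp equivalent of the SystemdCgroup edit, with a stand-in config fragment instead of the real file:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Stand-in for /etc/containerd/config.toml; the replace below is
	// the same edit as the SystemdCgroup sed in the log.
	conf := `[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
  SystemdCgroup = false`
	re := regexp.MustCompile(`(?m)^(\s*)SystemdCgroup = .*$`)
	fmt.Println(re.ReplaceAllString(conf, "${1}SystemdCgroup = true"))
}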
I1119 22:19:56.031481 248121 start.go:543] Will wait 60s for socket path /run/containerd/containerd.sock
I1119 22:19:56.031555 248121 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I1119 22:19:56.036560 248121 start.go:564] Will wait 60s for crictl version
I1119 22:19:56.036619 248121 ssh_runner.go:195] Run: which crictl
I1119 22:19:56.040772 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1119 22:19:56.068661 248121 start.go:580] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: v2.1.5
RuntimeApiVersion: v1
I1119 22:19:56.068728 248121 ssh_runner.go:195] Run: containerd --version
I1119 22:19:56.092486 248121 ssh_runner.go:195] Run: containerd --version
I1119 22:19:56.118002 248121 out.go:179] * Preparing Kubernetes v1.34.1 on containerd 2.1.5 ...
I1119 22:19:54.816277 244005 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I1119 22:19:54.820558 244005 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.28.0/kubectl ...
I1119 22:19:54.820581 244005 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I1119 22:19:54.833857 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I1119 22:19:55.525202 244005 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1119 22:19:55.525370 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:19:55.525485 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes old-k8s-version-975700 minikube.k8s.io/updated_at=2025_11_19T22_19_55_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=08454a179ffa60c8ae500105aac58654b5cdef58 minikube.k8s.io/name=old-k8s-version-975700 minikube.k8s.io/primary=true
I1119 22:19:55.543472 244005 ops.go:34] apiserver oom_adj: -16
I1119 22:19:55.632765 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:19:56.133706 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:19:56.632860 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:19:57.133046 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:19:56.119594 248121 cli_runner.go:164] Run: docker network inspect no-preload-638439 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1119 22:19:56.139074 248121 ssh_runner.go:195] Run: grep 192.168.103.1 host.minikube.internal$ /etc/hosts
I1119 22:19:56.143662 248121 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.103.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1119 22:19:56.156640 248121 kubeadm.go:884] updating cluster {Name:no-preload-638439 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-638439 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.103.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1119 22:19:56.156774 248121 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
I1119 22:19:56.156835 248121 ssh_runner.go:195] Run: sudo crictl images --output json
I1119 22:19:56.185228 248121 containerd.go:623] couldn't find preloaded image for "registry.k8s.io/kube-apiserver:v1.34.1". assuming images are not preloaded.
I1119 22:19:56.185258 248121 cache_images.go:90] LoadCachedImages start: [registry.k8s.io/kube-apiserver:v1.34.1 registry.k8s.io/kube-controller-manager:v1.34.1 registry.k8s.io/kube-scheduler:v1.34.1 registry.k8s.io/kube-proxy:v1.34.1 registry.k8s.io/pause:3.10.1 registry.k8s.io/etcd:3.6.4-0 registry.k8s.io/coredns/coredns:v1.12.1 gcr.io/k8s-minikube/storage-provisioner:v5]
I1119 22:19:56.185326 248121 image.go:138] retrieving image: registry.k8s.io/kube-apiserver:v1.34.1
I1119 22:19:56.185359 248121 image.go:138] retrieving image: registry.k8s.io/coredns/coredns:v1.12.1
I1119 22:19:56.185391 248121 image.go:138] retrieving image: registry.k8s.io/pause:3.10.1
I1119 22:19:56.185403 248121 image.go:138] retrieving image: registry.k8s.io/etcd:3.6.4-0
I1119 22:19:56.185415 248121 image.go:138] retrieving image: registry.k8s.io/kube-controller-manager:v1.34.1
I1119 22:19:56.185453 248121 image.go:138] retrieving image: registry.k8s.io/kube-proxy:v1.34.1
I1119 22:19:56.185334 248121 image.go:138] retrieving image: gcr.io/k8s-minikube/storage-provisioner:v5
I1119 22:19:56.185400 248121 image.go:138] retrieving image: registry.k8s.io/kube-scheduler:v1.34.1
I1119 22:19:56.186856 248121 image.go:181] daemon lookup for registry.k8s.io/coredns/coredns:v1.12.1: Error response from daemon: No such image: registry.k8s.io/coredns/coredns:v1.12.1
I1119 22:19:56.186874 248121 image.go:181] daemon lookup for gcr.io/k8s-minikube/storage-provisioner:v5: Error response from daemon: No such image: gcr.io/k8s-minikube/storage-provisioner:v5
I1119 22:19:56.186979 248121 image.go:181] daemon lookup for registry.k8s.io/kube-controller-manager:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-controller-manager:v1.34.1
I1119 22:19:56.186979 248121 image.go:181] daemon lookup for registry.k8s.io/kube-apiserver:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-apiserver:v1.34.1
I1119 22:19:56.187070 248121 image.go:181] daemon lookup for registry.k8s.io/pause:3.10.1: Error response from daemon: No such image: registry.k8s.io/pause:3.10.1
I1119 22:19:56.187094 248121 image.go:181] daemon lookup for registry.k8s.io/kube-proxy:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-proxy:v1.34.1
I1119 22:19:56.187129 248121 image.go:181] daemon lookup for registry.k8s.io/etcd:3.6.4-0: Error response from daemon: No such image: registry.k8s.io/etcd:3.6.4-0
I1119 22:19:56.187150 248121 image.go:181] daemon lookup for registry.k8s.io/kube-scheduler:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-scheduler:v1.34.1
I1119 22:19:56.332716 248121 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-controller-manager:v1.34.1" and sha "c80c8dbafe7dd71fc21527912a6dd20ccd1b71f3e561a5c28337388d0619538f"
I1119 22:19:56.332783 248121 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-controller-manager:v1.34.1
I1119 22:19:56.332809 248121 containerd.go:267] Checking existence of image with name "registry.k8s.io/coredns/coredns:v1.12.1" and sha "52546a367cc9e0d924aa3b190596a9167fa6e53245023b5b5baf0f07e5443969"
I1119 22:19:56.332864 248121 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/coredns/coredns:v1.12.1
I1119 22:19:56.335699 248121 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-apiserver:v1.34.1" and sha "c3994bc6961024917ec0aeee02e62828108c21a52d87648e30f3080d9cbadc97"
I1119 22:19:56.335755 248121 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-apiserver:v1.34.1
I1119 22:19:56.343400 248121 containerd.go:267] Checking existence of image with name "registry.k8s.io/etcd:3.6.4-0" and sha "5f1f5298c888daa46c4409ff4cefe5ca9d16e479419f94cdb5f5d5563dac0115"
I1119 22:19:56.343484 248121 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/etcd:3.6.4-0
I1119 22:19:56.354423 248121 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-scheduler:v1.34.1" and sha "7dd6aaa1717ab7eaae4578503e4c4d9965fcf5a249e8155fe16379ee9b6cb813"
I1119 22:19:56.354489 248121 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-scheduler:v1.34.1
I1119 22:19:56.357606 248121 cache_images.go:118] "registry.k8s.io/kube-controller-manager:v1.34.1" needs transfer: "registry.k8s.io/kube-controller-manager:v1.34.1" does not exist at hash "c80c8dbafe7dd71fc21527912a6dd20ccd1b71f3e561a5c28337388d0619538f" in container runtime
I1119 22:19:56.357630 248121 cache_images.go:118] "registry.k8s.io/coredns/coredns:v1.12.1" needs transfer: "registry.k8s.io/coredns/coredns:v1.12.1" does not exist at hash "52546a367cc9e0d924aa3b190596a9167fa6e53245023b5b5baf0f07e5443969" in container runtime
I1119 22:19:56.357659 248121 cri.go:218] Removing image: registry.k8s.io/kube-controller-manager:v1.34.1
I1119 22:19:56.357662 248121 cri.go:218] Removing image: registry.k8s.io/coredns/coredns:v1.12.1
I1119 22:19:56.357709 248121 ssh_runner.go:195] Run: which crictl
I1119 22:19:56.357709 248121 ssh_runner.go:195] Run: which crictl
I1119 22:19:56.359708 248121 cache_images.go:118] "registry.k8s.io/kube-apiserver:v1.34.1" needs transfer: "registry.k8s.io/kube-apiserver:v1.34.1" does not exist at hash "c3994bc6961024917ec0aeee02e62828108c21a52d87648e30f3080d9cbadc97" in container runtime
I1119 22:19:56.359750 248121 cri.go:218] Removing image: registry.k8s.io/kube-apiserver:v1.34.1
I1119 22:19:56.359792 248121 ssh_runner.go:195] Run: which crictl
I1119 22:19:56.365141 248121 containerd.go:267] Checking existence of image with name "registry.k8s.io/pause:3.10.1" and sha "cd073f4c5f6a8e9dc6f3125ba00cf60819cae95c1ec84a1f146ee4a9cf9e803f"
I1119 22:19:56.365211 248121 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/pause:3.10.1
I1119 22:19:56.370262 248121 cache_images.go:118] "registry.k8s.io/etcd:3.6.4-0" needs transfer: "registry.k8s.io/etcd:3.6.4-0" does not exist at hash "5f1f5298c888daa46c4409ff4cefe5ca9d16e479419f94cdb5f5d5563dac0115" in container runtime
I1119 22:19:56.370317 248121 cri.go:218] Removing image: registry.k8s.io/etcd:3.6.4-0
I1119 22:19:56.370368 248121 ssh_runner.go:195] Run: which crictl
I1119 22:19:56.380909 248121 cache_images.go:118] "registry.k8s.io/kube-scheduler:v1.34.1" needs transfer: "registry.k8s.io/kube-scheduler:v1.34.1" does not exist at hash "7dd6aaa1717ab7eaae4578503e4c4d9965fcf5a249e8155fe16379ee9b6cb813" in container runtime
I1119 22:19:56.380976 248121 cri.go:218] Removing image: registry.k8s.io/kube-scheduler:v1.34.1
I1119 22:19:56.381006 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.12.1
I1119 22:19:56.381021 248121 ssh_runner.go:195] Run: which crictl
I1119 22:19:56.381050 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.34.1
I1119 22:19:56.381079 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.34.1
I1119 22:19:56.387736 248121 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-proxy:v1.34.1" and sha "fc25172553d79197ecd840ec8dba1fba68330079355e974b04c1a441e6a4a0b7"
I1119 22:19:56.387826 248121 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-proxy:v1.34.1
I1119 22:19:56.388049 248121 cache_images.go:118] "registry.k8s.io/pause:3.10.1" needs transfer: "registry.k8s.io/pause:3.10.1" does not exist at hash "cd073f4c5f6a8e9dc6f3125ba00cf60819cae95c1ec84a1f146ee4a9cf9e803f" in container runtime
I1119 22:19:56.388093 248121 cri.go:218] Removing image: registry.k8s.io/pause:3.10.1
I1119 22:19:56.388134 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.34.1
I1119 22:19:56.388139 248121 ssh_runner.go:195] Run: which crictl
I1119 22:19:56.388097 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.6.4-0
I1119 22:19:56.419491 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.34.1
I1119 22:19:56.419632 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.34.1
I1119 22:19:56.422653 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.12.1
I1119 22:19:56.424802 248121 cache_images.go:118] "registry.k8s.io/kube-proxy:v1.34.1" needs transfer: "registry.k8s.io/kube-proxy:v1.34.1" does not exist at hash "fc25172553d79197ecd840ec8dba1fba68330079355e974b04c1a441e6a4a0b7" in container runtime
I1119 22:19:56.424851 248121 cri.go:218] Removing image: registry.k8s.io/kube-proxy:v1.34.1
I1119 22:19:56.424918 248121 ssh_runner.go:195] Run: which crictl
I1119 22:19:56.426559 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.6.4-0
I1119 22:19:56.426657 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.34.1
I1119 22:19:56.426745 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1119 22:19:56.457323 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.34.1
I1119 22:19:56.459754 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.34.1
I1119 22:19:56.459823 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.12.1
I1119 22:19:56.459928 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.34.1
I1119 22:19:56.464385 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.6.4-0
I1119 22:19:56.464524 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1119 22:19:56.464526 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.34.1
I1119 22:19:56.499739 248121 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1
I1119 22:19:56.499837 248121 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-controller-manager_v1.34.1
I1119 22:19:56.504038 248121 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1
I1119 22:19:56.504120 248121 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-apiserver_v1.34.1
I1119 22:19:56.504047 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.34.1
I1119 22:19:56.504087 248121 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1
I1119 22:19:56.504256 248121 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/coredns_v1.12.1
I1119 22:19:56.507722 248121 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0
I1119 22:19:56.507817 248121 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/etcd_3.6.4-0
I1119 22:19:56.507959 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1119 22:19:56.508035 248121 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-controller-manager_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-controller-manager_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-controller-manager_v1.34.1': No such file or directory
I1119 22:19:56.508064 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1 --> /var/lib/minikube/images/kube-controller-manager_v1.34.1 (22831104 bytes)
I1119 22:19:56.508205 248121 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1
I1119 22:19:56.508348 248121 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-scheduler_v1.34.1
I1119 22:19:56.515236 248121 ssh_runner.go:352] existence check for /var/lib/minikube/images/etcd_3.6.4-0: stat -c "%s %y" /var/lib/minikube/images/etcd_3.6.4-0: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/etcd_3.6.4-0': No such file or directory
I1119 22:19:56.515270 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0 --> /var/lib/minikube/images/etcd_3.6.4-0 (74320896 bytes)
I1119 22:19:56.555985 248121 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-apiserver_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-apiserver_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-apiserver_v1.34.1': No such file or directory
I1119 22:19:56.556025 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1 --> /var/lib/minikube/images/kube-apiserver_v1.34.1 (27073024 bytes)
I1119 22:19:56.556078 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.34.1
I1119 22:19:56.556101 248121 ssh_runner.go:352] existence check for /var/lib/minikube/images/coredns_v1.12.1: stat -c "%s %y" /var/lib/minikube/images/coredns_v1.12.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/coredns_v1.12.1': No such file or directory
I1119 22:19:56.556122 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1 --> /var/lib/minikube/images/coredns_v1.12.1 (22394368 bytes)
I1119 22:19:56.571156 248121 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-scheduler_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-scheduler_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-scheduler_v1.34.1': No such file or directory
I1119 22:19:56.571205 248121 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1
I1119 22:19:56.571220 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1 --> /var/lib/minikube/images/kube-scheduler_v1.34.1 (17396736 bytes)
I1119 22:19:56.571322 248121 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/pause_3.10.1
I1119 22:19:56.646952 248121 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1
I1119 22:19:56.646960 248121 ssh_runner.go:352] existence check for /var/lib/minikube/images/pause_3.10.1: stat -c "%s %y" /var/lib/minikube/images/pause_3.10.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/pause_3.10.1': No such file or directory
I1119 22:19:56.646995 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 --> /var/lib/minikube/images/pause_3.10.1 (321024 bytes)
I1119 22:19:56.647066 248121 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-proxy_v1.34.1
I1119 22:19:56.713984 248121 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-proxy_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-proxy_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-proxy_v1.34.1': No such file or directory
I1119 22:19:56.714047 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1 --> /var/lib/minikube/images/kube-proxy_v1.34.1 (25966080 bytes)
I1119 22:19:56.738791 248121 containerd.go:285] Loading image: /var/lib/minikube/images/pause_3.10.1
I1119 22:19:56.738923 248121 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/pause_3.10.1
I1119 22:19:56.888282 248121 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 from cache
I1119 22:19:56.888324 248121 containerd.go:285] Loading image: /var/lib/minikube/images/kube-scheduler_v1.34.1
I1119 22:19:56.888394 248121 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-scheduler_v1.34.1
I1119 22:19:57.461211 248121 containerd.go:267] Checking existence of image with name "gcr.io/k8s-minikube/storage-provisioner:v5" and sha "6e38f40d628db3002f5617342c8872c935de530d867d0f709a2fbda1a302a562"
I1119 22:19:57.461286 248121 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==gcr.io/k8s-minikube/storage-provisioner:v5
I1119 22:19:57.982686 248121 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-scheduler_v1.34.1: (1.094253154s)
I1119 22:19:57.982716 248121 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1 from cache
I1119 22:19:57.982712 248121 cache_images.go:118] "gcr.io/k8s-minikube/storage-provisioner:v5" needs transfer: "gcr.io/k8s-minikube/storage-provisioner:v5" does not exist at hash "6e38f40d628db3002f5617342c8872c935de530d867d0f709a2fbda1a302a562" in container runtime
I1119 22:19:57.982738 248121 containerd.go:285] Loading image: /var/lib/minikube/images/kube-controller-manager_v1.34.1
I1119 22:19:57.982764 248121 cri.go:218] Removing image: gcr.io/k8s-minikube/storage-provisioner:v5
I1119 22:19:57.982789 248121 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-controller-manager_v1.34.1
I1119 22:19:57.982801 248121 ssh_runner.go:195] Run: which crictl
I1119 22:19:58.943228 248121 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1 from cache
I1119 22:19:58.943276 248121 containerd.go:285] Loading image: /var/lib/minikube/images/coredns_v1.12.1
I1119 22:19:58.943321 248121 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/coredns_v1.12.1
I1119 22:19:58.943326 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
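(Note the timestamps: three minikube runs are interleaved in this log — 248121, 216336, and 244005 — so the clock appears to jump backwards whenever the output switches processes.) The 248121 lines above follow minikube's no-preload image path: stat the tarball under /var/lib/minikube/images, scp it from the host cache when the stat exits 1, then import it with `ctr -n=k8s.io images import`. A minimal sketch of that check-then-copy-then-import sequence, using `docker exec`/`docker cp` as stand-ins for the real SSH runner (hypothetical helper, not minikube's code):

```go
package main

import (
	"fmt"
	"os/exec"
)

// runOnNode stands in for minikube's ssh_runner: run a command inside the
// node container. The real flow uses SSH over the forwarded 22/tcp port.
func runOnNode(node string, args ...string) error {
	return exec.Command("docker", append([]string{"exec", node}, args...)...).Run()
}

// loadCachedImage mirrors the stat -> transfer -> ctr-import sequence logged
// above: `stat -c "%s %y"` exits non-zero when the tarball is absent, which
// triggers the copy before the containerd import.
func loadCachedImage(node, localTar, remoteTar string) error {
	if err := runOnNode(node, "stat", "-c", "%s %y", remoteTar); err != nil {
		// Missing on the node: copy the cached tarball in (scp in the real flow).
		if err := exec.Command("docker", "cp", localTar, node+":"+remoteTar).Run(); err != nil {
			return err
		}
	}
	// Import into containerd's k8s.io namespace so the kubelet's CRI can see it.
	return runOnNode(node, "sudo", "ctr", "-n=k8s.io", "images", "import", remoteTar)
}

func main() {
	err := loadCachedImage("no-preload-638439",
		"/tmp/pause_3.10.1", "/var/lib/minikube/images/pause_3.10.1")
	fmt.Println(err)
}
```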
I1119 22:19:55.919868 216336 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1119 22:19:55.920354 216336 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1119 22:19:55.920400 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1119 22:19:55.920445 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1119 22:19:55.949031 216336 cri.go:89] found id: "0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0"
I1119 22:19:55.949059 216336 cri.go:89] found id: "b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42"
I1119 22:19:55.949065 216336 cri.go:89] found id: ""
I1119 22:19:55.949074 216336 logs.go:282] 2 containers: [0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0 b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42]
I1119 22:19:55.949133 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:55.953108 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:55.957378 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1119 22:19:55.957442 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1119 22:19:55.987066 216336 cri.go:89] found id: "4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc"
I1119 22:19:55.987094 216336 cri.go:89] found id: ""
I1119 22:19:55.987104 216336 logs.go:282] 1 containers: [4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc]
I1119 22:19:55.987165 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:55.991215 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1119 22:19:55.991296 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1119 22:19:56.020982 216336 cri.go:89] found id: ""
I1119 22:19:56.021011 216336 logs.go:282] 0 containers: []
W1119 22:19:56.021022 216336 logs.go:284] No container was found matching "coredns"
I1119 22:19:56.021031 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1119 22:19:56.021093 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1119 22:19:56.051114 216336 cri.go:89] found id: "599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0"
I1119 22:19:56.051138 216336 cri.go:89] found id: ""
I1119 22:19:56.051147 216336 logs.go:282] 1 containers: [599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0]
I1119 22:19:56.051210 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:56.056071 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1119 22:19:56.056142 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1119 22:19:56.085375 216336 cri.go:89] found id: ""
I1119 22:19:56.085398 216336 logs.go:282] 0 containers: []
W1119 22:19:56.085405 216336 logs.go:284] No container was found matching "kube-proxy"
I1119 22:19:56.085414 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1119 22:19:56.085457 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1119 22:19:56.114914 216336 cri.go:89] found id: "1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2"
I1119 22:19:56.114941 216336 cri.go:89] found id: ""
I1119 22:19:56.114951 216336 logs.go:282] 1 containers: [1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2]
I1119 22:19:56.115011 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:56.119718 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1119 22:19:56.119785 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1119 22:19:56.148992 216336 cri.go:89] found id: ""
I1119 22:19:56.149019 216336 logs.go:282] 0 containers: []
W1119 22:19:56.149029 216336 logs.go:284] No container was found matching "kindnet"
I1119 22:19:56.149037 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1119 22:19:56.149096 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1119 22:19:56.179135 216336 cri.go:89] found id: ""
I1119 22:19:56.179163 216336 logs.go:282] 0 containers: []
W1119 22:19:56.179173 216336 logs.go:284] No container was found matching "storage-provisioner"
I1119 22:19:56.179190 216336 logs.go:123] Gathering logs for etcd [4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc] ...
I1119 22:19:56.179204 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc"
I1119 22:19:56.216379 216336 logs.go:123] Gathering logs for kube-scheduler [599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0] ...
I1119 22:19:56.216409 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0"
I1119 22:19:56.252073 216336 logs.go:123] Gathering logs for container status ...
I1119 22:19:56.252103 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1119 22:19:56.283542 216336 logs.go:123] Gathering logs for kubelet ...
I1119 22:19:56.283567 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1119 22:19:56.381327 216336 logs.go:123] Gathering logs for dmesg ...
I1119 22:19:56.381359 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1119 22:19:56.399981 216336 logs.go:123] Gathering logs for describe nodes ...
I1119 22:19:56.400019 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1119 22:19:56.493857 216336 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1119 22:19:56.493894 216336 logs.go:123] Gathering logs for kube-controller-manager [1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2] ...
I1119 22:19:56.493913 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2"
I1119 22:19:56.537441 216336 logs.go:123] Gathering logs for containerd ...
I1119 22:19:56.537473 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1119 22:19:56.590041 216336 logs.go:123] Gathering logs for kube-apiserver [0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0] ...
I1119 22:19:56.590076 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0"
I1119 22:19:56.633876 216336 logs.go:123] Gathering logs for kube-apiserver [b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42] ...
I1119 22:19:56.633925 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42"
I1119 22:19:59.179328 216336 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1119 22:19:59.179856 216336 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1119 22:19:59.179947 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1119 22:19:59.180012 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1119 22:19:59.213304 216336 cri.go:89] found id: "0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0"
I1119 22:19:59.213329 216336 cri.go:89] found id: "b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42"
I1119 22:19:59.213336 216336 cri.go:89] found id: ""
I1119 22:19:59.213346 216336 logs.go:282] 2 containers: [0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0 b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42]
I1119 22:19:59.213410 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:59.218953 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:59.223649 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1119 22:19:59.223722 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1119 22:19:59.256070 216336 cri.go:89] found id: "4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc"
I1119 22:19:59.256133 216336 cri.go:89] found id: ""
I1119 22:19:59.256144 216336 logs.go:282] 1 containers: [4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc]
I1119 22:19:59.256211 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:59.261436 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1119 22:19:59.261514 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1119 22:19:59.294827 216336 cri.go:89] found id: ""
I1119 22:19:59.294854 216336 logs.go:282] 0 containers: []
W1119 22:19:59.294864 216336 logs.go:284] No container was found matching "coredns"
I1119 22:19:59.294871 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1119 22:19:59.294944 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1119 22:19:59.328052 216336 cri.go:89] found id: "599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0"
I1119 22:19:59.328078 216336 cri.go:89] found id: ""
I1119 22:19:59.328087 216336 logs.go:282] 1 containers: [599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0]
I1119 22:19:59.328148 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:59.333661 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1119 22:19:59.333745 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1119 22:19:59.367498 216336 cri.go:89] found id: ""
I1119 22:19:59.367525 216336 logs.go:282] 0 containers: []
W1119 22:19:59.367534 216336 logs.go:284] No container was found matching "kube-proxy"
I1119 22:19:59.367543 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1119 22:19:59.367601 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1119 22:19:59.401843 216336 cri.go:89] found id: "1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2"
I1119 22:19:59.401868 216336 cri.go:89] found id: ""
I1119 22:19:59.401877 216336 logs.go:282] 1 containers: [1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2]
I1119 22:19:59.401982 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:59.406399 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1119 22:19:59.406473 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1119 22:19:59.437867 216336 cri.go:89] found id: ""
I1119 22:19:59.437948 216336 logs.go:282] 0 containers: []
W1119 22:19:59.437957 216336 logs.go:284] No container was found matching "kindnet"
I1119 22:19:59.437963 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1119 22:19:59.438041 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1119 22:19:59.465826 216336 cri.go:89] found id: ""
I1119 22:19:59.465856 216336 logs.go:282] 0 containers: []
W1119 22:19:59.465866 216336 logs.go:284] No container was found matching "storage-provisioner"
I1119 22:19:59.465905 216336 logs.go:123] Gathering logs for container status ...
I1119 22:19:59.465953 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1119 22:19:59.498633 216336 logs.go:123] Gathering logs for kubelet ...
I1119 22:19:59.498670 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1119 22:19:59.586643 216336 logs.go:123] Gathering logs for dmesg ...
I1119 22:19:59.586677 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1119 22:19:59.602123 216336 logs.go:123] Gathering logs for describe nodes ...
I1119 22:19:59.602148 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1119 22:19:59.668657 216336 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1119 22:19:59.668675 216336 logs.go:123] Gathering logs for kube-apiserver [0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0] ...
I1119 22:19:59.668702 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0"
I1119 22:19:59.705026 216336 logs.go:123] Gathering logs for kube-apiserver [b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42] ...
I1119 22:19:59.705060 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42"
I1119 22:19:59.741520 216336 logs.go:123] Gathering logs for kube-scheduler [599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0] ...
I1119 22:19:59.741550 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0"
I1119 22:19:59.780920 216336 logs.go:123] Gathering logs for etcd [4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc] ...
I1119 22:19:59.780952 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc"
I1119 22:19:59.819532 216336 logs.go:123] Gathering logs for kube-controller-manager [1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2] ...
I1119 22:19:59.819572 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2"
I1119 22:19:59.861394 216336 logs.go:123] Gathering logs for containerd ...
I1119 22:19:59.861428 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
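Process 216336 is in a retry loop here: each cycle probes the apiserver's /healthz, gets connection refused because nothing is listening on 8443 yet, then re-inventories the CRI containers and gathers component logs before trying again. A rough sketch of that health poll, assuming TLS verification is simply skipped (the real check uses the cluster's client certificates, which this log does not show):

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

// pollHealthz sketches the api_server.go check above: GET /healthz until the
// apiserver answers 200 or the deadline passes. InsecureSkipVerify stands in
// for the real client-certificate TLS config.
func pollHealthz(url string, deadline time.Duration) error {
	client := &http.Client{
		Timeout:   2 * time.Second,
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}
	end := time.Now().Add(deadline)
	for time.Now().Before(end) {
		resp, err := client.Get(url)
		if err == nil {
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				return nil // apiserver is up
			}
		}
		// "connection refused" lands here; wait and retry, roughly the
		// cadence visible between the checks above.
		time.Sleep(3 * time.Second)
	}
	return fmt.Errorf("apiserver never became healthy at %s", url)
}

func main() {
	fmt.Println(pollHealthz("https://192.168.76.2:8443/healthz", time.Minute))
}
```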
I1119 22:19:57.633270 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:19:58.133177 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:19:58.633156 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:19:59.133958 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:19:59.632816 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:00.133904 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:00.633510 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:01.132810 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:01.632963 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:02.132866 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:00.209856 248121 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/coredns_v1.12.1: (1.266503638s)
I1119 22:20:00.209924 248121 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1 from cache
I1119 22:20:00.209943 248121 ssh_runner.go:235] Completed: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5: (1.266589504s)
I1119 22:20:00.209953 248121 containerd.go:285] Loading image: /var/lib/minikube/images/kube-apiserver_v1.34.1
I1119 22:20:00.210022 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1119 22:20:00.210039 248121 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-apiserver_v1.34.1
I1119 22:20:01.315659 248121 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-apiserver_v1.34.1: (1.105588091s)
I1119 22:20:01.315688 248121 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1 from cache
I1119 22:20:01.315709 248121 containerd.go:285] Loading image: /var/lib/minikube/images/kube-proxy_v1.34.1
I1119 22:20:01.315726 248121 ssh_runner.go:235] Completed: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5: (1.105675845s)
I1119 22:20:01.315757 248121 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-proxy_v1.34.1
I1119 22:20:01.315796 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1119 22:20:02.564406 248121 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-proxy_v1.34.1: (1.248612967s)
I1119 22:20:02.564435 248121 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1 from cache
I1119 22:20:02.564452 248121 ssh_runner.go:235] Completed: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5: (1.248631025s)
I1119 22:20:02.564470 248121 containerd.go:285] Loading image: /var/lib/minikube/images/etcd_3.6.4-0
I1119 22:20:02.564502 248121 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5
I1119 22:20:02.564519 248121 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/etcd_3.6.4-0
I1119 22:20:02.564590 248121 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v5
I1119 22:20:02.568829 248121 ssh_runner.go:352] existence check for /var/lib/minikube/images/storage-provisioner_v5: stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v5: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/storage-provisioner_v5': No such file or directory
I1119 22:20:02.568862 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 --> /var/lib/minikube/images/storage-provisioner_v5 (9060352 bytes)
I1119 22:20:02.417703 216336 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1119 22:20:02.418103 216336 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1119 22:20:02.418159 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1119 22:20:02.418203 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1119 22:20:02.450244 216336 cri.go:89] found id: "0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0"
I1119 22:20:02.450266 216336 cri.go:89] found id: "b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42"
I1119 22:20:02.450271 216336 cri.go:89] found id: ""
I1119 22:20:02.450280 216336 logs.go:282] 2 containers: [0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0 b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42]
I1119 22:20:02.450336 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:02.455477 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:02.460188 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1119 22:20:02.460263 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1119 22:20:02.491317 216336 cri.go:89] found id: "4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc"
I1119 22:20:02.491341 216336 cri.go:89] found id: ""
I1119 22:20:02.491351 216336 logs.go:282] 1 containers: [4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc]
I1119 22:20:02.491409 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:02.495754 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1119 22:20:02.495837 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1119 22:20:02.526395 216336 cri.go:89] found id: ""
I1119 22:20:02.526421 216336 logs.go:282] 0 containers: []
W1119 22:20:02.526433 216336 logs.go:284] No container was found matching "coredns"
I1119 22:20:02.526441 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1119 22:20:02.526509 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1119 22:20:02.556596 216336 cri.go:89] found id: "599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0"
I1119 22:20:02.556619 216336 cri.go:89] found id: ""
I1119 22:20:02.556629 216336 logs.go:282] 1 containers: [599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0]
I1119 22:20:02.556686 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:02.561029 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1119 22:20:02.561102 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1119 22:20:02.593442 216336 cri.go:89] found id: ""
I1119 22:20:02.593468 216336 logs.go:282] 0 containers: []
W1119 22:20:02.593480 216336 logs.go:284] No container was found matching "kube-proxy"
I1119 22:20:02.593488 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1119 22:20:02.593547 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1119 22:20:02.626155 216336 cri.go:89] found id: "1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2"
I1119 22:20:02.626181 216336 cri.go:89] found id: ""
I1119 22:20:02.626191 216336 logs.go:282] 1 containers: [1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2]
I1119 22:20:02.626239 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:02.630831 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1119 22:20:02.630910 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1119 22:20:02.663060 216336 cri.go:89] found id: ""
I1119 22:20:02.663088 216336 logs.go:282] 0 containers: []
W1119 22:20:02.663098 216336 logs.go:284] No container was found matching "kindnet"
I1119 22:20:02.663106 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1119 22:20:02.663159 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1119 22:20:02.692104 216336 cri.go:89] found id: ""
I1119 22:20:02.692132 216336 logs.go:282] 0 containers: []
W1119 22:20:02.692142 216336 logs.go:284] No container was found matching "storage-provisioner"
I1119 22:20:02.692159 216336 logs.go:123] Gathering logs for kube-controller-manager [1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2] ...
I1119 22:20:02.692172 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2"
I1119 22:20:02.730157 216336 logs.go:123] Gathering logs for container status ...
I1119 22:20:02.730198 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1119 22:20:02.764408 216336 logs.go:123] Gathering logs for kubelet ...
I1119 22:20:02.764435 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1119 22:20:02.871409 216336 logs.go:123] Gathering logs for kube-apiserver [0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0] ...
I1119 22:20:02.871460 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0"
I1119 22:20:02.912737 216336 logs.go:123] Gathering logs for kube-apiserver [b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42] ...
I1119 22:20:02.912778 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42"
I1119 22:20:02.958177 216336 logs.go:123] Gathering logs for kube-scheduler [599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0] ...
I1119 22:20:02.958229 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0"
I1119 22:20:03.003908 216336 logs.go:123] Gathering logs for containerd ...
I1119 22:20:03.003950 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1119 22:20:03.062041 216336 logs.go:123] Gathering logs for dmesg ...
I1119 22:20:03.062076 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1119 22:20:03.080938 216336 logs.go:123] Gathering logs for describe nodes ...
I1119 22:20:03.080972 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1119 22:20:03.153154 216336 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1119 22:20:03.153177 216336 logs.go:123] Gathering logs for etcd [4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc] ...
I1119 22:20:03.153191 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc"
I1119 22:20:02.633509 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:03.132907 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:03.633598 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:04.133836 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:04.632911 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:05.133740 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:05.633397 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:06.133422 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:06.633053 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:07.133122 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:07.632971 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:07.709877 244005 kubeadm.go:1114] duration metric: took 12.184544724s to wait for elevateKubeSystemPrivileges
I1119 22:20:07.709929 244005 kubeadm.go:403] duration metric: took 23.328681682s to StartCluster
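The 244005 `get sa default` lines every ~500ms are minikube waiting on elevateKubeSystemPrivileges, which it gates on the "default" ServiceAccount existing; the loop above took ~12.2s. A sketch of that wait, shelling out to kubectl (helper name and timeout are illustrative):

```go
package main

import (
	"fmt"
	"os/exec"
	"time"
)

// waitForDefaultSA mirrors the 500ms polling loop above: a fresh cluster is
// not usable for workloads until the token controller has created the
// "default" ServiceAccount, so minikube retries `kubectl get sa default`.
func waitForDefaultSA(kubectl, kubeconfig string, timeout time.Duration) error {
	end := time.Now().Add(timeout)
	for time.Now().Before(end) {
		cmd := exec.Command(kubectl, "--kubeconfig", kubeconfig, "get", "sa", "default")
		if cmd.Run() == nil {
			return nil // ServiceAccount exists; privileges can be elevated
		}
		time.Sleep(500 * time.Millisecond) // the cadence seen in the log
	}
	return fmt.Errorf("default ServiceAccount not ready within %s", timeout)
}

func main() {
	fmt.Println(waitForDefaultSA(
		"/var/lib/minikube/binaries/v1.28.0/kubectl",
		"/var/lib/minikube/kubeconfig", 2*time.Minute))
}
```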
I1119 22:20:07.709949 244005 settings.go:142] acquiring lock: {Name:mk3c795849984e82ee99295088dd85252bd75f6f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:20:07.710024 244005 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21918-9296/kubeconfig
I1119 22:20:07.711281 244005 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21918-9296/kubeconfig: {Name:mk5b9093863cb8ca8629eea9fd861356875781d2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:20:07.726769 244005 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1119 22:20:07.726909 244005 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.94.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1119 22:20:07.727036 244005 config.go:182] Loaded profile config "old-k8s-version-975700": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1119 22:20:07.727028 244005 addons.go:512] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1119 22:20:07.727107 244005 addons.go:70] Setting storage-provisioner=true in profile "old-k8s-version-975700"
I1119 22:20:07.727154 244005 addons.go:239] Setting addon storage-provisioner=true in "old-k8s-version-975700"
I1119 22:20:07.727201 244005 host.go:66] Checking if "old-k8s-version-975700" exists ...
I1119 22:20:07.727269 244005 addons.go:70] Setting default-storageclass=true in profile "old-k8s-version-975700"
I1119 22:20:07.727331 244005 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "old-k8s-version-975700"
I1119 22:20:07.727652 244005 cli_runner.go:164] Run: docker container inspect old-k8s-version-975700 --format={{.State.Status}}
I1119 22:20:07.727759 244005 cli_runner.go:164] Run: docker container inspect old-k8s-version-975700 --format={{.State.Status}}
I1119 22:20:07.759624 244005 out.go:179] * Verifying Kubernetes components...
I1119 22:20:07.760449 244005 addons.go:239] Setting addon default-storageclass=true in "old-k8s-version-975700"
I1119 22:20:07.760487 244005 host.go:66] Checking if "old-k8s-version-975700" exists ...
I1119 22:20:07.760848 244005 cli_runner.go:164] Run: docker container inspect old-k8s-version-975700 --format={{.State.Status}}
I1119 22:20:07.781264 244005 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1119 22:20:07.781292 244005 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1119 22:20:07.781358 244005 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-975700
I1119 22:20:07.790624 244005 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1119 22:20:07.790705 244005 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1119 22:20:07.805293 244005 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33058 SSHKeyPath:/home/jenkins/minikube-integration/21918-9296/.minikube/machines/old-k8s-version-975700/id_rsa Username:docker}
I1119 22:20:07.811125 244005 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1119 22:20:07.811152 244005 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1119 22:20:07.811221 244005 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-975700
I1119 22:20:07.839037 244005 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33058 SSHKeyPath:/home/jenkins/minikube-integration/21918-9296/.minikube/machines/old-k8s-version-975700/id_rsa Username:docker}
I1119 22:20:07.927378 244005 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1119 22:20:07.930474 244005 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1119 22:20:07.930565 244005 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.94.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I1119 22:20:07.945012 244005 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1119 22:20:08.325616 244005 start.go:977] {"host.minikube.internal": 192.168.94.1} host record injected into CoreDNS's ConfigMap
I1119 22:20:08.326981 244005 node_ready.go:35] waiting up to 6m0s for node "old-k8s-version-975700" to be "Ready" ...
I1119 22:20:08.525071 244005 out.go:179] * Enabled addons: default-storageclass, storage-provisioner
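For context on the ConfigMap edit at 22:20:07.930 above: the sed expression inserts a CoreDNS `hosts` block mapping host.minikube.internal to the host gateway (192.168.94.1) ahead of the `forward . /etc/resolv.conf` directive, with `fallthrough` so all other names still resolve via the host's resolv.conf, and inserts a `log` directive ahead of `errors`; the "host record injected" line confirms the `kubectl replace` succeeded.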
I1119 22:20:05.409665 248121 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/etcd_3.6.4-0: (2.845117956s)
I1119 22:20:05.409701 248121 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0 from cache
I1119 22:20:05.409742 248121 containerd.go:285] Loading image: /var/lib/minikube/images/storage-provisioner_v5
I1119 22:20:05.409813 248121 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/storage-provisioner_v5
I1119 22:20:05.827105 248121 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 from cache
I1119 22:20:05.827149 248121 cache_images.go:125] Successfully loaded all cached images
I1119 22:20:05.827155 248121 cache_images.go:94] duration metric: took 9.641883158s to LoadCachedImages
I1119 22:20:05.827169 248121 kubeadm.go:935] updating node { 192.168.103.2 8443 v1.34.1 containerd true true} ...
I1119 22:20:05.827281 248121 kubeadm.go:947] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=no-preload-638439 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.103.2
[Install]
config:
{KubernetesVersion:v1.34.1 ClusterName:no-preload-638439 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
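A note on the unit fragment above: the empty `ExecStart=` line is the standard systemd drop-in idiom that clears the base unit's command so the following full kubelet invocation replaces it rather than appending a second ExecStart. This fragment is what later lands in /etc/systemd/system/kubelet.service.d/10-kubeadm.conf via the 322-byte `scp memory` at 22:20:07.476 below.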
I1119 22:20:05.827350 248121 ssh_runner.go:195] Run: sudo crictl info
I1119 22:20:05.854538 248121 cni.go:84] Creating CNI manager for ""
I1119 22:20:05.854565 248121 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1119 22:20:05.854580 248121 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1119 22:20:05.854605  248121 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.103.2 APIServerPort:8443 KubernetesVersion:v1.34.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:no-preload-638439 NodeName:no-preload-638439 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.103.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.103.2 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1119 22:20:05.854728 248121 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.103.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///run/containerd/containerd.sock
name: "no-preload-638439"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.103.2"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.103.2"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.1
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
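The rendered kubeadm config above is four YAML documents in one file (InitConfiguration, ClusterConfiguration, KubeletConfiguration, KubeProxyConfiguration); it is written to /var/tmp/minikube/kubeadm.yaml.new at 22:20:07.504 below. A small sanity-check sketch that splits such a multi-document file and lists each document's `kind:` (illustrative helper, stdlib only):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// listKinds splits a multi-document kubeadm config on "---" separators and
// reports each document's kind; for the file above that should be
// InitConfiguration, ClusterConfiguration, KubeletConfiguration,
// KubeProxyConfiguration.
func listKinds(config string) []string {
	re := regexp.MustCompile(`(?m)^kind:\s*(\S+)`)
	var kinds []string
	for _, doc := range strings.Split(config, "\n---\n") {
		if m := re.FindStringSubmatch(doc); m != nil {
			kinds = append(kinds, m[1])
		}
	}
	return kinds
}

func main() {
	sample := "apiVersion: kubeadm.k8s.io/v1beta4\nkind: InitConfiguration\n---\napiVersion: kubeadm.k8s.io/v1beta4\nkind: ClusterConfiguration\n"
	fmt.Println(listKinds(sample)) // [InitConfiguration ClusterConfiguration]
}
```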
I1119 22:20:05.854794 248121 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.1
I1119 22:20:05.863483 248121 binaries.go:54] Didn't find k8s binaries: sudo ls /var/lib/minikube/binaries/v1.34.1: Process exited with status 2
stdout:
stderr:
ls: cannot access '/var/lib/minikube/binaries/v1.34.1': No such file or directory
Initiating transfer...
I1119 22:20:05.863536 248121 ssh_runner.go:195] Run: sudo mkdir -p /var/lib/minikube/binaries/v1.34.1
I1119 22:20:05.871942 248121 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubectl?checksum=file:https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubectl.sha256
I1119 22:20:05.871968 248121 download.go:108] Downloading: https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubelet?checksum=file:https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubelet.sha256 -> /home/jenkins/minikube-integration/21918-9296/.minikube/cache/linux/amd64/v1.34.1/kubelet
I1119 22:20:05.871947 248121 download.go:108] Downloading: https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubeadm?checksum=file:https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubeadm.sha256 -> /home/jenkins/minikube-integration/21918-9296/.minikube/cache/linux/amd64/v1.34.1/kubeadm
I1119 22:20:05.872035 248121 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubectl
I1119 22:20:05.876399 248121 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.34.1/kubectl: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubectl: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.34.1/kubectl': No such file or directory
I1119 22:20:05.876433 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/cache/linux/amd64/v1.34.1/kubectl --> /var/lib/minikube/binaries/v1.34.1/kubectl (60559544 bytes)
I1119 22:20:07.043592 248121 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1119 22:20:07.058665 248121 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubelet
I1119 22:20:07.063097 248121 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.34.1/kubelet: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubelet: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.34.1/kubelet': No such file or directory
I1119 22:20:07.063136 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/cache/linux/amd64/v1.34.1/kubelet --> /var/lib/minikube/binaries/v1.34.1/kubelet (59195684 bytes)
I1119 22:20:07.259328 248121 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubeadm
I1119 22:20:07.263904 248121 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.34.1/kubeadm: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubeadm: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.34.1/kubeadm': No such file or directory
I1119 22:20:07.263944 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/cache/linux/amd64/v1.34.1/kubeadm --> /var/lib/minikube/binaries/v1.34.1/kubeadm (74027192 bytes)
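The kubectl/kubelet/kubeadm downloads above carry a `?checksum=file:<url>.sha256` parameter, i.e. each payload is verified against a published SHA-256 digest before it is cached and scp'd onto the node. A minimal sketch of that verification step (file paths are illustrative):

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"os"
	"strings"
)

// verifyChecksum re-does the checksum=file:...sha256 step by hand: hash the
// downloaded binary and compare it against the first field of the published
// .sha256 file ("<hex digest> [filename]").
func verifyChecksum(binPath, sumPath string) error {
	data, err := os.ReadFile(binPath)
	if err != nil {
		return err
	}
	sumData, err := os.ReadFile(sumPath)
	if err != nil {
		return err
	}
	fields := strings.Fields(string(sumData))
	if len(fields) == 0 {
		return fmt.Errorf("empty checksum file %s", sumPath)
	}
	digest := sha256.Sum256(data)
	if got := hex.EncodeToString(digest[:]); got != fields[0] {
		return fmt.Errorf("checksum mismatch: got %s, want %s", got, fields[0])
	}
	return nil
}

func main() {
	fmt.Println(verifyChecksum(
		"/home/jenkins/minikube-integration/21918-9296/.minikube/cache/linux/amd64/v1.34.1/kubelet",
		"/tmp/kubelet.sha256")) // hypothetical local copy of the published digest
}
```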
I1119 22:20:07.467537 248121 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1119 22:20:07.476103 248121 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (322 bytes)
I1119 22:20:07.489039 248121 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1119 22:20:07.504456 248121 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2232 bytes)
I1119 22:20:07.517675 248121 ssh_runner.go:195] Run: grep 192.168.103.2 control-plane.minikube.internal$ /etc/hosts
I1119 22:20:07.521966 248121 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.103.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
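The hosts edit at 22:20:07.521 writes through /tmp/h.$$ and then `sudo cp`s the result into place because the shell redirection in a plain `sudo ... > /etc/hosts` would run as the unprivileged user. The same de-duplicate-then-append logic in Go, for illustration:

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// ensureHostsEntry mirrors the grep -v / echo / cp one-liner above: drop any
// stale line for the host name, append the current mapping, rewrite the file.
func ensureHostsEntry(path, ip, host string) error {
	data, err := os.ReadFile(path)
	if err != nil {
		return err
	}
	var kept []string
	for _, line := range strings.Split(strings.TrimRight(string(data), "\n"), "\n") {
		fields := strings.Fields(line)
		// Matches the grep -v $'\t<host>$' filter: skip an existing mapping.
		if len(fields) >= 2 && fields[len(fields)-1] == host {
			continue
		}
		kept = append(kept, line)
	}
	kept = append(kept, ip+"\t"+host)
	return os.WriteFile(path, []byte(strings.Join(kept, "\n")+"\n"), 0644)
}

func main() {
	fmt.Println(ensureHostsEntry("/etc/hosts", "192.168.103.2", "control-plane.minikube.internal"))
}
```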
I1119 22:20:07.532448 248121 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1119 22:20:07.616669 248121 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1119 22:20:07.647854 248121 certs.go:69] Setting up /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439 for IP: 192.168.103.2
I1119 22:20:07.647911 248121 certs.go:195] generating shared ca certs ...
I1119 22:20:07.647941 248121 certs.go:227] acquiring lock for ca certs: {Name:mkfe62d1b64cfdbe1c6a3d1f38aa0edc5b9ec419 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:20:07.648100 248121 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21918-9296/.minikube/ca.key
I1119 22:20:07.648156 248121 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21918-9296/.minikube/proxy-client-ca.key
I1119 22:20:07.648169 248121 certs.go:257] generating profile certs ...
I1119 22:20:07.648233 248121 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/client.key
I1119 22:20:07.648249 248121 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/client.crt with IP's: []
I1119 22:20:08.248835 248121 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/client.crt ...
I1119 22:20:08.248872 248121 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/client.crt: {Name:mk71551595bc691ff029aa4f22d8136d735c86c8 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:20:08.249095 248121 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/client.key ...
I1119 22:20:08.249107 248121 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/client.key: {Name:mk7714d393e738013c7abe0f1689bcf490e26b5b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:20:08.249250 248121 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/apiserver.key.6e1d1cff
I1119 22:20:08.249267 248121 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/apiserver.crt.6e1d1cff with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.103.2]
I1119 22:20:09.018572 248121 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/apiserver.crt.6e1d1cff ...
I1119 22:20:09.018603 248121 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/apiserver.crt.6e1d1cff: {Name:mk1a2db3ea3ff5c82c4c822f2131fbadbd39c724 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:20:09.018790 248121 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/apiserver.key.6e1d1cff ...
I1119 22:20:09.018808 248121 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/apiserver.key.6e1d1cff: {Name:mk13f089d71bdc7abee8608285249f8ab5ad14b2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:20:09.018926 248121 certs.go:382] copying /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/apiserver.crt.6e1d1cff -> /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/apiserver.crt
I1119 22:20:09.019033 248121 certs.go:386] copying /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/apiserver.key.6e1d1cff -> /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/apiserver.key
I1119 22:20:09.019118 248121 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/proxy-client.key
I1119 22:20:09.019145 248121 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/proxy-client.crt with IP's: []
I1119 22:20:09.141320 248121 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/proxy-client.crt ...
I1119 22:20:09.141353 248121 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/proxy-client.crt: {Name:mke73db150d5fe88961c2b7ca7e43e6cb8c1e87f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:20:09.141532 248121 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/proxy-client.key ...
I1119 22:20:09.141550 248121 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/proxy-client.key: {Name:mk65b56a4bcd9d60fdf62f046abf7a5abe0e729f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
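The certs.go/crypto.go lines above issue three profile certificates under the shared minikubeCA: a client cert for "minikube-user", the apiserver serving cert whose IP SANs were printed at 22:20:08.249 (Service VIP 10.96.0.1, loopback, 10.0.0.1, and the node IP 192.168.103.2), and the front-proxy client cert for the aggregator. A compressed sketch of issuing a serving cert with IP SANs; it self-signs purely for brevity where minikube signs with its CA:

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"fmt"
	"math/big"
	"net"
	"time"
)

// serverCertWithIPSANs sketches the generation step above: a serving
// certificate whose IP SANs cover the kubernetes Service VIP, loopback and
// the node IP, so clients can verify the apiserver on any of them.
func serverCertWithIPSANs(ips []net.IP) ([]byte, error) {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return nil, err
	}
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: "minikube"},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(365 * 24 * time.Hour),
		IPAddresses:  ips, // the SAN list printed in the log above
		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
	}
	// Self-signed: the template doubles as parent; returns DER bytes.
	return x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
}

func main() {
	der, err := serverCertWithIPSANs([]net.IP{
		net.ParseIP("10.96.0.1"), net.ParseIP("127.0.0.1"),
		net.ParseIP("10.0.0.1"), net.ParseIP("192.168.103.2"),
	})
	fmt.Println(len(der), err)
}
```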
I1119 22:20:09.141750 248121 certs.go:484] found cert: /home/jenkins/minikube-integration/21918-9296/.minikube/certs/12821.pem (1338 bytes)
W1119 22:20:09.141799 248121 certs.go:480] ignoring /home/jenkins/minikube-integration/21918-9296/.minikube/certs/12821_empty.pem, impossibly tiny 0 bytes
I1119 22:20:09.141812 248121 certs.go:484] found cert: /home/jenkins/minikube-integration/21918-9296/.minikube/certs/ca-key.pem (1675 bytes)
I1119 22:20:09.141845 248121 certs.go:484] found cert: /home/jenkins/minikube-integration/21918-9296/.minikube/certs/ca.pem (1078 bytes)
I1119 22:20:09.141894 248121 certs.go:484] found cert: /home/jenkins/minikube-integration/21918-9296/.minikube/certs/cert.pem (1123 bytes)
I1119 22:20:09.141928 248121 certs.go:484] found cert: /home/jenkins/minikube-integration/21918-9296/.minikube/certs/key.pem (1679 bytes)
I1119 22:20:09.141984 248121 certs.go:484] found cert: /home/jenkins/minikube-integration/21918-9296/.minikube/files/etc/ssl/certs/128212.pem (1708 bytes)
I1119 22:20:09.142554 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1119 22:20:09.161569 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1119 22:20:09.180990 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1119 22:20:09.199264 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I1119 22:20:09.217135 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
I1119 22:20:09.236364 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I1119 22:20:09.255084 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1119 22:20:09.274604 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I1119 22:20:09.293451 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1119 22:20:09.315834 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/certs/12821.pem --> /usr/share/ca-certificates/12821.pem (1338 bytes)
I1119 22:20:09.336567 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/files/etc/ssl/certs/128212.pem --> /usr/share/ca-certificates/128212.pem (1708 bytes)
I1119 22:20:09.354248 248121 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1119 22:20:09.367868 248121 ssh_runner.go:195] Run: openssl version
I1119 22:20:09.374260 248121 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1119 22:20:09.383332 248121 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1119 22:20:09.387801 248121 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 19 21:48 /usr/share/ca-certificates/minikubeCA.pem
I1119 22:20:09.387864 248121 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1119 22:20:09.424342 248121 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1119 22:20:09.433605 248121 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/12821.pem && ln -fs /usr/share/ca-certificates/12821.pem /etc/ssl/certs/12821.pem"
I1119 22:20:09.442478 248121 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/12821.pem
I1119 22:20:09.446634 248121 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 19 21:53 /usr/share/ca-certificates/12821.pem
I1119 22:20:09.446694 248121 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/12821.pem
I1119 22:20:09.481876 248121 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/12821.pem /etc/ssl/certs/51391683.0"
I1119 22:20:09.491181 248121 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/128212.pem && ln -fs /usr/share/ca-certificates/128212.pem /etc/ssl/certs/128212.pem"
I1119 22:20:09.499823 248121 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/128212.pem
I1119 22:20:09.503986 248121 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 19 21:53 /usr/share/ca-certificates/128212.pem
I1119 22:20:09.504043 248121 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/128212.pem
I1119 22:20:09.539481 248121 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/128212.pem /etc/ssl/certs/3ec20f2e.0"
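The three commands repeated above for each certificate follow OpenSSL's lookup-by-hash convention: a CA becomes trusted system-wide once /etc/ssl/certs contains a symlink named <subject-hash>.0 (here b5213941.0, 51391683.0 and 3ec20f2e.0) pointing at the PEM. A minimal Go sketch of the same mechanism, assuming openssl is on PATH; unlike the log, which points the hash link at the /etc/ssl/certs alias, this links straight to the PEM:

// installCA mirrors the log's flow: compute the OpenSSL subject hash of a
// CA certificate and create the /etc/ssl/certs/<hash>.0 symlink that
// OpenSSL's lookup-by-hash machinery expects.
package main

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
)

func installCA(pem string) error {
	// Equivalent of: openssl x509 -hash -noout -in <pem>
	out, err := exec.Command("openssl", "x509", "-hash", "-noout", "-in", pem).Output()
	if err != nil {
		return fmt.Errorf("hashing %s: %w", pem, err)
	}
	hash := strings.TrimSpace(string(out))
	link := filepath.Join("/etc/ssl/certs", hash+".0")
	// Equivalent of: test -L <link> || ln -fs <pem> <link>
	if _, err := os.Lstat(link); err == nil {
		return nil // symlink already present
	}
	return os.Symlink(pem, link)
}

func main() {
	if err := installCA("/usr/share/ca-certificates/minikubeCA.pem"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}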
I1119 22:20:09.548630 248121 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1119 22:20:09.552649 248121 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
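The stat above is how minikube distinguishes a first start from a restart: exit status 1 on the kubelet client cert means no prior kubeadm run, so a full init follows rather than a repair. A local sketch of that decision, with the path taken from the log and the messages illustrative:

// First-start detection as in certs.go:400: a missing kubelet client
// certificate is taken to mean the cluster was never initialized.
package main

import (
	"errors"
	"fmt"
	"os"
)

func main() {
	const crt = "/var/lib/minikube/certs/apiserver-kubelet-client.crt"
	if _, err := os.Stat(crt); errors.Is(err, os.ErrNotExist) {
		fmt.Println("cert doesn't exist, likely first start: run kubeadm init")
		return
	}
	fmt.Println("cert present: existing cluster, skip full init")
}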
I1119 22:20:09.552709 248121 kubeadm.go:401] StartCluster: {Name:no-preload-638439 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-638439 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.103.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1119 22:20:09.552800 248121 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I1119 22:20:09.552841 248121 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I1119 22:20:09.580504 248121 cri.go:89] found id: ""
I1119 22:20:09.580577 248121 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1119 22:20:09.588825 248121 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1119 22:20:09.597263 248121 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I1119 22:20:09.597312 248121 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1119 22:20:09.605431 248121 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1119 22:20:09.605448 248121 kubeadm.go:158] found existing configuration files:
I1119 22:20:09.605505 248121 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1119 22:20:09.613580 248121 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1119 22:20:09.613647 248121 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1119 22:20:09.621432 248121 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1119 22:20:09.629381 248121 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1119 22:20:09.629444 248121 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1119 22:20:09.637498 248121 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1119 22:20:09.645457 248121 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1119 22:20:09.645500 248121 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1119 22:20:09.653775 248121 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1119 22:20:09.662581 248121 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1119 22:20:09.662631 248121 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1119 22:20:09.670267 248121 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1119 22:20:09.705969 248121 kubeadm.go:319] [init] Using Kubernetes version: v1.34.1
I1119 22:20:09.706049 248121 kubeadm.go:319] [preflight] Running pre-flight checks
I1119 22:20:09.725461 248121 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I1119 22:20:09.725557 248121 kubeadm.go:319] KERNEL_VERSION: 6.8.0-1043-gcp
I1119 22:20:09.725619 248121 kubeadm.go:319] OS: Linux
I1119 22:20:09.725688 248121 kubeadm.go:319] CGROUPS_CPU: enabled
I1119 22:20:09.725759 248121 kubeadm.go:319] CGROUPS_CPUSET: enabled
I1119 22:20:09.725823 248121 kubeadm.go:319] CGROUPS_DEVICES: enabled
I1119 22:20:09.725926 248121 kubeadm.go:319] CGROUPS_FREEZER: enabled
I1119 22:20:09.726011 248121 kubeadm.go:319] CGROUPS_MEMORY: enabled
I1119 22:20:09.726090 248121 kubeadm.go:319] CGROUPS_PIDS: enabled
I1119 22:20:09.726169 248121 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I1119 22:20:09.726247 248121 kubeadm.go:319] CGROUPS_IO: enabled
I1119 22:20:09.785631 248121 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1119 22:20:09.785785 248121 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1119 22:20:09.785930 248121 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I1119 22:20:09.790816 248121 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1119 22:20:05.698391 216336 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1119 22:20:08.526183 244005 addons.go:515] duration metric: took 799.151282ms for enable addons: enabled=[default-storageclass storage-provisioner]
I1119 22:20:08.830648 244005 kapi.go:214] "coredns" deployment in "kube-system" namespace and "old-k8s-version-975700" context rescaled to 1 replicas
W1119 22:20:10.330548 244005 node_ready.go:57] node "old-k8s-version-975700" has "Ready":"False" status (will retry)
W1119 22:20:12.330688 244005 node_ready.go:57] node "old-k8s-version-975700" has "Ready":"False" status (will retry)
I1119 22:20:09.792948 248121 out.go:252] - Generating certificates and keys ...
I1119 22:20:09.793051 248121 kubeadm.go:319] [certs] Using existing ca certificate authority
I1119 22:20:09.793149 248121 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1119 22:20:10.738826 248121 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I1119 22:20:10.908170 248121 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I1119 22:20:11.291841 248121 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1119 22:20:11.623960 248121 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1119 22:20:11.828384 248121 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1119 22:20:11.828565 248121 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [localhost no-preload-638439] and IPs [192.168.103.2 127.0.0.1 ::1]
I1119 22:20:12.233215 248121 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1119 22:20:12.233354 248121 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [localhost no-preload-638439] and IPs [192.168.103.2 127.0.0.1 ::1]
I1119 22:20:12.358552 248121 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1119 22:20:12.567027 248121 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1119 22:20:12.649341 248121 kubeadm.go:319] [certs] Generating "sa" key and public key
I1119 22:20:12.649468 248121 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1119 22:20:12.821942 248121 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1119 22:20:13.184331 248121 kubeadm.go:319] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I1119 22:20:13.249251 248121 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1119 22:20:13.507036 248121 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1119 22:20:13.992391 248121 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1119 22:20:13.992949 248121 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1119 22:20:14.073515 248121 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1119 22:20:10.699588 216336 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1119 22:20:10.699656 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1119 22:20:10.699719 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1119 22:20:10.736721 216336 cri.go:89] found id: "7ad4b982b6755076027d5e5a0dbbc765f8f8a005fc34051b36948b140a060ce7"
I1119 22:20:10.736747 216336 cri.go:89] found id: "0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0"
I1119 22:20:10.736753 216336 cri.go:89] found id: "b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42"
I1119 22:20:10.736758 216336 cri.go:89] found id: ""
I1119 22:20:10.736767 216336 logs.go:282] 3 containers: [7ad4b982b6755076027d5e5a0dbbc765f8f8a005fc34051b36948b140a060ce7 0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0 b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42]
I1119 22:20:10.736834 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:10.742155 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:10.747306 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:10.752281 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1119 22:20:10.752356 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1119 22:20:10.785664 216336 cri.go:89] found id: "4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc"
I1119 22:20:10.785691 216336 cri.go:89] found id: ""
I1119 22:20:10.785700 216336 logs.go:282] 1 containers: [4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc]
I1119 22:20:10.785758 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:10.791037 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1119 22:20:10.791107 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1119 22:20:10.827690 216336 cri.go:89] found id: ""
I1119 22:20:10.827736 216336 logs.go:282] 0 containers: []
W1119 22:20:10.827749 216336 logs.go:284] No container was found matching "coredns"
I1119 22:20:10.827781 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1119 22:20:10.827856 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1119 22:20:10.860463 216336 cri.go:89] found id: "599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0"
I1119 22:20:10.860489 216336 cri.go:89] found id: ""
I1119 22:20:10.860499 216336 logs.go:282] 1 containers: [599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0]
I1119 22:20:10.860557 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:10.865818 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1119 22:20:10.865902 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1119 22:20:10.896395 216336 cri.go:89] found id: ""
I1119 22:20:10.896425 216336 logs.go:282] 0 containers: []
W1119 22:20:10.896457 216336 logs.go:284] No container was found matching "kube-proxy"
I1119 22:20:10.896464 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1119 22:20:10.896524 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1119 22:20:10.927065 216336 cri.go:89] found id: "1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2"
I1119 22:20:10.927091 216336 cri.go:89] found id: ""
I1119 22:20:10.927100 216336 logs.go:282] 1 containers: [1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2]
I1119 22:20:10.927157 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:10.931718 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1119 22:20:10.931789 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1119 22:20:10.960849 216336 cri.go:89] found id: ""
I1119 22:20:10.960892 216336 logs.go:282] 0 containers: []
W1119 22:20:10.960903 216336 logs.go:284] No container was found matching "kindnet"
I1119 22:20:10.960910 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1119 22:20:10.960962 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1119 22:20:10.993029 216336 cri.go:89] found id: ""
I1119 22:20:10.993057 216336 logs.go:282] 0 containers: []
W1119 22:20:10.993067 216336 logs.go:284] No container was found matching "storage-provisioner"
I1119 22:20:10.993080 216336 logs.go:123] Gathering logs for kube-apiserver [0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0] ...
I1119 22:20:10.993094 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0"
I1119 22:20:11.027974 216336 logs.go:123] Gathering logs for kube-apiserver [b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42] ...
I1119 22:20:11.028010 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42"
I1119 22:20:11.062086 216336 logs.go:123] Gathering logs for kube-scheduler [599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0] ...
I1119 22:20:11.062120 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0"
I1119 22:20:11.103210 216336 logs.go:123] Gathering logs for kube-controller-manager [1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2] ...
I1119 22:20:11.103250 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2"
I1119 22:20:11.145837 216336 logs.go:123] Gathering logs for containerd ...
I1119 22:20:11.145872 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1119 22:20:11.199841 216336 logs.go:123] Gathering logs for container status ...
I1119 22:20:11.199937 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1119 22:20:11.236586 216336 logs.go:123] Gathering logs for dmesg ...
I1119 22:20:11.236618 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1119 22:20:11.253432 216336 logs.go:123] Gathering logs for kube-apiserver [7ad4b982b6755076027d5e5a0dbbc765f8f8a005fc34051b36948b140a060ce7] ...
I1119 22:20:11.253487 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 7ad4b982b6755076027d5e5a0dbbc765f8f8a005fc34051b36948b140a060ce7"
I1119 22:20:11.295903 216336 logs.go:123] Gathering logs for etcd [4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc] ...
I1119 22:20:11.295943 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc"
I1119 22:20:11.337708 216336 logs.go:123] Gathering logs for kubelet ...
I1119 22:20:11.337745 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1119 22:20:11.452249 216336 logs.go:123] Gathering logs for describe nodes ...
I1119 22:20:11.452285 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1119 22:20:14.830008 244005 node_ready.go:57] node "old-k8s-version-975700" has "Ready":"False" status (will retry)
W1119 22:20:16.830268 244005 node_ready.go:57] node "old-k8s-version-975700" has "Ready":"False" status (will retry)
I1119 22:20:14.075591 248121 out.go:252] - Booting up control plane ...
I1119 22:20:14.075701 248121 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1119 22:20:14.075795 248121 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1119 22:20:14.076511 248121 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1119 22:20:14.092600 248121 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1119 22:20:14.092767 248121 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I1119 22:20:14.099651 248121 kubeadm.go:319] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I1119 22:20:14.099786 248121 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1119 22:20:14.099865 248121 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1119 22:20:14.205620 248121 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I1119 22:20:14.205784 248121 kubeadm.go:319] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I1119 22:20:14.707136 248121 kubeadm.go:319] [kubelet-check] The kubelet is healthy after 501.67843ms
I1119 22:20:14.711176 248121 kubeadm.go:319] [control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
I1119 22:20:14.711406 248121 kubeadm.go:319] [control-plane-check] Checking kube-apiserver at https://192.168.103.2:8443/livez
I1119 22:20:14.711556 248121 kubeadm.go:319] [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
I1119 22:20:14.711669 248121 kubeadm.go:319] [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
I1119 22:20:16.370429 248121 kubeadm.go:319] [control-plane-check] kube-controller-manager is healthy after 1.659105526s
I1119 22:20:16.919263 248121 kubeadm.go:319] [control-plane-check] kube-scheduler is healthy after 2.208262146s
I1119 22:20:18.712413 248121 kubeadm.go:319] [control-plane-check] kube-apiserver is healthy after 4.001122323s
I1119 22:20:18.724319 248121 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1119 22:20:18.734195 248121 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1119 22:20:18.743489 248121 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1119 22:20:18.743707 248121 kubeadm.go:319] [mark-control-plane] Marking the node no-preload-638439 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1119 22:20:18.749843 248121 kubeadm.go:319] [bootstrap-token] Using token: tkvbyg.4blpqvlc8c0koqab
I1119 22:20:18.751541 248121 out.go:252] - Configuring RBAC rules ...
I1119 22:20:18.751647 248121 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1119 22:20:18.754347 248121 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1119 22:20:18.760461 248121 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1119 22:20:18.763019 248121 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1119 22:20:18.765434 248121 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1119 22:20:18.768021 248121 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1119 22:20:19.119568 248121 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1119 22:20:19.537037 248121 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1119 22:20:20.119469 248121 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1119 22:20:20.120399 248121 kubeadm.go:319]
I1119 22:20:20.120467 248121 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1119 22:20:20.120472 248121 kubeadm.go:319]
I1119 22:20:20.120605 248121 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1119 22:20:20.120632 248121 kubeadm.go:319]
I1119 22:20:20.120661 248121 kubeadm.go:319] mkdir -p $HOME/.kube
I1119 22:20:20.120719 248121 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1119 22:20:20.120765 248121 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1119 22:20:20.120772 248121 kubeadm.go:319]
I1119 22:20:20.120845 248121 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1119 22:20:20.120857 248121 kubeadm.go:319]
I1119 22:20:20.121004 248121 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1119 22:20:20.121029 248121 kubeadm.go:319]
I1119 22:20:20.121103 248121 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1119 22:20:20.121207 248121 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1119 22:20:20.121271 248121 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1119 22:20:20.121297 248121 kubeadm.go:319]
I1119 22:20:20.121444 248121 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1119 22:20:20.121523 248121 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1119 22:20:20.121533 248121 kubeadm.go:319]
I1119 22:20:20.121611 248121 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token tkvbyg.4blpqvlc8c0koqab \
I1119 22:20:20.121712 248121 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:6123875ff628fb9eedbd72f2253477865aa197083b84a1d60cb6c00de308bc63 \
I1119 22:20:20.121734 248121 kubeadm.go:319] --control-plane
I1119 22:20:20.121738 248121 kubeadm.go:319]
I1119 22:20:20.121810 248121 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1119 22:20:20.121816 248121 kubeadm.go:319]
I1119 22:20:20.121927 248121 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token tkvbyg.4blpqvlc8c0koqab \
I1119 22:20:20.122034 248121 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:6123875ff628fb9eedbd72f2253477865aa197083b84a1d60cb6c00de308bc63
I1119 22:20:20.124555 248121 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1043-gcp\n", err: exit status 1
I1119 22:20:20.124740 248121 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1119 22:20:20.124773 248121 cni.go:84] Creating CNI manager for ""
I1119 22:20:20.124786 248121 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1119 22:20:20.127350 248121 out.go:179] * Configuring CNI (Container Networking Interface) ...
W1119 22:20:19.330624 244005 node_ready.go:57] node "old-k8s-version-975700" has "Ready":"False" status (will retry)
W1119 22:20:21.830427 244005 node_ready.go:57] node "old-k8s-version-975700" has "Ready":"False" status (will retry)
I1119 22:20:22.330516 244005 node_ready.go:49] node "old-k8s-version-975700" is "Ready"
I1119 22:20:22.330545 244005 node_ready.go:38] duration metric: took 14.003533581s for node "old-k8s-version-975700" to be "Ready" ...
I1119 22:20:22.330557 244005 api_server.go:52] waiting for apiserver process to appear ...
I1119 22:20:22.330607 244005 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1119 22:20:22.343206 244005 api_server.go:72] duration metric: took 14.6162161s to wait for apiserver process to appear ...
I1119 22:20:22.343236 244005 api_server.go:88] waiting for apiserver healthz status ...
I1119 22:20:22.343259 244005 api_server.go:253] Checking apiserver healthz at https://192.168.94.2:8443/healthz ...
I1119 22:20:22.347053 244005 api_server.go:279] https://192.168.94.2:8443/healthz returned 200:
ok
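Both clusters gate on this healthz probe: a plain HTTPS GET that counts an HTTP 200 with body "ok" as healthy and otherwise retries, as the stopped/timeout lines for process 216336 show. A hedged Go sketch of such a probe; the URL and 5s timeout are illustrative, and TLS verification is skipped only because the apiserver serving cert is signed by the cluster's own CA:

// probeHealthz sketches the check at api_server.go:253:
// GET https://<ip>:8443/healthz and report healthy on HTTP 200.
package main

import (
	"crypto/tls"
	"fmt"
	"io"
	"net/http"
	"time"
)

func probeHealthz(url string) error {
	client := &http.Client{
		Timeout: 5 * time.Second, // illustrative; minikube applies its own deadlines
		Transport: &http.Transport{
			// Skip verification here; a stricter probe would trust minikubeCA instead.
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("healthz returned %d: %s", resp.StatusCode, body)
	}
	fmt.Printf("%s returned 200:\n%s\n", url, body) // the log prints "ok"
	return nil
}

func main() {
	if err := probeHealthz("https://192.168.94.2:8443/healthz"); err != nil {
		fmt.Println("not healthy yet, will retry:", err)
	}
}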
I1119 22:20:22.348151 244005 api_server.go:141] control plane version: v1.28.0
I1119 22:20:22.348175 244005 api_server.go:131] duration metric: took 4.933094ms to wait for apiserver health ...
I1119 22:20:22.348183 244005 system_pods.go:43] waiting for kube-system pods to appear ...
I1119 22:20:22.351821 244005 system_pods.go:59] 8 kube-system pods found
I1119 22:20:22.351849 244005 system_pods.go:61] "coredns-5dd5756b68-8hdh7" [a4057bf2-fe2e-42db-83e9-bc625724c61c] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1119 22:20:22.351854 244005 system_pods.go:61] "etcd-old-k8s-version-975700" [12a76858-b7be-4963-8323-fe57ca12a08d] Running
I1119 22:20:22.351860 244005 system_pods.go:61] "kindnet-mlzfc" [e2532f4d-a32b-45a0-b846-1d2ecea1f926] Running
I1119 22:20:22.351864 244005 system_pods.go:61] "kube-apiserver-old-k8s-version-975700" [28d03966-c950-4e9c-bbd5-4aeb08bb3363] Running
I1119 22:20:22.351869 244005 system_pods.go:61] "kube-controller-manager-old-k8s-version-975700" [b2f2d323-34b1-47a7-945e-73086e2e6887] Running
I1119 22:20:22.351873 244005 system_pods.go:61] "kube-proxy-rnxxf" [f06c0f26-a6bc-4dcb-a9f4-c64b43b4cc1d] Running
I1119 22:20:22.351877 244005 system_pods.go:61] "kube-scheduler-old-k8s-version-975700" [65c95750-3a2f-4847-a93d-4e54bc709449] Running
I1119 22:20:22.351892 244005 system_pods.go:61] "storage-provisioner" [6c937194-8889-47a0-b05f-7af799e18044] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1119 22:20:22.351898 244005 system_pods.go:74] duration metric: took 3.709193ms to wait for pod list to return data ...
I1119 22:20:22.351906 244005 default_sa.go:34] waiting for default service account to be created ...
I1119 22:20:22.353863 244005 default_sa.go:45] found service account: "default"
I1119 22:20:22.353906 244005 default_sa.go:55] duration metric: took 1.968518ms for default service account to be created ...
I1119 22:20:22.353917 244005 system_pods.go:116] waiting for k8s-apps to be running ...
I1119 22:20:22.356763 244005 system_pods.go:86] 8 kube-system pods found
I1119 22:20:22.356787 244005 system_pods.go:89] "coredns-5dd5756b68-8hdh7" [a4057bf2-fe2e-42db-83e9-bc625724c61c] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1119 22:20:22.356792 244005 system_pods.go:89] "etcd-old-k8s-version-975700" [12a76858-b7be-4963-8323-fe57ca12a08d] Running
I1119 22:20:22.356799 244005 system_pods.go:89] "kindnet-mlzfc" [e2532f4d-a32b-45a0-b846-1d2ecea1f926] Running
I1119 22:20:22.356803 244005 system_pods.go:89] "kube-apiserver-old-k8s-version-975700" [28d03966-c950-4e9c-bbd5-4aeb08bb3363] Running
I1119 22:20:22.356810 244005 system_pods.go:89] "kube-controller-manager-old-k8s-version-975700" [b2f2d323-34b1-47a7-945e-73086e2e6887] Running
I1119 22:20:22.356813 244005 system_pods.go:89] "kube-proxy-rnxxf" [f06c0f26-a6bc-4dcb-a9f4-c64b43b4cc1d] Running
I1119 22:20:22.356817 244005 system_pods.go:89] "kube-scheduler-old-k8s-version-975700" [65c95750-3a2f-4847-a93d-4e54bc709449] Running
I1119 22:20:22.356822 244005 system_pods.go:89] "storage-provisioner" [6c937194-8889-47a0-b05f-7af799e18044] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1119 22:20:22.356838 244005 retry.go:31] will retry after 295.130955ms: missing components: kube-dns
I1119 22:20:20.128552 248121 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I1119 22:20:20.133893 248121 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.34.1/kubectl ...
I1119 22:20:20.133928 248121 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I1119 22:20:20.148247 248121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I1119 22:20:20.366418 248121 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1119 22:20:20.366472 248121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:20.366530 248121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes no-preload-638439 minikube.k8s.io/updated_at=2025_11_19T22_20_20_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=08454a179ffa60c8ae500105aac58654b5cdef58 minikube.k8s.io/name=no-preload-638439 minikube.k8s.io/primary=true
I1119 22:20:20.472760 248121 ops.go:34] apiserver oom_adj: -16
I1119 22:20:20.472956 248121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:20.973815 248121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:21.473583 248121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:21.973622 248121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:22.473704 248121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:22.973336 248121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:23.473849 248121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:23.973455 248121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:24.472997 248121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:24.537110 248121 kubeadm.go:1114] duration metric: took 4.170685845s to wait for elevateKubeSystemPrivileges
I1119 22:20:24.537150 248121 kubeadm.go:403] duration metric: took 14.984446293s to StartCluster
I1119 22:20:24.537173 248121 settings.go:142] acquiring lock: {Name:mk3c795849984e82ee99295088dd85252bd75f6f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:20:24.537243 248121 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21918-9296/kubeconfig
I1119 22:20:24.539105 248121 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21918-9296/kubeconfig: {Name:mk5b9093863cb8ca8629eea9fd861356875781d2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:20:24.539319 248121 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1119 22:20:24.539342 248121 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.103.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1119 22:20:24.539397 248121 addons.go:512] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1119 22:20:24.539519 248121 addons.go:70] Setting storage-provisioner=true in profile "no-preload-638439"
I1119 22:20:24.539532 248121 config.go:182] Loaded profile config "no-preload-638439": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1119 22:20:24.539540 248121 addons.go:239] Setting addon storage-provisioner=true in "no-preload-638439"
I1119 22:20:24.539552 248121 addons.go:70] Setting default-storageclass=true in profile "no-preload-638439"
I1119 22:20:24.539577 248121 host.go:66] Checking if "no-preload-638439" exists ...
I1119 22:20:24.539588 248121 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "no-preload-638439"
I1119 22:20:24.539936 248121 cli_runner.go:164] Run: docker container inspect no-preload-638439 --format={{.State.Status}}
I1119 22:20:24.540134 248121 cli_runner.go:164] Run: docker container inspect no-preload-638439 --format={{.State.Status}}
I1119 22:20:24.541288 248121 out.go:179] * Verifying Kubernetes components...
I1119 22:20:24.543039 248121 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1119 22:20:24.564207 248121 addons.go:239] Setting addon default-storageclass=true in "no-preload-638439"
I1119 22:20:24.564253 248121 host.go:66] Checking if "no-preload-638439" exists ...
I1119 22:20:24.564597 248121 cli_runner.go:164] Run: docker container inspect no-preload-638439 --format={{.State.Status}}
I1119 22:20:24.564680 248121 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1119 22:20:24.568527 248121 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1119 22:20:24.568546 248121 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1119 22:20:24.568596 248121 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-638439
I1119 22:20:24.597385 248121 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21918-9296/.minikube/machines/no-preload-638439/id_rsa Username:docker}
I1119 22:20:24.599498 248121 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1119 22:20:24.599523 248121 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1119 22:20:24.599582 248121 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-638439
I1119 22:20:24.624046 248121 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21918-9296/.minikube/machines/no-preload-638439/id_rsa Username:docker}
I1119 22:20:24.628608 248121 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.103.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I1119 22:20:24.684697 248121 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1119 22:20:24.711970 248121 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1119 22:20:24.742786 248121 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1119 22:20:24.836401 248121 start.go:977] {"host.minikube.internal": 192.168.103.1} host record injected into CoreDNS's ConfigMap
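The sed pipeline at 22:20:24.628608 edits the Corefile inside the coredns ConfigMap in place. Reconstructed from those sed expressions (the surrounding plugins are the stock CoreDNS defaults, shown here only for context), the relevant part of the server block afterwards reads roughly:

    log
    errors
    ...
    hosts {
       192.168.103.1 host.minikube.internal
       fallthrough
    }
    forward . /etc/resolv.conf

The hosts plugin answers host.minikube.internal locally and falls through to the remaining plugins for every other name, which is what makes the "host record injected" line above possible without touching upstream DNS.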
I1119 22:20:24.837864 248121 node_ready.go:35] waiting up to 6m0s for node "no-preload-638439" to be "Ready" ...
I1119 22:20:25.026785 248121 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
I1119 22:20:21.527976 216336 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": (10.075664087s)
W1119 22:20:21.528025 216336 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
Unable to connect to the server: net/http: TLS handshake timeout
output:
** stderr **
Unable to connect to the server: net/http: TLS handshake timeout
** /stderr **
I1119 22:20:24.028516 216336 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1119 22:20:22.657454 244005 system_pods.go:86] 8 kube-system pods found
I1119 22:20:22.657490 244005 system_pods.go:89] "coredns-5dd5756b68-8hdh7" [a4057bf2-fe2e-42db-83e9-bc625724c61c] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1119 22:20:22.657499 244005 system_pods.go:89] "etcd-old-k8s-version-975700" [12a76858-b7be-4963-8323-fe57ca12a08d] Running
I1119 22:20:22.657508 244005 system_pods.go:89] "kindnet-mlzfc" [e2532f4d-a32b-45a0-b846-1d2ecea1f926] Running
I1119 22:20:22.657513 244005 system_pods.go:89] "kube-apiserver-old-k8s-version-975700" [28d03966-c950-4e9c-bbd5-4aeb08bb3363] Running
I1119 22:20:22.657520 244005 system_pods.go:89] "kube-controller-manager-old-k8s-version-975700" [b2f2d323-34b1-47a7-945e-73086e2e6887] Running
I1119 22:20:22.657526 244005 system_pods.go:89] "kube-proxy-rnxxf" [f06c0f26-a6bc-4dcb-a9f4-c64b43b4cc1d] Running
I1119 22:20:22.657534 244005 system_pods.go:89] "kube-scheduler-old-k8s-version-975700" [65c95750-3a2f-4847-a93d-4e54bc709449] Running
I1119 22:20:22.657541 244005 system_pods.go:89] "storage-provisioner" [6c937194-8889-47a0-b05f-7af799e18044] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1119 22:20:22.657562 244005 retry.go:31] will retry after 290.603952ms: missing components: kube-dns
I1119 22:20:22.951933 244005 system_pods.go:86] 8 kube-system pods found
I1119 22:20:22.951963 244005 system_pods.go:89] "coredns-5dd5756b68-8hdh7" [a4057bf2-fe2e-42db-83e9-bc625724c61c] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1119 22:20:22.951969 244005 system_pods.go:89] "etcd-old-k8s-version-975700" [12a76858-b7be-4963-8323-fe57ca12a08d] Running
I1119 22:20:22.951974 244005 system_pods.go:89] "kindnet-mlzfc" [e2532f4d-a32b-45a0-b846-1d2ecea1f926] Running
I1119 22:20:22.951978 244005 system_pods.go:89] "kube-apiserver-old-k8s-version-975700" [28d03966-c950-4e9c-bbd5-4aeb08bb3363] Running
I1119 22:20:22.951983 244005 system_pods.go:89] "kube-controller-manager-old-k8s-version-975700" [b2f2d323-34b1-47a7-945e-73086e2e6887] Running
I1119 22:20:22.951988 244005 system_pods.go:89] "kube-proxy-rnxxf" [f06c0f26-a6bc-4dcb-a9f4-c64b43b4cc1d] Running
I1119 22:20:22.951992 244005 system_pods.go:89] "kube-scheduler-old-k8s-version-975700" [65c95750-3a2f-4847-a93d-4e54bc709449] Running
I1119 22:20:22.951996 244005 system_pods.go:89] "storage-provisioner" [6c937194-8889-47a0-b05f-7af799e18044] Running
I1119 22:20:22.952009 244005 retry.go:31] will retry after 460.674944ms: missing components: kube-dns
I1119 22:20:23.417271 244005 system_pods.go:86] 8 kube-system pods found
I1119 22:20:23.417302 244005 system_pods.go:89] "coredns-5dd5756b68-8hdh7" [a4057bf2-fe2e-42db-83e9-bc625724c61c] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1119 22:20:23.417309 244005 system_pods.go:89] "etcd-old-k8s-version-975700" [12a76858-b7be-4963-8323-fe57ca12a08d] Running
I1119 22:20:23.417314 244005 system_pods.go:89] "kindnet-mlzfc" [e2532f4d-a32b-45a0-b846-1d2ecea1f926] Running
I1119 22:20:23.417320 244005 system_pods.go:89] "kube-apiserver-old-k8s-version-975700" [28d03966-c950-4e9c-bbd5-4aeb08bb3363] Running
I1119 22:20:23.417326 244005 system_pods.go:89] "kube-controller-manager-old-k8s-version-975700" [b2f2d323-34b1-47a7-945e-73086e2e6887] Running
I1119 22:20:23.417331 244005 system_pods.go:89] "kube-proxy-rnxxf" [f06c0f26-a6bc-4dcb-a9f4-c64b43b4cc1d] Running
I1119 22:20:23.417336 244005 system_pods.go:89] "kube-scheduler-old-k8s-version-975700" [65c95750-3a2f-4847-a93d-4e54bc709449] Running
I1119 22:20:23.417341 244005 system_pods.go:89] "storage-provisioner" [6c937194-8889-47a0-b05f-7af799e18044] Running
I1119 22:20:23.417365 244005 retry.go:31] will retry after 513.116078ms: missing components: kube-dns
I1119 22:20:23.935257 244005 system_pods.go:86] 8 kube-system pods found
I1119 22:20:23.935284 244005 system_pods.go:89] "coredns-5dd5756b68-8hdh7" [a4057bf2-fe2e-42db-83e9-bc625724c61c] Running
I1119 22:20:23.935290 244005 system_pods.go:89] "etcd-old-k8s-version-975700" [12a76858-b7be-4963-8323-fe57ca12a08d] Running
I1119 22:20:23.935294 244005 system_pods.go:89] "kindnet-mlzfc" [e2532f4d-a32b-45a0-b846-1d2ecea1f926] Running
I1119 22:20:23.935297 244005 system_pods.go:89] "kube-apiserver-old-k8s-version-975700" [28d03966-c950-4e9c-bbd5-4aeb08bb3363] Running
I1119 22:20:23.935301 244005 system_pods.go:89] "kube-controller-manager-old-k8s-version-975700" [b2f2d323-34b1-47a7-945e-73086e2e6887] Running
I1119 22:20:23.935304 244005 system_pods.go:89] "kube-proxy-rnxxf" [f06c0f26-a6bc-4dcb-a9f4-c64b43b4cc1d] Running
I1119 22:20:23.935308 244005 system_pods.go:89] "kube-scheduler-old-k8s-version-975700" [65c95750-3a2f-4847-a93d-4e54bc709449] Running
I1119 22:20:23.935311 244005 system_pods.go:89] "storage-provisioner" [6c937194-8889-47a0-b05f-7af799e18044] Running
I1119 22:20:23.935318 244005 system_pods.go:126] duration metric: took 1.581396028s to wait for k8s-apps to be running ...
I1119 22:20:23.935324 244005 system_svc.go:44] waiting for kubelet service to be running ....
I1119 22:20:23.935362 244005 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1119 22:20:23.948529 244005 system_svc.go:56] duration metric: took 13.192475ms WaitForService to wait for kubelet
I1119 22:20:23.948562 244005 kubeadm.go:587] duration metric: took 16.221575338s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1119 22:20:23.948584 244005 node_conditions.go:102] verifying NodePressure condition ...
I1119 22:20:23.951344 244005 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I1119 22:20:23.951368 244005 node_conditions.go:123] node cpu capacity is 8
I1119 22:20:23.951381 244005 node_conditions.go:105] duration metric: took 2.792615ms to run NodePressure ...
I1119 22:20:23.951394 244005 start.go:242] waiting for startup goroutines ...
I1119 22:20:23.951400 244005 start.go:247] waiting for cluster config update ...
I1119 22:20:23.951411 244005 start.go:256] writing updated cluster config ...
I1119 22:20:23.951671 244005 ssh_runner.go:195] Run: rm -f paused
I1119 22:20:23.955724 244005 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1119 22:20:23.960403 244005 pod_ready.go:83] waiting for pod "coredns-5dd5756b68-8hdh7" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:20:23.964724 244005 pod_ready.go:94] pod "coredns-5dd5756b68-8hdh7" is "Ready"
I1119 22:20:23.964745 244005 pod_ready.go:86] duration metric: took 4.323941ms for pod "coredns-5dd5756b68-8hdh7" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:20:23.969212 244005 pod_ready.go:83] waiting for pod "etcd-old-k8s-version-975700" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:20:23.977143 244005 pod_ready.go:94] pod "etcd-old-k8s-version-975700" is "Ready"
I1119 22:20:23.977172 244005 pod_ready.go:86] duration metric: took 7.932702ms for pod "etcd-old-k8s-version-975700" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:20:23.984279 244005 pod_ready.go:83] waiting for pod "kube-apiserver-old-k8s-version-975700" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:20:23.990403 244005 pod_ready.go:94] pod "kube-apiserver-old-k8s-version-975700" is "Ready"
I1119 22:20:23.990436 244005 pod_ready.go:86] duration metric: took 6.116437ms for pod "kube-apiserver-old-k8s-version-975700" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:20:23.994759 244005 pod_ready.go:83] waiting for pod "kube-controller-manager-old-k8s-version-975700" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:20:24.360199 244005 pod_ready.go:94] pod "kube-controller-manager-old-k8s-version-975700" is "Ready"
I1119 22:20:24.360227 244005 pod_ready.go:86] duration metric: took 365.436099ms for pod "kube-controller-manager-old-k8s-version-975700" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:20:24.562023 244005 pod_ready.go:83] waiting for pod "kube-proxy-rnxxf" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:20:24.960397 244005 pod_ready.go:94] pod "kube-proxy-rnxxf" is "Ready"
I1119 22:20:24.960428 244005 pod_ready.go:86] duration metric: took 398.37739ms for pod "kube-proxy-rnxxf" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:20:25.161533 244005 pod_ready.go:83] waiting for pod "kube-scheduler-old-k8s-version-975700" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:20:25.560960 244005 pod_ready.go:94] pod "kube-scheduler-old-k8s-version-975700" is "Ready"
I1119 22:20:25.560992 244005 pod_ready.go:86] duration metric: took 399.43384ms for pod "kube-scheduler-old-k8s-version-975700" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:20:25.561003 244005 pod_ready.go:40] duration metric: took 1.605243985s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1119 22:20:25.605359 244005 start.go:628] kubectl: 1.34.2, cluster: 1.28.0 (minor skew: 6)
I1119 22:20:25.607589 244005 out.go:203]
W1119 22:20:25.608986 244005 out.go:285] ! /usr/local/bin/kubectl is version 1.34.2, which may have incompatibilities with Kubernetes 1.28.0.
I1119 22:20:25.610519 244005 out.go:179] - Want kubectl v1.28.0? Try 'minikube kubectl -- get pods -A'
I1119 22:20:25.612224 244005 out.go:179] * Done! kubectl is now configured to use "old-k8s-version-975700" cluster and "default" namespace by default
I1119 22:20:25.028260 248121 addons.go:515] duration metric: took 488.871855ms for enable addons: enabled=[storage-provisioner default-storageclass]
I1119 22:20:25.340186 248121 kapi.go:214] "coredns" deployment in "kube-system" namespace and "no-preload-638439" context rescaled to 1 replicas
W1119 22:20:26.840695 248121 node_ready.go:57] node "no-preload-638439" has "Ready":"False" status (will retry)
W1119 22:20:28.841182 248121 node_ready.go:57] node "no-preload-638439" has "Ready":"False" status (will retry)
I1119 22:20:26.041396 216336 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": read tcp 192.168.76.1:42420->192.168.76.2:8443: read: connection reset by peer
I1119 22:20:26.041468 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1119 22:20:26.041590 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1119 22:20:26.074121 216336 cri.go:89] found id: "7ad4b982b6755076027d5e5a0dbbc765f8f8a005fc34051b36948b140a060ce7"
I1119 22:20:26.074147 216336 cri.go:89] found id: "0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0"
I1119 22:20:26.074156 216336 cri.go:89] found id: "b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42"
I1119 22:20:26.074161 216336 cri.go:89] found id: ""
I1119 22:20:26.074169 216336 logs.go:282] 3 containers: [7ad4b982b6755076027d5e5a0dbbc765f8f8a005fc34051b36948b140a060ce7 0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0 b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42]
I1119 22:20:26.074227 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:26.080252 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:26.086170 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:26.090514 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1119 22:20:26.090588 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1119 22:20:26.119338 216336 cri.go:89] found id: "4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc"
I1119 22:20:26.119365 216336 cri.go:89] found id: ""
I1119 22:20:26.119375 216336 logs.go:282] 1 containers: [4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc]
I1119 22:20:26.119431 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:26.123237 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1119 22:20:26.123308 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1119 22:20:26.150429 216336 cri.go:89] found id: ""
I1119 22:20:26.150465 216336 logs.go:282] 0 containers: []
W1119 22:20:26.150475 216336 logs.go:284] No container was found matching "coredns"
I1119 22:20:26.150488 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1119 22:20:26.150553 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1119 22:20:26.180127 216336 cri.go:89] found id: "599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0"
I1119 22:20:26.180150 216336 cri.go:89] found id: ""
I1119 22:20:26.180167 216336 logs.go:282] 1 containers: [599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0]
I1119 22:20:26.180222 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:26.185074 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1119 22:20:26.185141 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1119 22:20:26.216334 216336 cri.go:89] found id: ""
I1119 22:20:26.216362 216336 logs.go:282] 0 containers: []
W1119 22:20:26.216373 216336 logs.go:284] No container was found matching "kube-proxy"
I1119 22:20:26.216381 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1119 22:20:26.216440 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1119 22:20:26.246928 216336 cri.go:89] found id: "1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2"
I1119 22:20:26.246952 216336 cri.go:89] found id: ""
I1119 22:20:26.246962 216336 logs.go:282] 1 containers: [1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2]
I1119 22:20:26.247027 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:26.252210 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1119 22:20:26.252281 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1119 22:20:26.283008 216336 cri.go:89] found id: ""
I1119 22:20:26.283052 216336 logs.go:282] 0 containers: []
W1119 22:20:26.283086 216336 logs.go:284] No container was found matching "kindnet"
I1119 22:20:26.283101 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1119 22:20:26.283160 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1119 22:20:26.311983 216336 cri.go:89] found id: ""
I1119 22:20:26.312016 216336 logs.go:282] 0 containers: []
W1119 22:20:26.312026 216336 logs.go:284] No container was found matching "storage-provisioner"
I1119 22:20:26.312040 216336 logs.go:123] Gathering logs for describe nodes ...
I1119 22:20:26.312059 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1119 22:20:26.372080 216336 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1119 22:20:26.372108 216336 logs.go:123] Gathering logs for kube-apiserver [7ad4b982b6755076027d5e5a0dbbc765f8f8a005fc34051b36948b140a060ce7] ...
I1119 22:20:26.372123 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 7ad4b982b6755076027d5e5a0dbbc765f8f8a005fc34051b36948b140a060ce7"
I1119 22:20:26.410125 216336 logs.go:123] Gathering logs for kube-apiserver [0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0] ...
I1119 22:20:26.410156 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0"
I1119 22:20:26.445052 216336 logs.go:123] Gathering logs for containerd ...
I1119 22:20:26.445081 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1119 22:20:26.488314 216336 logs.go:123] Gathering logs for container status ...
I1119 22:20:26.488348 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1119 22:20:26.519759 216336 logs.go:123] Gathering logs for kubelet ...
I1119 22:20:26.519786 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1119 22:20:26.607720 216336 logs.go:123] Gathering logs for dmesg ...
I1119 22:20:26.607753 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1119 22:20:26.622164 216336 logs.go:123] Gathering logs for kube-apiserver [b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42] ...
I1119 22:20:26.622196 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42"
I1119 22:20:26.658569 216336 logs.go:123] Gathering logs for etcd [4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc] ...
I1119 22:20:26.658598 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc"
I1119 22:20:26.690380 216336 logs.go:123] Gathering logs for kube-scheduler [599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0] ...
I1119 22:20:26.690410 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0"
I1119 22:20:26.723334 216336 logs.go:123] Gathering logs for kube-controller-manager [1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2] ...
I1119 22:20:26.723368 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2"
I1119 22:20:29.254435 216336 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1119 22:20:29.254927 216336 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1119 22:20:29.254988 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1119 22:20:29.255050 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1119 22:20:29.281477 216336 cri.go:89] found id: "7ad4b982b6755076027d5e5a0dbbc765f8f8a005fc34051b36948b140a060ce7"
I1119 22:20:29.281503 216336 cri.go:89] found id: "b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42"
I1119 22:20:29.281509 216336 cri.go:89] found id: ""
I1119 22:20:29.281518 216336 logs.go:282] 2 containers: [7ad4b982b6755076027d5e5a0dbbc765f8f8a005fc34051b36948b140a060ce7 b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42]
I1119 22:20:29.281576 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:29.285991 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:29.289786 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1119 22:20:29.289841 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1119 22:20:29.315177 216336 cri.go:89] found id: "4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc"
I1119 22:20:29.315199 216336 cri.go:89] found id: ""
I1119 22:20:29.315208 216336 logs.go:282] 1 containers: [4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc]
I1119 22:20:29.315264 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:29.319376 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1119 22:20:29.319444 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1119 22:20:29.346951 216336 cri.go:89] found id: ""
I1119 22:20:29.346973 216336 logs.go:282] 0 containers: []
W1119 22:20:29.346980 216336 logs.go:284] No container was found matching "coredns"
I1119 22:20:29.346998 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1119 22:20:29.347043 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1119 22:20:29.374529 216336 cri.go:89] found id: "599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0"
I1119 22:20:29.374549 216336 cri.go:89] found id: ""
I1119 22:20:29.374556 216336 logs.go:282] 1 containers: [599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0]
I1119 22:20:29.374608 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:29.378833 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1119 22:20:29.378918 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1119 22:20:29.409418 216336 cri.go:89] found id: ""
I1119 22:20:29.409456 216336 logs.go:282] 0 containers: []
W1119 22:20:29.409468 216336 logs.go:284] No container was found matching "kube-proxy"
I1119 22:20:29.409476 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1119 22:20:29.409542 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1119 22:20:29.439747 216336 cri.go:89] found id: "1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2"
I1119 22:20:29.439767 216336 cri.go:89] found id: ""
I1119 22:20:29.439775 216336 logs.go:282] 1 containers: [1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2]
I1119 22:20:29.439832 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:29.443967 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1119 22:20:29.444041 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1119 22:20:29.469669 216336 cri.go:89] found id: ""
I1119 22:20:29.469695 216336 logs.go:282] 0 containers: []
W1119 22:20:29.469705 216336 logs.go:284] No container was found matching "kindnet"
I1119 22:20:29.469712 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1119 22:20:29.469769 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1119 22:20:29.496972 216336 cri.go:89] found id: ""
I1119 22:20:29.497000 216336 logs.go:282] 0 containers: []
W1119 22:20:29.497009 216336 logs.go:284] No container was found matching "storage-provisioner"
I1119 22:20:29.497026 216336 logs.go:123] Gathering logs for kubelet ...
I1119 22:20:29.497039 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1119 22:20:29.585833 216336 logs.go:123] Gathering logs for dmesg ...
I1119 22:20:29.585865 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1119 22:20:29.600450 216336 logs.go:123] Gathering logs for kube-scheduler [599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0] ...
I1119 22:20:29.600488 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0"
I1119 22:20:29.634599 216336 logs.go:123] Gathering logs for describe nodes ...
I1119 22:20:29.634632 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1119 22:20:29.694751 216336 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1119 22:20:29.694785 216336 logs.go:123] Gathering logs for kube-apiserver [7ad4b982b6755076027d5e5a0dbbc765f8f8a005fc34051b36948b140a060ce7] ...
I1119 22:20:29.694799 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 7ad4b982b6755076027d5e5a0dbbc765f8f8a005fc34051b36948b140a060ce7"
I1119 22:20:29.728982 216336 logs.go:123] Gathering logs for kube-apiserver [b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42] ...
I1119 22:20:29.729009 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42"
I1119 22:20:29.762543 216336 logs.go:123] Gathering logs for etcd [4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc] ...
I1119 22:20:29.762572 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc"
I1119 22:20:29.794342 216336 logs.go:123] Gathering logs for kube-controller-manager [1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2] ...
I1119 22:20:29.794374 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2"
I1119 22:20:29.828582 216336 logs.go:123] Gathering logs for containerd ...
I1119 22:20:29.828610 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1119 22:20:29.874642 216336 logs.go:123] Gathering logs for container status ...
I1119 22:20:29.874672 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
W1119 22:20:31.341227 248121 node_ready.go:57] node "no-preload-638439" has "Ready":"False" status (will retry)
W1119 22:20:33.840869 248121 node_ready.go:57] node "no-preload-638439" has "Ready":"False" status (will retry)
==> container status <==
CONTAINER        IMAGE            CREATED           STATE      NAME                       ATTEMPT    POD ID           POD                                                NAMESPACE
d5768828ca04f    56cc512116c8f    7 seconds ago     Running    busybox                    0          36bf64ba3c00d    busybox                                            default
dcb27a5492378    ead0a4a53df89    13 seconds ago    Running    coredns                    0          6a75c4192812f    coredns-5dd5756b68-8hdh7                           kube-system
537c778c87f9d    6e38f40d628db    13 seconds ago    Running    storage-provisioner        0          8fa22b8d20a3f    storage-provisioner                                kube-system
9f637c51ffa43    409467f978b4a    24 seconds ago    Running    kindnet-cni                0          cb55d544de2ea    kindnet-mlzfc                                      kube-system
bfde9418adc9d    ea1030da44aa1    27 seconds ago    Running    kube-proxy                 0          4ca7d14c5d50a    kube-proxy-rnxxf                                   kube-system
814e6989c6431    f6f496300a2ae    46 seconds ago    Running    kube-scheduler             0          f5ceb3a12bb84    kube-scheduler-old-k8s-version-975700              kube-system
1870cf3b3c44b    bb5e0dde9054c    46 seconds ago    Running    kube-apiserver             0          52831c15e2557    kube-apiserver-old-k8s-version-975700              kube-system
97883579e01ac    73deb9a3f7025    46 seconds ago    Running    etcd                       0          e63e84e034d31    etcd-old-k8s-version-975700                        kube-system
f4532683638eb    4be79c38a4bab    46 seconds ago    Running    kube-controller-manager    0          250cc7adfeba7    kube-controller-manager-old-k8s-version-975700     kube-system
==> containerd <==
Nov 19 22:20:22 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:22.712366614Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-5dd5756b68-8hdh7,Uid:a4057bf2-fe2e-42db-83e9-bc625724c61c,Namespace:kube-system,Attempt:0,} returns sandbox id \"6a75c4192812faee0e855fcba490a6d63eeaa3e8229ace4b9a3a2b128e801116\""
Nov 19 22:20:22 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:22.715553681Z" level=info msg="CreateContainer within sandbox \"6a75c4192812faee0e855fcba490a6d63eeaa3e8229ace4b9a3a2b128e801116\" for container &ContainerMetadata{Name:coredns,Attempt:0,}"
Nov 19 22:20:22 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:22.722344581Z" level=info msg="Container dcb27a5492378c9249ef7c6af871ff41c7849ef2087b13036c4112f3826f90bf: CDI devices from CRI Config.CDIDevices: []"
Nov 19 22:20:22 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:22.728923728Z" level=info msg="CreateContainer within sandbox \"6a75c4192812faee0e855fcba490a6d63eeaa3e8229ace4b9a3a2b128e801116\" for &ContainerMetadata{Name:coredns,Attempt:0,} returns container id \"dcb27a5492378c9249ef7c6af871ff41c7849ef2087b13036c4112f3826f90bf\""
Nov 19 22:20:22 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:22.729475146Z" level=info msg="StartContainer for \"dcb27a5492378c9249ef7c6af871ff41c7849ef2087b13036c4112f3826f90bf\""
Nov 19 22:20:22 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:22.730499329Z" level=info msg="connecting to shim dcb27a5492378c9249ef7c6af871ff41c7849ef2087b13036c4112f3826f90bf" address="unix:///run/containerd/s/34a674b328f7f600d36cfd77d784cd14517a5b33bcc634daaca7b6dd09032aa9" protocol=ttrpc version=3
Nov 19 22:20:22 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:22.757547812Z" level=info msg="StartContainer for \"537c778c87f9d8c20894001938b5632c0e5dcc6b1095fb4d266fd4b3995811b2\" returns successfully"
Nov 19 22:20:22 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:22.786711759Z" level=info msg="StartContainer for \"dcb27a5492378c9249ef7c6af871ff41c7849ef2087b13036c4112f3826f90bf\" returns successfully"
Nov 19 22:20:26 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:26.134603361Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:b49caea0-80e8-4473-ac1f-f9bd327c3754,Namespace:default,Attempt:0,}"
Nov 19 22:20:26 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:26.185916874Z" level=info msg="connecting to shim 36bf64ba3c00d9e0c7f71f899e9cd21577248641d207dcfc98340d1d6b3cb0d0" address="unix:///run/containerd/s/c0d7613134ce7e47335ad17357d4a66a2ab52af6386e2abf7c0d2ac536b7f638" namespace=k8s.io protocol=ttrpc version=3
Nov 19 22:20:26 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:26.262497493Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:b49caea0-80e8-4473-ac1f-f9bd327c3754,Namespace:default,Attempt:0,} returns sandbox id \"36bf64ba3c00d9e0c7f71f899e9cd21577248641d207dcfc98340d1d6b3cb0d0\""
Nov 19 22:20:26 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:26.264162086Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\""
Nov 19 22:20:28 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:28.373146514Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 19 22:20:28 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:28.374074587Z" level=info msg="stop pulling image gcr.io/k8s-minikube/busybox:1.28.4-glibc: active requests=0, bytes read=2396641"
Nov 19 22:20:28 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:28.375650212Z" level=info msg="ImageCreate event name:\"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 19 22:20:28 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:28.378263887Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 19 22:20:28 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:28.378735365Z" level=info msg="Pulled image \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" with image id \"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\", repo tag \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\", repo digest \"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\", size \"2395207\" in 2.114534001s"
Nov 19 22:20:28 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:28.378776793Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" returns image reference \"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\""
Nov 19 22:20:28 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:28.380562536Z" level=info msg="CreateContainer within sandbox \"36bf64ba3c00d9e0c7f71f899e9cd21577248641d207dcfc98340d1d6b3cb0d0\" for container &ContainerMetadata{Name:busybox,Attempt:0,}"
Nov 19 22:20:28 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:28.389100774Z" level=info msg="Container d5768828ca04f9295bf18e3fc30308deb6547c5a50a2782f1e71634c15ae7e9a: CDI devices from CRI Config.CDIDevices: []"
Nov 19 22:20:28 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:28.397616150Z" level=info msg="CreateContainer within sandbox \"36bf64ba3c00d9e0c7f71f899e9cd21577248641d207dcfc98340d1d6b3cb0d0\" for &ContainerMetadata{Name:busybox,Attempt:0,} returns container id \"d5768828ca04f9295bf18e3fc30308deb6547c5a50a2782f1e71634c15ae7e9a\""
Nov 19 22:20:28 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:28.398260870Z" level=info msg="StartContainer for \"d5768828ca04f9295bf18e3fc30308deb6547c5a50a2782f1e71634c15ae7e9a\""
Nov 19 22:20:28 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:28.399512803Z" level=info msg="connecting to shim d5768828ca04f9295bf18e3fc30308deb6547c5a50a2782f1e71634c15ae7e9a" address="unix:///run/containerd/s/c0d7613134ce7e47335ad17357d4a66a2ab52af6386e2abf7c0d2ac536b7f638" protocol=ttrpc version=3
Nov 19 22:20:28 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:28.458456492Z" level=info msg="StartContainer for \"d5768828ca04f9295bf18e3fc30308deb6547c5a50a2782f1e71634c15ae7e9a\" returns successfully"
Nov 19 22:20:34 old-k8s-version-975700 containerd[666]: E1119 22:20:34.905114 666 websocket.go:100] "Unhandled Error" err="unable to upgrade websocket connection: websocket server finished before becoming ready" logger="UnhandledError"
==> coredns [dcb27a5492378c9249ef7c6af871ff41c7849ef2087b13036c4112f3826f90bf] <==
.:53
[INFO] plugin/reload: Running configuration SHA512 = 4c7f44b73086be760ec9e64204f63c5cc5a952c8c1c55ba0b41d8fc3315ce3c7d0259d04847cb8b4561043d4549603f3bccfd9b397eeb814eef159d244d26f39
CoreDNS-1.10.1
linux/amd64, go1.20, 055b2c3
[INFO] 127.0.0.1:48436 - 61 "HINFO IN 2387730691433537035.6546186387081931462. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.161284203s
==> describe nodes <==
Name: old-k8s-version-975700
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=old-k8s-version-975700
kubernetes.io/os=linux
minikube.k8s.io/commit=08454a179ffa60c8ae500105aac58654b5cdef58
minikube.k8s.io/name=old-k8s-version-975700
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_11_19T22_19_55_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Wed, 19 Nov 2025 22:19:51 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: old-k8s-version-975700
AcquireTime: <unset>
RenewTime: Wed, 19 Nov 2025 22:20:35 +0000
Conditions:
Type              Status    LastHeartbeatTime                  LastTransitionTime                 Reason                        Message
----              ------    -----------------                  ------------------                 ------                        -------
MemoryPressure    False     Wed, 19 Nov 2025 22:20:25 +0000    Wed, 19 Nov 2025 22:19:50 +0000    KubeletHasSufficientMemory    kubelet has sufficient memory available
DiskPressure      False     Wed, 19 Nov 2025 22:20:25 +0000    Wed, 19 Nov 2025 22:19:50 +0000    KubeletHasNoDiskPressure      kubelet has no disk pressure
PIDPressure       False     Wed, 19 Nov 2025 22:20:25 +0000    Wed, 19 Nov 2025 22:19:50 +0000    KubeletHasSufficientPID       kubelet has sufficient PID available
Ready             True      Wed, 19 Nov 2025 22:20:25 +0000    Wed, 19 Nov 2025 22:20:22 +0000    KubeletReady                  kubelet is posting ready status
Addresses:
InternalIP: 192.168.94.2
Hostname: old-k8s-version-975700
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863356Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863356Ki
pods: 110
System Info:
Machine ID: 2a4f1f11dedb3fb2ad8898bb691dcfbb
System UUID: 3fcee5dd-d370-4209-8cfb-b52e4110e73b
Boot ID: f21fb8e8-9754-4dc5-a8d9-ce41ba5f6057
Kernel Version: 6.8.0-1043-gcp
OS Image: Debian GNU/Linux 12 (bookworm)
Operating System: linux
Architecture: amd64
Container Runtime Version: containerd://2.1.5
Kubelet Version: v1.28.0
Kube-Proxy Version: v1.28.0
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (9 in total)
Namespace      Name                                              CPU Requests    CPU Limits    Memory Requests    Memory Limits    Age
---------      ----                                              ------------    ----------    ---------------    -------------    ---
default        busybox                                           0 (0%)          0 (0%)        0 (0%)             0 (0%)           11s
kube-system    coredns-5dd5756b68-8hdh7                          100m (1%)       0 (0%)        70Mi (0%)          170Mi (0%)       29s
kube-system    etcd-old-k8s-version-975700                       100m (1%)       0 (0%)        100Mi (0%)         0 (0%)           42s
kube-system    kindnet-mlzfc                                     100m (1%)       100m (1%)     50Mi (0%)          50Mi (0%)        29s
kube-system    kube-apiserver-old-k8s-version-975700             250m (3%)       0 (0%)        0 (0%)             0 (0%)           44s
kube-system    kube-controller-manager-old-k8s-version-975700    200m (2%)       0 (0%)        0 (0%)             0 (0%)           42s
kube-system    kube-proxy-rnxxf                                  0 (0%)          0 (0%)        0 (0%)             0 (0%)           29s
kube-system    kube-scheduler-old-k8s-version-975700             100m (1%)       0 (0%)        0 (0%)             0 (0%)           43s
kube-system    storage-provisioner                               0 (0%)          0 (0%)        0 (0%)             0 (0%)           28s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource             Requests      Limits
--------             --------      ------
cpu                  850m (10%)    100m (1%)
memory               220Mi (0%)    220Mi (0%)
ephemeral-storage    0 (0%)        0 (0%)
hugepages-1Gi        0 (0%)        0 (0%)
hugepages-2Mi        0 (0%)        0 (0%)
Events:
Type      Reason                     Age    From               Message
----      ------                     ----   ----               -------
Normal    Starting                   27s    kube-proxy
Normal    Starting                   42s    kubelet            Starting kubelet.
Normal    NodeAllocatableEnforced    42s    kubelet            Updated Node Allocatable limit across pods
Normal    NodeHasSufficientMemory    42s    kubelet            Node old-k8s-version-975700 status is now: NodeHasSufficientMemory
Normal    NodeHasNoDiskPressure      42s    kubelet            Node old-k8s-version-975700 status is now: NodeHasNoDiskPressure
Normal    NodeHasSufficientPID       42s    kubelet            Node old-k8s-version-975700 status is now: NodeHasSufficientPID
Normal    RegisteredNode             30s    node-controller    Node old-k8s-version-975700 event: Registered Node old-k8s-version-975700 in Controller
Normal    NodeReady                  14s    kubelet            Node old-k8s-version-975700 status is now: NodeReady
==> dmesg <==
[Nov19 21:17] MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.
[ +0.001836] TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.
[ +0.000999] MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.
[ +0.089012] acpi PNP0A03:00: fail to add MMCONFIG information, can't access extended configuration space under this bridge
[ +0.424964] i8042: Warning: Keylock active
[ +0.011946] device-mapper: core: CONFIG_IMA_DISABLE_HTABLE is disabled. Duplicate IMA measurements will not be recorded in the IMA log.
[ +0.499038] block sda: the capability attribute has been deprecated.
[ +0.090446] systemd[1]: Configuration file /run/systemd/system/netplan-ovs-cleanup.service is marked world-inaccessible. This has no effect as configuration data is accessible via APIs without restrictions. Proceeding anyway.
[ +0.026259] systemd[1]: /lib/systemd/system/snapd.service:23: Unknown key name 'RestartMode' in section 'Service', ignoring.
[ +4.862736] kauditd_printk_skb: 47 callbacks suppressed
==> etcd [97883579e01acd8bc2695b07f55c948f3a46c160bf534f88de73606eaba10069] <==
{"level":"info","ts":"2025-11-19T22:19:49.465492Z","caller":"embed/etcd.go:597","msg":"serving peer traffic","address":"192.168.94.2:2380"}
{"level":"info","ts":"2025-11-19T22:19:49.465528Z","caller":"embed/etcd.go:569","msg":"cmux::serve","address":"192.168.94.2:2380"}
{"level":"info","ts":"2025-11-19T22:19:50.345522Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"dfc97eb0aae75b33 is starting a new election at term 1"}
{"level":"info","ts":"2025-11-19T22:19:50.345562Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"dfc97eb0aae75b33 became pre-candidate at term 1"}
{"level":"info","ts":"2025-11-19T22:19:50.345577Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"dfc97eb0aae75b33 received MsgPreVoteResp from dfc97eb0aae75b33 at term 1"}
{"level":"info","ts":"2025-11-19T22:19:50.345588Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"dfc97eb0aae75b33 became candidate at term 2"}
{"level":"info","ts":"2025-11-19T22:19:50.345593Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"dfc97eb0aae75b33 received MsgVoteResp from dfc97eb0aae75b33 at term 2"}
{"level":"info","ts":"2025-11-19T22:19:50.345601Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"dfc97eb0aae75b33 became leader at term 2"}
{"level":"info","ts":"2025-11-19T22:19:50.345607Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: dfc97eb0aae75b33 elected leader dfc97eb0aae75b33 at term 2"}
{"level":"info","ts":"2025-11-19T22:19:50.346237Z","caller":"etcdserver/server.go:2571","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-19T22:19:50.346786Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-19T22:19:50.346778Z","caller":"etcdserver/server.go:2062","msg":"published local member to cluster through raft","local-member-id":"dfc97eb0aae75b33","local-member-attributes":"{Name:old-k8s-version-975700 ClientURLs:[https://192.168.94.2:2379]}","request-path":"/0/members/dfc97eb0aae75b33/attributes","cluster-id":"da400bbece288f5a","publish-timeout":"7s"}
{"level":"info","ts":"2025-11-19T22:19:50.346819Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-19T22:19:50.34703Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"da400bbece288f5a","local-member-id":"dfc97eb0aae75b33","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-19T22:19:50.347114Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2025-11-19T22:19:50.347198Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2025-11-19T22:19:50.347172Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-19T22:19:50.347229Z","caller":"etcdserver/server.go:2595","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-19T22:19:50.34807Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2025-11-19T22:19:50.348559Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.94.2:2379"}
{"level":"warn","ts":"2025-11-19T22:19:52.006287Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"208.664484ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/leases/kube-system/apiserver-pcqkfx5qiyeeley4bpw5zibjhu\" ","response":"range_response_count:0 size:4"}
{"level":"info","ts":"2025-11-19T22:19:52.0064Z","caller":"traceutil/trace.go:171","msg":"trace[898828708] range","detail":"{range_begin:/registry/leases/kube-system/apiserver-pcqkfx5qiyeeley4bpw5zibjhu; range_end:; response_count:0; response_revision:69; }","duration":"208.799616ms","start":"2025-11-19T22:19:51.797579Z","end":"2025-11-19T22:19:52.006378Z","steps":["trace[898828708] 'range keys from in-memory index tree' (duration: 208.571934ms)"],"step_count":1}
{"level":"warn","ts":"2025-11-19T22:20:07.925909Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"124.040627ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/configmaps/kube-system/coredns\" ","response":"range_response_count:1 size:612"}
{"level":"info","ts":"2025-11-19T22:20:07.925985Z","caller":"traceutil/trace.go:171","msg":"trace[1355111703] range","detail":"{range_begin:/registry/configmaps/kube-system/coredns; range_end:; response_count:1; response_revision:350; }","duration":"124.145953ms","start":"2025-11-19T22:20:07.801823Z","end":"2025-11-19T22:20:07.925969Z","steps":["trace[1355111703] 'range keys from in-memory index tree' (duration: 123.893977ms)"],"step_count":1}
{"level":"info","ts":"2025-11-19T22:20:07.945114Z","caller":"traceutil/trace.go:171","msg":"trace[986567943] transaction","detail":"{read_only:false; response_revision:351; number_of_response:1; }","duration":"142.590181ms","start":"2025-11-19T22:20:07.802499Z","end":"2025-11-19T22:20:07.945089Z","steps":["trace[986567943] 'process raft request' (duration: 142.419431ms)"],"step_count":1}
==> kernel <==
22:20:36 up 1:02, 0 user, load average: 4.39, 3.37, 2.10
Linux old-k8s-version-975700 6.8.0-1043-gcp #46~22.04.1-Ubuntu SMP Wed Oct 22 19:00:03 UTC 2025 x86_64 GNU/Linux
PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"
==> kindnet [9f637c51ffa434a826f6584d8a7faf4701e1f09be3a0f36a1d28e02a37c6fb8d] <==
I1119 22:20:11.957590 1 main.go:109] connected to apiserver: https://10.96.0.1:443
I1119 22:20:11.957822 1 main.go:139] hostIP = 192.168.94.2
podIP = 192.168.94.2
I1119 22:20:11.958041 1 main.go:148] setting mtu 1500 for CNI
I1119 22:20:11.958058 1 main.go:178] kindnetd IP family: "ipv4"
I1119 22:20:11.958074 1 main.go:182] noMask IPv4 subnets: [10.244.0.0/16]
time="2025-11-19T22:20:12Z" level=info msg="Created plugin 10-kube-network-policies (kindnetd, handles RunPodSandbox,RemovePodSandbox)"
I1119 22:20:12.159373 1 controller.go:377] "Starting controller" name="kube-network-policies"
I1119 22:20:12.159514 1 controller.go:381] "Waiting for informer caches to sync"
I1119 22:20:12.159531 1 shared_informer.go:350] "Waiting for caches to sync" controller="kube-network-policies"
I1119 22:20:12.159716 1 controller.go:390] nri plugin exited: failed to connect to NRI service: dial unix /var/run/nri/nri.sock: connect: no such file or directory
I1119 22:20:12.538063 1 shared_informer.go:357] "Caches are synced" controller="kube-network-policies"
I1119 22:20:12.538126 1 metrics.go:72] Registering metrics
I1119 22:20:12.538374 1 controller.go:711] "Syncing nftables rules"
I1119 22:20:22.164952 1 main.go:297] Handling node with IPs: map[192.168.94.2:{}]
I1119 22:20:22.165012 1 main.go:301] handling current node
I1119 22:20:32.161088 1 main.go:297] Handling node with IPs: map[192.168.94.2:{}]
I1119 22:20:32.161124 1 main.go:301] handling current node
==> kube-apiserver [1870cf3b3c44ba81df1590d986f8a70efb48ac5a464f0a3d4d757b18fc420709] <==
I1119 22:19:51.591405 1 apf_controller.go:377] Running API Priority and Fairness config worker
I1119 22:19:51.591414 1 apf_controller.go:380] Running API Priority and Fairness periodic rebalancing process
I1119 22:19:51.591407 1 cache.go:32] Waiting for caches to sync for autoregister controller
I1119 22:19:51.591438 1 cache.go:39] Caches are synced for autoregister controller
I1119 22:19:51.591387 1 shared_informer.go:318] Caches are synced for configmaps
I1119 22:19:51.593118 1 controller.go:624] quota admission added evaluator for: namespaces
E1119 22:19:51.595601 1 controller.go:146] "Failed to ensure lease exists, will retry" err="namespaces \"kube-system\" not found" interval="200ms"
I1119 22:19:51.608554 1 shared_informer.go:318] Caches are synced for node_authorizer
I1119 22:19:52.008399 1 controller.go:624] quota admission added evaluator for: leases.coordination.k8s.io
I1119 22:19:52.497067 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I1119 22:19:52.500707 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I1119 22:19:52.500727 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I1119 22:19:52.938966 1 controller.go:624] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1119 22:19:52.979169 1 controller.go:624] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1119 22:19:53.101027 1 alloc.go:330] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W1119 22:19:53.107157 1 lease.go:263] Resetting endpoints for master service "kubernetes" to [192.168.94.2]
I1119 22:19:53.108241 1 controller.go:624] quota admission added evaluator for: endpoints
I1119 22:19:53.112503 1 controller.go:624] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1119 22:19:53.552446 1 controller.go:624] quota admission added evaluator for: serviceaccounts
I1119 22:19:54.613121 1 controller.go:624] quota admission added evaluator for: deployments.apps
I1119 22:19:54.625563 1 alloc.go:330] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I1119 22:19:54.635960 1 controller.go:624] quota admission added evaluator for: daemonsets.apps
I1119 22:20:06.459115 1 controller.go:624] quota admission added evaluator for: replicasets.apps
I1119 22:20:07.162080 1 controller.go:624] quota admission added evaluator for: controllerrevisions.apps
==> kube-controller-manager [f4532683638eb7620857fe45f4fd3c3ed09ef48600c71e8fb4fb0f9dae88bfb2] <==
I1119 22:20:06.563934 1 event.go:307] "Event occurred" object="kube-system/kube-scheduler-old-k8s-version-975700" fieldPath="" kind="Pod" apiVersion="v1" type="Warning" reason="NodeNotReady" message="Node is not ready"
I1119 22:20:06.565627 1 event.go:307] "Event occurred" object="kube-system/etcd-old-k8s-version-975700" fieldPath="" kind="Pod" apiVersion="v1" type="Warning" reason="NodeNotReady" message="Node is not ready"
I1119 22:20:06.565755 1 event.go:307] "Event occurred" object="kube-system/kube-apiserver-old-k8s-version-975700" fieldPath="" kind="Pod" apiVersion="v1" type="Warning" reason="NodeNotReady" message="Node is not ready"
I1119 22:20:06.609574 1 shared_informer.go:318] Caches are synced for resource quota
I1119 22:20:06.927535 1 shared_informer.go:318] Caches are synced for garbage collector
I1119 22:20:07.000472 1 shared_informer.go:318] Caches are synced for garbage collector
I1119 22:20:07.000512 1 garbagecollector.go:166] "All resource monitors have synced. Proceeding to collect garbage"
I1119 22:20:07.173283 1 event.go:307] "Event occurred" object="kube-system/kube-proxy" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-rnxxf"
I1119 22:20:07.176815 1 event.go:307] "Event occurred" object="kube-system/kindnet" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kindnet-mlzfc"
I1119 22:20:07.368445 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-vbfhv"
I1119 22:20:07.377915 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-8hdh7"
I1119 22:20:07.385341 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="921.876981ms"
I1119 22:20:07.403436 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="18.02637ms"
I1119 22:20:07.403590 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="97µs"
I1119 22:20:08.346162 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set coredns-5dd5756b68 to 1 from 2"
I1119 22:20:08.357372 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: coredns-5dd5756b68-vbfhv"
I1119 22:20:08.366742 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="21.01104ms"
I1119 22:20:08.373376 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="6.555995ms"
I1119 22:20:08.373523 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="103.519µs"
I1119 22:20:22.284386 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="125.689µs"
I1119 22:20:22.302759 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="82.916µs"
I1119 22:20:23.804590 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="9.984643ms"
I1119 22:20:23.825468 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="9.454615ms"
I1119 22:20:23.825553 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="44.257µs"
I1119 22:20:26.560333 1 node_lifecycle_controller.go:1048] "Controller detected that some Nodes are Ready. Exiting master disruption mode"
==> kube-proxy [bfde9418adc9d7aba501fe3c84086b7de3e6632fdd8aabb2eb31e57c6302f8a1] <==
I1119 22:20:08.542091 1 server_others.go:69] "Using iptables proxy"
I1119 22:20:08.554521 1 node.go:141] Successfully retrieved node IP: 192.168.94.2
I1119 22:20:08.579485 1 server.go:632] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I1119 22:20:08.581958 1 server_others.go:152] "Using iptables Proxier"
I1119 22:20:08.581998 1 server_others.go:421] "Detect-local-mode set to ClusterCIDR, but no cluster CIDR for family" ipFamily="IPv6"
I1119 22:20:08.582008 1 server_others.go:438] "Defaulting to no-op detect-local"
I1119 22:20:08.582058 1 proxier.go:251] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
I1119 22:20:08.582375 1 server.go:846] "Version info" version="v1.28.0"
I1119 22:20:08.582389 1 server.go:848] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1119 22:20:08.584350 1 config.go:315] "Starting node config controller"
I1119 22:20:08.584377 1 shared_informer.go:311] Waiting for caches to sync for node config
I1119 22:20:08.584426 1 config.go:188] "Starting service config controller"
I1119 22:20:08.584459 1 shared_informer.go:311] Waiting for caches to sync for service config
I1119 22:20:08.584486 1 config.go:97] "Starting endpoint slice config controller"
I1119 22:20:08.584491 1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
I1119 22:20:08.684578 1 shared_informer.go:318] Caches are synced for service config
I1119 22:20:08.684601 1 shared_informer.go:318] Caches are synced for endpoint slice config
I1119 22:20:08.684577 1 shared_informer.go:318] Caches are synced for node config
==> kube-scheduler [814e6989c64319d934f5f210646b29c75985c3fe82e3642066c6cced56537e32] <==
W1119 22:19:51.558017 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E1119 22:19:51.558302 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
W1119 22:19:51.557982 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E1119 22:19:51.558323 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
W1119 22:19:51.558217 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E1119 22:19:51.558365 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
W1119 22:19:52.378035 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E1119 22:19:52.378068 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
W1119 22:19:52.502983 1 reflector.go:535] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E1119 22:19:52.503017 1 reflector.go:147] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
W1119 22:19:52.577347 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E1119 22:19:52.577387 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
W1119 22:19:52.620635 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E1119 22:19:52.620663 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
W1119 22:19:52.621642 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E1119 22:19:52.621673 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
W1119 22:19:52.622811 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E1119 22:19:52.622838 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
W1119 22:19:52.655572 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E1119 22:19:52.655637 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
W1119 22:19:52.670809 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E1119 22:19:52.670851 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
W1119 22:19:52.738351 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E1119 22:19:52.738419 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
I1119 22:19:55.553708 1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Nov 19 22:20:07 old-k8s-version-975700 kubelet[1560]: I1119 22:20:07.254431 1560 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/e2532f4d-a32b-45a0-b846-1d2ecea1f926-lib-modules\") pod \"kindnet-mlzfc\" (UID: \"e2532f4d-a32b-45a0-b846-1d2ecea1f926\") " pod="kube-system/kindnet-mlzfc"
Nov 19 22:20:07 old-k8s-version-975700 kubelet[1560]: I1119 22:20:07.254510 1560 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9fnz9\" (UniqueName: \"kubernetes.io/projected/f06c0f26-a6bc-4dcb-a9f4-c64b43b4cc1d-kube-api-access-9fnz9\") pod \"kube-proxy-rnxxf\" (UID: \"f06c0f26-a6bc-4dcb-a9f4-c64b43b4cc1d\") " pod="kube-system/kube-proxy-rnxxf"
Nov 19 22:20:07 old-k8s-version-975700 kubelet[1560]: I1119 22:20:07.254561 1560 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-cfg\" (UniqueName: \"kubernetes.io/host-path/e2532f4d-a32b-45a0-b846-1d2ecea1f926-cni-cfg\") pod \"kindnet-mlzfc\" (UID: \"e2532f4d-a32b-45a0-b846-1d2ecea1f926\") " pod="kube-system/kindnet-mlzfc"
Nov 19 22:20:07 old-k8s-version-975700 kubelet[1560]: I1119 22:20:07.254783 1560 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/f06c0f26-a6bc-4dcb-a9f4-c64b43b4cc1d-kube-proxy\") pod \"kube-proxy-rnxxf\" (UID: \"f06c0f26-a6bc-4dcb-a9f4-c64b43b4cc1d\") " pod="kube-system/kube-proxy-rnxxf"
Nov 19 22:20:07 old-k8s-version-975700 kubelet[1560]: I1119 22:20:07.254836 1560 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/f06c0f26-a6bc-4dcb-a9f4-c64b43b4cc1d-xtables-lock\") pod \"kube-proxy-rnxxf\" (UID: \"f06c0f26-a6bc-4dcb-a9f4-c64b43b4cc1d\") " pod="kube-system/kube-proxy-rnxxf"
Nov 19 22:20:07 old-k8s-version-975700 kubelet[1560]: E1119 22:20:07.363793 1560 projected.go:292] Couldn't get configMap kube-system/kube-root-ca.crt: configmap "kube-root-ca.crt" not found
Nov 19 22:20:07 old-k8s-version-975700 kubelet[1560]: E1119 22:20:07.363834 1560 projected.go:198] Error preparing data for projected volume kube-api-access-rpv66 for pod kube-system/kindnet-mlzfc: configmap "kube-root-ca.crt" not found
Nov 19 22:20:07 old-k8s-version-975700 kubelet[1560]: E1119 22:20:07.363943 1560 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/e2532f4d-a32b-45a0-b846-1d2ecea1f926-kube-api-access-rpv66 podName:e2532f4d-a32b-45a0-b846-1d2ecea1f926 nodeName:}" failed. No retries permitted until 2025-11-19 22:20:07.863913255 +0000 UTC m=+13.276094662 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-rpv66" (UniqueName: "kubernetes.io/projected/e2532f4d-a32b-45a0-b846-1d2ecea1f926-kube-api-access-rpv66") pod "kindnet-mlzfc" (UID: "e2532f4d-a32b-45a0-b846-1d2ecea1f926") : configmap "kube-root-ca.crt" not found
Nov 19 22:20:07 old-k8s-version-975700 kubelet[1560]: E1119 22:20:07.364286 1560 projected.go:292] Couldn't get configMap kube-system/kube-root-ca.crt: configmap "kube-root-ca.crt" not found
Nov 19 22:20:07 old-k8s-version-975700 kubelet[1560]: E1119 22:20:07.364311 1560 projected.go:198] Error preparing data for projected volume kube-api-access-9fnz9 for pod kube-system/kube-proxy-rnxxf: configmap "kube-root-ca.crt" not found
Nov 19 22:20:07 old-k8s-version-975700 kubelet[1560]: E1119 22:20:07.364372 1560 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/f06c0f26-a6bc-4dcb-a9f4-c64b43b4cc1d-kube-api-access-9fnz9 podName:f06c0f26-a6bc-4dcb-a9f4-c64b43b4cc1d nodeName:}" failed. No retries permitted until 2025-11-19 22:20:07.864353345 +0000 UTC m=+13.276534732 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-9fnz9" (UniqueName: "kubernetes.io/projected/f06c0f26-a6bc-4dcb-a9f4-c64b43b4cc1d-kube-api-access-9fnz9") pod "kube-proxy-rnxxf" (UID: "f06c0f26-a6bc-4dcb-a9f4-c64b43b4cc1d") : configmap "kube-root-ca.crt" not found
Nov 19 22:20:08 old-k8s-version-975700 kubelet[1560]: I1119 22:20:08.753381 1560 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kube-proxy-rnxxf" podStartSLOduration=1.753327393 podCreationTimestamp="2025-11-19 22:20:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-19 22:20:08.753080476 +0000 UTC m=+14.165261906" watchObservedRunningTime="2025-11-19 22:20:08.753327393 +0000 UTC m=+14.165508800"
Nov 19 22:20:12 old-k8s-version-975700 kubelet[1560]: I1119 22:20:12.861606 1560 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kindnet-mlzfc" podStartSLOduration=2.782502482 podCreationTimestamp="2025-11-19 22:20:07 +0000 UTC" firstStartedPulling="2025-11-19 22:20:08.564687803 +0000 UTC m=+13.976869202" lastFinishedPulling="2025-11-19 22:20:11.643733018 +0000 UTC m=+17.055914418" observedRunningTime="2025-11-19 22:20:12.861400313 +0000 UTC m=+18.273581719" watchObservedRunningTime="2025-11-19 22:20:12.861547698 +0000 UTC m=+18.273729104"
Nov 19 22:20:22 old-k8s-version-975700 kubelet[1560]: I1119 22:20:22.261744 1560 kubelet_node_status.go:493] "Fast updating node status as it just became ready"
Nov 19 22:20:22 old-k8s-version-975700 kubelet[1560]: I1119 22:20:22.283141 1560 topology_manager.go:215] "Topology Admit Handler" podUID="6c937194-8889-47a0-b05f-7af799e18044" podNamespace="kube-system" podName="storage-provisioner"
Nov 19 22:20:22 old-k8s-version-975700 kubelet[1560]: I1119 22:20:22.284839 1560 topology_manager.go:215] "Topology Admit Handler" podUID="a4057bf2-fe2e-42db-83e9-bc625724c61c" podNamespace="kube-system" podName="coredns-5dd5756b68-8hdh7"
Nov 19 22:20:22 old-k8s-version-975700 kubelet[1560]: I1119 22:20:22.465780 1560 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xbjsb\" (UniqueName: \"kubernetes.io/projected/6c937194-8889-47a0-b05f-7af799e18044-kube-api-access-xbjsb\") pod \"storage-provisioner\" (UID: \"6c937194-8889-47a0-b05f-7af799e18044\") " pod="kube-system/storage-provisioner"
Nov 19 22:20:22 old-k8s-version-975700 kubelet[1560]: I1119 22:20:22.465975 1560 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zd7zm\" (UniqueName: \"kubernetes.io/projected/a4057bf2-fe2e-42db-83e9-bc625724c61c-kube-api-access-zd7zm\") pod \"coredns-5dd5756b68-8hdh7\" (UID: \"a4057bf2-fe2e-42db-83e9-bc625724c61c\") " pod="kube-system/coredns-5dd5756b68-8hdh7"
Nov 19 22:20:22 old-k8s-version-975700 kubelet[1560]: I1119 22:20:22.466031 1560 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/6c937194-8889-47a0-b05f-7af799e18044-tmp\") pod \"storage-provisioner\" (UID: \"6c937194-8889-47a0-b05f-7af799e18044\") " pod="kube-system/storage-provisioner"
Nov 19 22:20:22 old-k8s-version-975700 kubelet[1560]: I1119 22:20:22.466065 1560 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a4057bf2-fe2e-42db-83e9-bc625724c61c-config-volume\") pod \"coredns-5dd5756b68-8hdh7\" (UID: \"a4057bf2-fe2e-42db-83e9-bc625724c61c\") " pod="kube-system/coredns-5dd5756b68-8hdh7"
Nov 19 22:20:22 old-k8s-version-975700 kubelet[1560]: I1119 22:20:22.790518 1560 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=14.790461437 podCreationTimestamp="2025-11-19 22:20:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-19 22:20:22.789226683 +0000 UTC m=+28.201408091" watchObservedRunningTime="2025-11-19 22:20:22.790461437 +0000 UTC m=+28.202642846"
Nov 19 22:20:23 old-k8s-version-975700 kubelet[1560]: I1119 22:20:23.794502 1560 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/coredns-5dd5756b68-8hdh7" podStartSLOduration=16.794448045 podCreationTimestamp="2025-11-19 22:20:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-19 22:20:23.792204756 +0000 UTC m=+29.204386163" watchObservedRunningTime="2025-11-19 22:20:23.794448045 +0000 UTC m=+29.206629453"
Nov 19 22:20:25 old-k8s-version-975700 kubelet[1560]: I1119 22:20:25.822716 1560 topology_manager.go:215] "Topology Admit Handler" podUID="b49caea0-80e8-4473-ac1f-f9bd327c3754" podNamespace="default" podName="busybox"
Nov 19 22:20:25 old-k8s-version-975700 kubelet[1560]: I1119 22:20:25.990052 1560 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-87p55\" (UniqueName: \"kubernetes.io/projected/b49caea0-80e8-4473-ac1f-f9bd327c3754-kube-api-access-87p55\") pod \"busybox\" (UID: \"b49caea0-80e8-4473-ac1f-f9bd327c3754\") " pod="default/busybox"
Nov 19 22:20:28 old-k8s-version-975700 kubelet[1560]: I1119 22:20:28.806269 1560 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="default/busybox" podStartSLOduration=1.691001227 podCreationTimestamp="2025-11-19 22:20:25 +0000 UTC" firstStartedPulling="2025-11-19 22:20:26.263867005 +0000 UTC m=+31.676048399" lastFinishedPulling="2025-11-19 22:20:28.379090043 +0000 UTC m=+33.791271442" observedRunningTime="2025-11-19 22:20:28.805872451 +0000 UTC m=+34.218053858" watchObservedRunningTime="2025-11-19 22:20:28.80622427 +0000 UTC m=+34.218405676"
==> storage-provisioner [537c778c87f9d8c20894001938b5632c0e5dcc6b1095fb4d266fd4b3995811b2] <==
I1119 22:20:22.762742 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1119 22:20:22.772216 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1119 22:20:22.772484 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I1119 22:20:22.782676 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I1119 22:20:22.782729 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"750e6d2d-dbb6-45a4-b78a-de5bffe0f948", APIVersion:"v1", ResourceVersion:"406", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' old-k8s-version-975700_aeb53126-798f-4b08-be45-abf6358cfbca became leader
I1119 22:20:22.782814 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_old-k8s-version-975700_aeb53126-798f-4b08-be45-abf6358cfbca!
I1119 22:20:22.883137 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_old-k8s-version-975700_aeb53126-798f-4b08-be45-abf6358cfbca!
-- /stdout --
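The storage-provisioner output above is ordinary client-go leader election over an Endpoints lock: the pod acquires the kube-system/k8s.io-minikube-hostpath lease before starting its provisioner controller. For Endpoints-based locks the holder record normally lives in the control-plane.alpha.kubernetes.io/leader annotation on that object, so a query along the following lines (a sketch reusing this run's kubectl context, and assuming that annotation key for this client-go version) would show the current leader:

  kubectl --context old-k8s-version-975700 -n kube-system get endpoints k8s.io-minikube-hostpath -o jsonpath='{.metadata.annotations.control-plane\.alpha\.kubernetes\.io/leader}'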
helpers_test.go:262: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p old-k8s-version-975700 -n old-k8s-version-975700
helpers_test.go:269: (dbg) Run: kubectl --context old-k8s-version-975700 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:293: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: end of post-mortem logs <<<
helpers_test.go:294: ---------------------/post-mortem---------------------------------
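A note on the kubelet errors captured above: the repeated 'configmap "kube-root-ca.crt" not found' failures are a bootstrap race, not a fault. kube-controller-manager publishes that ConfigMap into every namespace shortly after the control plane comes up, and the kubelet retries the projected-volume mount after the logged 500ms backoff, which is why kube-proxy and kindnet still reach Running moments later. A quick confirmation that the ConfigMap was eventually published (a sketch against the same context) could be:

  kubectl --context old-k8s-version-975700 -n kube-system get configmap kube-root-ca.crt -o jsonpath='{.metadata.creationTimestamp}'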
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: docker inspect <======
helpers_test.go:239: (dbg) Run: docker inspect old-k8s-version-975700
helpers_test.go:243: (dbg) docker inspect old-k8s-version-975700:
-- stdout --
[
{
"Id": "fa1d8405226b204ac72daac6f171881e88b0344b7533643e7e2243a0246fe4ca",
"Created": "2025-11-19T22:19:38.284388499Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 244905,
"ExitCode": 0,
"Error": "",
"StartedAt": "2025-11-19T22:19:38.321569291Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:da868a89527ea3b5fe65ed3ef232d132379e38c55dd4637db2e5af21a1522b2d",
"ResolvConfPath": "/var/lib/docker/containers/fa1d8405226b204ac72daac6f171881e88b0344b7533643e7e2243a0246fe4ca/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/fa1d8405226b204ac72daac6f171881e88b0344b7533643e7e2243a0246fe4ca/hostname",
"HostsPath": "/var/lib/docker/containers/fa1d8405226b204ac72daac6f171881e88b0344b7533643e7e2243a0246fe4ca/hosts",
"LogPath": "/var/lib/docker/containers/fa1d8405226b204ac72daac6f171881e88b0344b7533643e7e2243a0246fe4ca/fa1d8405226b204ac72daac6f171881e88b0344b7533643e7e2243a0246fe4ca-json.log",
"Name": "/old-k8s-version-975700",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"old-k8s-version-975700:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {
"max-size": "100m"
}
},
"NetworkMode": "old-k8s-version-975700",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "private",
"Dns": null,
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 3221225472,
"NanoCpus": 0,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 6442450944,
"MemorySwappiness": null,
"OomKillDisable": null,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"ID": "fa1d8405226b204ac72daac6f171881e88b0344b7533643e7e2243a0246fe4ca",
"LowerDir": "/var/lib/docker/overlay2/82f9fc885f3a15658949bf3138691f10889fccea52145002efd1a4a56c392ddc-init/diff:/var/lib/docker/overlay2/b09480e350abbb2f4f48b19448dc8e9ddd0de679fdb8cd59ebc5b758a29b344e/diff",
"MergedDir": "/var/lib/docker/overlay2/82f9fc885f3a15658949bf3138691f10889fccea52145002efd1a4a56c392ddc/merged",
"UpperDir": "/var/lib/docker/overlay2/82f9fc885f3a15658949bf3138691f10889fccea52145002efd1a4a56c392ddc/diff",
"WorkDir": "/var/lib/docker/overlay2/82f9fc885f3a15658949bf3138691f10889fccea52145002efd1a4a56c392ddc/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "old-k8s-version-975700",
"Source": "/var/lib/docker/volumes/old-k8s-version-975700/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "old-k8s-version-975700",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "old-k8s-version-975700",
"name.minikube.sigs.k8s.io": "old-k8s-version-975700",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"SandboxID": "bdcc92270fe5f34f2b3211c596bcb03676f7d021d1ab19d1405cbc9ff65513fb",
"SandboxKey": "/var/run/docker/netns/bdcc92270fe5",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33058"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33059"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33062"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33060"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33061"
}
]
},
"Networks": {
"old-k8s-version-975700": {
"IPAMConfig": {
"IPv4Address": "192.168.94.2",
"IPv6Address": ""
},
"Links": null,
"Aliases": null,
"DriverOpts": null,
"GwPriority": 0,
"NetworkID": "e025fa4e3e969ab94188de7ccce8cf41b046fa1de9b7b2485f5bcca1daedd849",
"EndpointID": "8cbfdb5bbf934780f84e734118116ddf815c2fea44670767c9e66317e265e4f4",
"Gateway": "192.168.94.1",
"IPAddress": "192.168.94.2",
"MacAddress": "e6:6b:48:9f:07:21",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"old-k8s-version-975700",
"fa1d8405226b"
]
}
}
}
}
]
-- /stdout --
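Worth noting in the inspect output: HostConfig.PortBindings requests 127.0.0.1 with an empty HostPort, so Docker assigns ephemeral host ports, and the concrete mappings (33058-33062) appear only under NetworkSettings.Ports. To pull a single mapping, such as the apiserver's 8443, straight from the daemon, a format query like this works:

  docker inspect -f '{{(index (index .NetworkSettings.Ports "8443/tcp") 0).HostPort}}' old-k8s-version-975700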
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p old-k8s-version-975700 -n old-k8s-version-975700
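The status helper formats minikube's status struct with a Go template; {{.Host}} and {{.APIServer}} are the fields this test exercises, and several components can be read in one call, e.g. (a sketch):

  out/minikube-linux-amd64 status --format='{{.Host}} {{.Kubelet}} {{.APIServer}}' -p old-k8s-version-975700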
helpers_test.go:252: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-amd64 -p old-k8s-version-975700 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-amd64 -p old-k8s-version-975700 logs -n 25: (1.017876639s)
helpers_test.go:260: TestStartStop/group/old-k8s-version/serial/DeployApp logs:
-- stdout --
==> Audit <==
┌─────────┬─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬───────────────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼───────────────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ start │ -p NoKubernetes-836292 --no-kubernetes --memory=3072 --alsologtostderr -v=5 --driver=docker --container-runtime=containerd │ NoKubernetes-836292 │ jenkins │ v1.37.0 │ 19 Nov 25 22:18 UTC │ 19 Nov 25 22:19 UTC │
│ ssh │ -p cilium-904997 sudo systemctl status containerd --all --full --no-pager │ cilium-904997 │ jenkins │ v1.37.0 │ 19 Nov 25 22:18 UTC │ │
│ ssh │ -p cilium-904997 sudo systemctl cat containerd --no-pager │ cilium-904997 │ jenkins │ v1.37.0 │ 19 Nov 25 22:18 UTC │ │
│ ssh │ -p cilium-904997 sudo cat /lib/systemd/system/containerd.service │ cilium-904997 │ jenkins │ v1.37.0 │ 19 Nov 25 22:18 UTC │ │
│ ssh │ -p cilium-904997 sudo cat /etc/containerd/config.toml │ cilium-904997 │ jenkins │ v1.37.0 │ 19 Nov 25 22:18 UTC │ │
│ ssh │ -p cilium-904997 sudo containerd config dump │ cilium-904997 │ jenkins │ v1.37.0 │ 19 Nov 25 22:18 UTC │ │
│ ssh │ -p cilium-904997 sudo systemctl status crio --all --full --no-pager │ cilium-904997 │ jenkins │ v1.37.0 │ 19 Nov 25 22:18 UTC │ │
│ ssh │ -p cilium-904997 sudo systemctl cat crio --no-pager │ cilium-904997 │ jenkins │ v1.37.0 │ 19 Nov 25 22:18 UTC │ │
│ ssh │ -p cilium-904997 sudo find /etc/crio -type f -exec sh -c 'echo {}; cat {}' \; │ cilium-904997 │ jenkins │ v1.37.0 │ 19 Nov 25 22:18 UTC │ │
│ ssh │ -p cilium-904997 sudo crio config │ cilium-904997 │ jenkins │ v1.37.0 │ 19 Nov 25 22:18 UTC │ │
│ delete │ -p cilium-904997 │ cilium-904997 │ jenkins │ v1.37.0 │ 19 Nov 25 22:18 UTC │ 19 Nov 25 22:18 UTC │
│ start │ -p force-systemd-flag-635885 --memory=3072 --force-systemd --alsologtostderr -v=5 --driver=docker --container-runtime=containerd │ force-systemd-flag-635885 │ jenkins │ v1.37.0 │ 19 Nov 25 22:18 UTC │ 19 Nov 25 22:19 UTC │
│ ssh │ -p NoKubernetes-836292 sudo systemctl is-active --quiet service kubelet │ NoKubernetes-836292 │ jenkins │ v1.37.0 │ 19 Nov 25 22:19 UTC │ │
│ ssh │ force-systemd-flag-635885 ssh cat /etc/containerd/config.toml │ force-systemd-flag-635885 │ jenkins │ v1.37.0 │ 19 Nov 25 22:19 UTC │ 19 Nov 25 22:19 UTC │
│ delete │ -p force-systemd-flag-635885 │ force-systemd-flag-635885 │ jenkins │ v1.37.0 │ 19 Nov 25 22:19 UTC │ 19 Nov 25 22:19 UTC │
│ stop │ -p NoKubernetes-836292 │ NoKubernetes-836292 │ jenkins │ v1.37.0 │ 19 Nov 25 22:19 UTC │ 19 Nov 25 22:19 UTC │
│ start │ -p NoKubernetes-836292 --driver=docker --container-runtime=containerd │ NoKubernetes-836292 │ jenkins │ v1.37.0 │ 19 Nov 25 22:19 UTC │ 19 Nov 25 22:19 UTC │
│ start │ -p cert-options-071115 --memory=3072 --apiserver-ips=127.0.0.1 --apiserver-ips=192.168.15.15 --apiserver-names=localhost --apiserver-names=www.google.com --apiserver-port=8555 --driver=docker --container-runtime=containerd │ cert-options-071115 │ jenkins │ v1.37.0 │ 19 Nov 25 22:19 UTC │ 19 Nov 25 22:19 UTC │
│ ssh │ -p NoKubernetes-836292 sudo systemctl is-active --quiet service kubelet │ NoKubernetes-836292 │ jenkins │ v1.37.0 │ 19 Nov 25 22:19 UTC │ │
│ delete │ -p NoKubernetes-836292 │ NoKubernetes-836292 │ jenkins │ v1.37.0 │ 19 Nov 25 22:19 UTC │ 19 Nov 25 22:19 UTC │
│ start │ -p old-k8s-version-975700 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.28.0 │ old-k8s-version-975700 │ jenkins │ v1.37.0 │ 19 Nov 25 22:19 UTC │ 19 Nov 25 22:20 UTC │
│ ssh │ cert-options-071115 ssh openssl x509 -text -noout -in /var/lib/minikube/certs/apiserver.crt │ cert-options-071115 │ jenkins │ v1.37.0 │ 19 Nov 25 22:19 UTC │ 19 Nov 25 22:19 UTC │
│ ssh │ -p cert-options-071115 -- sudo cat /etc/kubernetes/admin.conf │ cert-options-071115 │ jenkins │ v1.37.0 │ 19 Nov 25 22:19 UTC │ 19 Nov 25 22:19 UTC │
│ delete │ -p cert-options-071115 │ cert-options-071115 │ jenkins │ v1.37.0 │ 19 Nov 25 22:19 UTC │ 19 Nov 25 22:19 UTC │
│ start │ -p no-preload-638439 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.34.1 │ no-preload-638439 │ jenkins │ v1.37.0 │ 19 Nov 25 22:19 UTC │ │
└─────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴───────────────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/11/19 22:19:48
Running on machine: ubuntu-20-agent-6
Binary: Built with gc go1.24.6 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1119 22:19:48.990275 248121 out.go:360] Setting OutFile to fd 1 ...
I1119 22:19:48.990406 248121 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1119 22:19:48.990419 248121 out.go:374] Setting ErrFile to fd 2...
I1119 22:19:48.990423 248121 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1119 22:19:48.990627 248121 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21918-9296/.minikube/bin
I1119 22:19:48.991193 248121 out.go:368] Setting JSON to false
I1119 22:19:48.992321 248121 start.go:133] hostinfo: {"hostname":"ubuntu-20-agent-6","uptime":3729,"bootTime":1763587060,"procs":293,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1043-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I1119 22:19:48.992426 248121 start.go:143] virtualization: kvm guest
I1119 22:19:48.994475 248121 out.go:179] * [no-preload-638439] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
I1119 22:19:48.995854 248121 out.go:179] - MINIKUBE_LOCATION=21918
I1119 22:19:48.995867 248121 notify.go:221] Checking for updates...
I1119 22:19:48.998724 248121 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1119 22:19:49.000141 248121 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21918-9296/kubeconfig
I1119 22:19:49.004556 248121 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21918-9296/.minikube
I1119 22:19:49.005782 248121 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64
I1119 22:19:49.006906 248121 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1119 22:19:49.008438 248121 config.go:182] Loaded profile config "cert-expiration-207460": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1119 22:19:49.008559 248121 config.go:182] Loaded profile config "kubernetes-upgrade-133839": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1119 22:19:49.008672 248121 config.go:182] Loaded profile config "old-k8s-version-975700": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1119 22:19:49.008773 248121 driver.go:422] Setting default libvirt URI to qemu:///system
I1119 22:19:49.032838 248121 docker.go:124] docker version: linux-29.0.2:Docker Engine - Community
I1119 22:19:49.032973 248121 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1119 22:19:49.090138 248121 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:3 ContainersRunning:3 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:65 OomKillDisable:false NGoroutines:76 SystemTime:2025-11-19 22:19:49.078907682 +0000 UTC LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-1043-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652076544 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-6 Labels:[] ExperimentalBuild:false ServerVersion:29.0.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:fcd43222d6b07379a4be9786bda52438f0dd16a1 Expected:} RuncCommit:{ID:v1.3.3-0-gd842d771 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.30.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.40.3] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v1.0.0] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1119 22:19:49.090254 248121 docker.go:319] overlay module found
I1119 22:19:49.091878 248121 out.go:179] * Using the docker driver based on user configuration
I1119 22:19:49.093038 248121 start.go:309] selected driver: docker
I1119 22:19:49.093053 248121 start.go:930] validating driver "docker" against <nil>
I1119 22:19:49.093064 248121 start.go:941] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1119 22:19:49.093625 248121 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1119 22:19:49.156775 248121 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:3 ContainersRunning:3 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:65 OomKillDisable:false NGoroutines:76 SystemTime:2025-11-19 22:19:49.145211302 +0000 UTC LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-1043-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652076544 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-6 Labels:[] ExperimentalBuild:false ServerVersion:29.0.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:fcd43222d6b07379a4be9786bda52438f0dd16a1 Expected:} RuncCommit:{ID:v1.3.3-0-gd842d771 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.30.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.40.3] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v1.0.0] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1119 22:19:49.157058 248121 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I1119 22:19:49.157439 248121 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1119 22:19:49.159270 248121 out.go:179] * Using Docker driver with root privileges
I1119 22:19:49.160689 248121 cni.go:84] Creating CNI manager for ""
I1119 22:19:49.160762 248121 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1119 22:19:49.160776 248121 start_flags.go:336] Found "CNI" CNI - setting NetworkPlugin=cni
I1119 22:19:49.160859 248121 start.go:353] cluster config:
{Name:no-preload-638439 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-638439 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1119 22:19:49.162538 248121 out.go:179] * Starting "no-preload-638439" primary control-plane node in "no-preload-638439" cluster
I1119 22:19:49.165506 248121 cache.go:134] Beginning downloading kic base image for docker with containerd
I1119 22:19:49.166733 248121 out.go:179] * Pulling base image v0.0.48-1763561786-21918 ...
I1119 22:19:49.168220 248121 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
I1119 22:19:49.168286 248121 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865 in local docker daemon
I1119 22:19:49.168353 248121 profile.go:143] Saving config to /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/config.json ...
I1119 22:19:49.168395 248121 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/config.json: {Name:mk80aa81bbdb1209c6edea855d376fb83f4d3158 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:19:49.168457 248121 cache.go:107] acquiring lock: {Name:mk3047e241e868539f7fa71732db2494bd5accac Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1119 22:19:49.168492 248121 cache.go:107] acquiring lock: {Name:mkfa0cff605af699ff39a13e0c5b50d01194658e Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1119 22:19:49.168527 248121 cache.go:107] acquiring lock: {Name:mk97f6c43b208e1a8e4ae123374c490c517b3f77 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1119 22:19:49.168548 248121 cache.go:115] /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 exists
I1119 22:19:49.168561 248121 cache.go:96] cache image "gcr.io/k8s-minikube/storage-provisioner:v5" -> "/home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5" took 131.881µs
I1119 22:19:49.168577 248121 cache.go:80] save to tar file gcr.io/k8s-minikube/storage-provisioner:v5 -> /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 succeeded
I1119 22:19:49.168586 248121 cache.go:107] acquiring lock: {Name:mk95307f4a2dfa9e7a1dbc92b6b01bf8659d9b13 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1119 22:19:49.168623 248121 image.go:138] retrieving image: registry.k8s.io/kube-proxy:v1.34.1
I1119 22:19:49.168652 248121 cache.go:107] acquiring lock: {Name:mk07d9df97c614ffb0affecc21609079d8bc04b5 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1119 22:19:49.168677 248121 image.go:138] retrieving image: registry.k8s.io/kube-controller-manager:v1.34.1
I1119 22:19:49.168687 248121 image.go:138] retrieving image: registry.k8s.io/kube-scheduler:v1.34.1
I1119 22:19:49.168749 248121 image.go:138] retrieving image: registry.k8s.io/pause:3.10.1
I1119 22:19:49.169004 248121 cache.go:107] acquiring lock: {Name:mk5d2dd3f2b18e53fa90921f4c0c75406a912168 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1119 22:19:49.169610 248121 image.go:138] retrieving image: registry.k8s.io/kube-apiserver:v1.34.1
I1119 22:19:49.169116 248121 cache.go:107] acquiring lock: {Name:mkabd0eddb0cd66931eabcbabac2ddbe82464607 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1119 22:19:49.170495 248121 image.go:138] retrieving image: registry.k8s.io/coredns/coredns:v1.12.1
I1119 22:19:49.169136 248121 cache.go:107] acquiring lock: {Name:mkc18e74e5d25fdb795ed308cf7ce3da142a9be0 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1119 22:19:49.170703 248121 image.go:138] retrieving image: registry.k8s.io/etcd:3.6.4-0
I1119 22:19:49.171552 248121 image.go:181] daemon lookup for registry.k8s.io/kube-scheduler:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-scheduler:v1.34.1
I1119 22:19:49.171558 248121 image.go:181] daemon lookup for registry.k8s.io/pause:3.10.1: Error response from daemon: No such image: registry.k8s.io/pause:3.10.1
I1119 22:19:49.171569 248121 image.go:181] daemon lookup for registry.k8s.io/kube-controller-manager:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-controller-manager:v1.34.1
I1119 22:19:49.171576 248121 image.go:181] daemon lookup for registry.k8s.io/kube-proxy:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-proxy:v1.34.1
I1119 22:19:49.172459 248121 image.go:181] daemon lookup for registry.k8s.io/kube-apiserver:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-apiserver:v1.34.1
I1119 22:19:49.172478 248121 image.go:181] daemon lookup for registry.k8s.io/etcd:3.6.4-0: Error response from daemon: No such image: registry.k8s.io/etcd:3.6.4-0
I1119 22:19:49.172507 248121 image.go:181] daemon lookup for registry.k8s.io/coredns/coredns:v1.12.1: Error response from daemon: No such image: registry.k8s.io/coredns/coredns:v1.12.1
I1119 22:19:49.200114 248121 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865 in local docker daemon, skipping pull
I1119 22:19:49.200187 248121 cache.go:158] gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865 exists in daemon, skipping load
I1119 22:19:49.200226 248121 cache.go:243] Successfully downloaded all kic artifacts
I1119 22:19:49.200265 248121 start.go:360] acquireMachinesLock for no-preload-638439: {Name:mk6b4dc7fd24c69d9288f594d61933b094ed5442 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1119 22:19:49.200436 248121 start.go:364] duration metric: took 142.192µs to acquireMachinesLock for "no-preload-638439"
I1119 22:19:49.200608 248121 start.go:93] Provisioning new machine with config: &{Name:no-preload-638439 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-638439 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1119 22:19:49.200727 248121 start.go:125] createHost starting for "" (driver="docker")
I1119 22:19:46.119049 216336 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1119 22:19:46.119476 216336 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1119 22:19:46.119522 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1119 22:19:46.119566 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1119 22:19:46.151572 216336 cri.go:89] found id: "0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0"
I1119 22:19:46.151601 216336 cri.go:89] found id: "b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42"
I1119 22:19:46.151607 216336 cri.go:89] found id: ""
I1119 22:19:46.151617 216336 logs.go:282] 2 containers: [0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0 b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42]
I1119 22:19:46.151687 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:46.155958 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:46.160473 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1119 22:19:46.160530 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1119 22:19:46.191589 216336 cri.go:89] found id: "4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc"
I1119 22:19:46.191612 216336 cri.go:89] found id: ""
I1119 22:19:46.191619 216336 logs.go:282] 1 containers: [4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc]
I1119 22:19:46.191670 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:46.196383 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1119 22:19:46.196437 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1119 22:19:46.225509 216336 cri.go:89] found id: ""
I1119 22:19:46.225529 216336 logs.go:282] 0 containers: []
W1119 22:19:46.225540 216336 logs.go:284] No container was found matching "coredns"
I1119 22:19:46.225546 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1119 22:19:46.225599 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1119 22:19:46.254866 216336 cri.go:89] found id: "599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0"
I1119 22:19:46.254913 216336 cri.go:89] found id: ""
I1119 22:19:46.254924 216336 logs.go:282] 1 containers: [599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0]
I1119 22:19:46.254979 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:46.259701 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1119 22:19:46.259765 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1119 22:19:46.292564 216336 cri.go:89] found id: ""
I1119 22:19:46.292591 216336 logs.go:282] 0 containers: []
W1119 22:19:46.292601 216336 logs.go:284] No container was found matching "kube-proxy"
I1119 22:19:46.292608 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1119 22:19:46.292667 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1119 22:19:46.329564 216336 cri.go:89] found id: "1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2"
I1119 22:19:46.329596 216336 cri.go:89] found id: ""
I1119 22:19:46.329606 216336 logs.go:282] 1 containers: [1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2]
I1119 22:19:46.329667 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:46.335222 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1119 22:19:46.335276 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1119 22:19:46.367004 216336 cri.go:89] found id: ""
I1119 22:19:46.367028 216336 logs.go:282] 0 containers: []
W1119 22:19:46.367039 216336 logs.go:284] No container was found matching "kindnet"
I1119 22:19:46.367047 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1119 22:19:46.367105 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1119 22:19:46.399927 216336 cri.go:89] found id: ""
I1119 22:19:46.399974 216336 logs.go:282] 0 containers: []
W1119 22:19:46.399984 216336 logs.go:284] No container was found matching "storage-provisioner"
I1119 22:19:46.400002 216336 logs.go:123] Gathering logs for describe nodes ...
I1119 22:19:46.400017 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1119 22:19:46.463044 216336 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1119 22:19:46.463068 216336 logs.go:123] Gathering logs for kube-apiserver [b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42] ...
I1119 22:19:46.463083 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42"
I1119 22:19:46.497691 216336 logs.go:123] Gathering logs for etcd [4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc] ...
I1119 22:19:46.497718 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc"
I1119 22:19:46.535424 216336 logs.go:123] Gathering logs for kube-scheduler [599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0] ...
I1119 22:19:46.535455 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0"
I1119 22:19:46.575124 216336 logs.go:123] Gathering logs for container status ...
I1119 22:19:46.575154 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1119 22:19:46.607742 216336 logs.go:123] Gathering logs for kubelet ...
I1119 22:19:46.607769 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1119 22:19:46.710299 216336 logs.go:123] Gathering logs for dmesg ...
I1119 22:19:46.710332 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1119 22:19:46.724051 216336 logs.go:123] Gathering logs for kube-apiserver [0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0] ...
I1119 22:19:46.724080 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0"
I1119 22:19:46.762457 216336 logs.go:123] Gathering logs for kube-controller-manager [1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2] ...
I1119 22:19:46.762489 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2"
I1119 22:19:46.803568 216336 logs.go:123] Gathering logs for containerd ...
I1119 22:19:46.803601 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1119 22:19:49.354660 216336 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1119 22:19:49.355043 216336 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1119 22:19:49.355109 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1119 22:19:49.355169 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1119 22:19:49.395681 216336 cri.go:89] found id: "0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0"
I1119 22:19:49.395705 216336 cri.go:89] found id: "b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42"
I1119 22:19:49.395709 216336 cri.go:89] found id: ""
I1119 22:19:49.395716 216336 logs.go:282] 2 containers: [0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0 b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42]
I1119 22:19:49.395781 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:49.403424 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:49.410799 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1119 22:19:49.410949 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1119 22:19:49.452918 216336 cri.go:89] found id: "4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc"
I1119 22:19:49.452941 216336 cri.go:89] found id: ""
I1119 22:19:49.452952 216336 logs.go:282] 1 containers: [4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc]
I1119 22:19:49.453011 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:49.458252 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1119 22:19:49.458323 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1119 22:19:49.497813 216336 cri.go:89] found id: ""
I1119 22:19:49.497837 216336 logs.go:282] 0 containers: []
W1119 22:19:49.497855 216336 logs.go:284] No container was found matching "coredns"
I1119 22:19:49.497863 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1119 22:19:49.497929 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1119 22:19:49.533334 216336 cri.go:89] found id: "599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0"
I1119 22:19:49.533350 216336 cri.go:89] found id: ""
I1119 22:19:49.533357 216336 logs.go:282] 1 containers: [599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0]
I1119 22:19:49.533399 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:49.537784 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1119 22:19:49.537858 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1119 22:19:49.568018 216336 cri.go:89] found id: ""
I1119 22:19:49.568044 216336 logs.go:282] 0 containers: []
W1119 22:19:49.568056 216336 logs.go:284] No container was found matching "kube-proxy"
I1119 22:19:49.568063 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1119 22:19:49.568119 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1119 22:19:49.609525 216336 cri.go:89] found id: "1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2"
I1119 22:19:49.609556 216336 cri.go:89] found id: ""
I1119 22:19:49.609566 216336 logs.go:282] 1 containers: [1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2]
I1119 22:19:49.609626 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:49.616140 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1119 22:19:49.616211 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1119 22:19:49.655231 216336 cri.go:89] found id: ""
I1119 22:19:49.655262 216336 logs.go:282] 0 containers: []
W1119 22:19:49.655272 216336 logs.go:284] No container was found matching "kindnet"
I1119 22:19:49.655279 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1119 22:19:49.655333 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1119 22:19:49.689095 216336 cri.go:89] found id: ""
I1119 22:19:49.689153 216336 logs.go:282] 0 containers: []
W1119 22:19:49.689165 216336 logs.go:284] No container was found matching "storage-provisioner"
I1119 22:19:49.689184 216336 logs.go:123] Gathering logs for kubelet ...
I1119 22:19:49.689221 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1119 22:19:49.810665 216336 logs.go:123] Gathering logs for describe nodes ...
I1119 22:19:49.810701 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1119 22:19:49.901949 216336 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1119 22:19:49.901999 216336 logs.go:123] Gathering logs for kube-apiserver [b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42] ...
I1119 22:19:49.902017 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42"
I1119 22:19:49.959095 216336 logs.go:123] Gathering logs for kube-scheduler [599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0] ...
I1119 22:19:49.959128 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0"
I1119 22:19:50.003553 216336 logs.go:123] Gathering logs for containerd ...
I1119 22:19:50.003592 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1119 22:19:50.058586 216336 logs.go:123] Gathering logs for dmesg ...
I1119 22:19:50.058623 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1119 22:19:50.074307 216336 logs.go:123] Gathering logs for kube-apiserver [0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0] ...
I1119 22:19:50.074340 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0"
I1119 22:19:50.111045 216336 logs.go:123] Gathering logs for etcd [4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc] ...
I1119 22:19:50.111081 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc"
I1119 22:19:50.150599 216336 logs.go:123] Gathering logs for kube-controller-manager [1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2] ...
I1119 22:19:50.150632 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2"
I1119 22:19:50.185189 216336 logs.go:123] Gathering logs for container status ...
I1119 22:19:50.185216 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1119 22:19:48.204748 244005 out.go:252] - Booting up control plane ...
I1119 22:19:48.204897 244005 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1119 22:19:48.205005 244005 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1119 22:19:48.206240 244005 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1119 22:19:48.231808 244005 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1119 22:19:48.232853 244005 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1119 22:19:48.232929 244005 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1119 22:19:48.338373 244005 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
I1119 22:19:49.203330 248121 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I1119 22:19:49.203668 248121 start.go:159] libmachine.API.Create for "no-preload-638439" (driver="docker")
I1119 22:19:49.203755 248121 client.go:173] LocalClient.Create starting
I1119 22:19:49.203905 248121 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21918-9296/.minikube/certs/ca.pem
I1119 22:19:49.203977 248121 main.go:143] libmachine: Decoding PEM data...
I1119 22:19:49.204016 248121 main.go:143] libmachine: Parsing certificate...
I1119 22:19:49.204103 248121 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21918-9296/.minikube/certs/cert.pem
I1119 22:19:49.204159 248121 main.go:143] libmachine: Decoding PEM data...
I1119 22:19:49.204190 248121 main.go:143] libmachine: Parsing certificate...
I1119 22:19:49.204684 248121 cli_runner.go:164] Run: docker network inspect no-preload-638439 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1119 22:19:49.233073 248121 cli_runner.go:211] docker network inspect no-preload-638439 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1119 22:19:49.233150 248121 network_create.go:284] running [docker network inspect no-preload-638439] to gather additional debugging logs...
I1119 22:19:49.233181 248121 cli_runner.go:164] Run: docker network inspect no-preload-638439
W1119 22:19:49.260692 248121 cli_runner.go:211] docker network inspect no-preload-638439 returned with exit code 1
I1119 22:19:49.260724 248121 network_create.go:287] error running [docker network inspect no-preload-638439]: docker network inspect no-preload-638439: exit status 1
stdout:
[]
stderr:
Error response from daemon: network no-preload-638439 not found
I1119 22:19:49.260740 248121 network_create.go:289] output of [docker network inspect no-preload-638439]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network no-preload-638439 not found
** /stderr **
I1119 22:19:49.260835 248121 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1119 22:19:49.281699 248121 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-02d9279961e7 IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:f2:f0:7b:99:dd:08} reservation:<nil>}
I1119 22:19:49.282496 248121 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-474134d72c89 IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:ee:14:41:ce:21:e4} reservation:<nil>}
I1119 22:19:49.283428 248121 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-527206f47d61 IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:66:ef:fd:4c:e4:1b} reservation:<nil>}
I1119 22:19:49.284394 248121 network.go:211] skipping subnet 192.168.76.0/24 that is taken: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName:br-ac16fd64007f IfaceIPv4:192.168.76.1 IfaceMTU:1500 IfaceMAC:16:dc:21:09:78:e5} reservation:<nil>}
I1119 22:19:49.285073 248121 network.go:211] skipping subnet 192.168.85.0/24 that is taken: &{IP:192.168.85.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.85.0/24 Gateway:192.168.85.1 ClientMin:192.168.85.2 ClientMax:192.168.85.254 Broadcast:192.168.85.255 IsPrivate:true Interface:{IfaceName:br-11547e9c7cf3 IfaceIPv4:192.168.85.1 IfaceMTU:1500 IfaceMAC:a2:49:21:10:91:74} reservation:<nil>}
I1119 22:19:49.286118 248121 network.go:211] skipping subnet 192.168.94.0/24 that is taken: &{IP:192.168.94.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.94.0/24 Gateway:192.168.94.1 ClientMin:192.168.94.2 ClientMax:192.168.94.254 Broadcast:192.168.94.255 IsPrivate:true Interface:{IfaceName:br-e025fa4e3e96 IfaceIPv4:192.168.94.1 IfaceMTU:1500 IfaceMAC:c2:19:71:ce:4a:3c} reservation:<nil>}
I1119 22:19:49.287275 248121 network.go:206] using free private subnet 192.168.103.0/24: &{IP:192.168.103.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.103.0/24 Gateway:192.168.103.1 ClientMin:192.168.103.2 ClientMax:192.168.103.254 Broadcast:192.168.103.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001e92190}
I1119 22:19:49.287353 248121 network_create.go:124] attempt to create docker network no-preload-638439 192.168.103.0/24 with gateway 192.168.103.1 and MTU of 1500 ...
I1119 22:19:49.287448 248121 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.103.0/24 --gateway=192.168.103.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=no-preload-638439 no-preload-638439
I1119 22:19:49.349621 248121 cache.go:162] opening: /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1
I1119 22:19:49.349748 248121 network_create.go:108] docker network no-preload-638439 192.168.103.0/24 created
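Before the `docker network create` above, network.go scanned 192.168.49.0/24 through 192.168.94.0/24 and settled on 192.168.103.0/24; judging by this log it steps the third octet by 9 and takes the first /24 with no local bridge already on it. A sketch of that scan under that assumption (the step size and starting point are inferred from the log, not from minikube source):

    package main

    import (
        "fmt"
        "net"
    )

    // firstFreeSubnet mimics the scan above: starting at 192.168.49.0/24 it
    // steps the third octet by 9 and returns the first /24 whose network
    // address is not already assigned to a local interface.
    func firstFreeSubnet() (string, error) {
        taken := map[string]bool{}
        addrs, err := net.InterfaceAddrs()
        if err != nil {
            return "", err
        }
        for _, a := range addrs {
            if ipn, ok := a.(*net.IPNet); ok {
                // record the masked network address, e.g. "192.168.49.0"
                taken[ipn.IP.Mask(ipn.Mask).String()] = true
            }
        }
        for octet := 49; octet <= 255; octet += 9 {
            base := fmt.Sprintf("192.168.%d.0", octet)
            if !taken[base] {
                return base + "/24", nil
            }
        }
        return "", fmt.Errorf("no free /24 found")
    }

    func main() {
        s, err := firstFreeSubnet()
        fmt.Println(s, err)
    }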
I1119 22:19:49.349780 248121 kic.go:121] calculated static IP "192.168.103.2" for the "no-preload-638439" container
I1119 22:19:49.349859 248121 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1119 22:19:49.350149 248121 cache.go:162] opening: /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0
I1119 22:19:49.361305 248121 cache.go:162] opening: /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1
I1119 22:19:49.363150 248121 cache.go:162] opening: /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1
I1119 22:19:49.375619 248121 cli_runner.go:164] Run: docker volume create no-preload-638439 --label name.minikube.sigs.k8s.io=no-preload-638439 --label created_by.minikube.sigs.k8s.io=true
I1119 22:19:49.389385 248121 cache.go:162] opening: /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1
I1119 22:19:49.396358 248121 cache.go:162] opening: /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1
I1119 22:19:49.402036 248121 oci.go:103] Successfully created a docker volume no-preload-638439
I1119 22:19:49.402119 248121 cli_runner.go:164] Run: docker run --rm --name no-preload-638439-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=no-preload-638439 --entrypoint /usr/bin/test -v no-preload-638439:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865 -d /var/lib
I1119 22:19:49.404338 248121 cache.go:162] opening: /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1
I1119 22:19:49.471774 248121 cache.go:157] /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 exists
I1119 22:19:49.471808 248121 cache.go:96] cache image "registry.k8s.io/pause:3.10.1" -> "/home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1" took 303.216742ms
I1119 22:19:49.471832 248121 cache.go:80] save to tar file registry.k8s.io/pause:3.10.1 -> /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 succeeded
I1119 22:19:49.854076 248121 cache.go:157] /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1 exists
I1119 22:19:49.854102 248121 cache.go:96] cache image "registry.k8s.io/kube-proxy:v1.34.1" -> "/home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1" took 685.635122ms
I1119 22:19:49.854114 248121 cache.go:80] save to tar file registry.k8s.io/kube-proxy:v1.34.1 -> /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1 succeeded
I1119 22:19:49.969965 248121 oci.go:107] Successfully prepared a docker volume no-preload-638439
I1119 22:19:49.970027 248121 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
W1119 22:19:49.970211 248121 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
W1119 22:19:49.970251 248121 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.
I1119 22:19:49.970298 248121 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1119 22:19:50.046746 248121 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname no-preload-638439 --name no-preload-638439 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=no-preload-638439 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=no-preload-638439 --network no-preload-638439 --ip 192.168.103.2 --volume no-preload-638439:/var --security-opt apparmor=unconfined --memory=3072mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865
I1119 22:19:50.374513 248121 cli_runner.go:164] Run: docker container inspect no-preload-638439 --format={{.State.Running}}
I1119 22:19:50.397354 248121 cli_runner.go:164] Run: docker container inspect no-preload-638439 --format={{.State.Status}}
I1119 22:19:50.420153 248121 cli_runner.go:164] Run: docker exec no-preload-638439 stat /var/lib/dpkg/alternatives/iptables
I1119 22:19:50.480826 248121 oci.go:144] the created container "no-preload-638439" has a running status.
I1119 22:19:50.480855 248121 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21918-9296/.minikube/machines/no-preload-638439/id_rsa...
I1119 22:19:50.741014 248121 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21918-9296/.minikube/machines/no-preload-638439/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1119 22:19:50.777653 248121 cli_runner.go:164] Run: docker container inspect no-preload-638439 --format={{.State.Status}}
I1119 22:19:50.805773 248121 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1119 22:19:50.805802 248121 kic_runner.go:114] Args: [docker exec --privileged no-preload-638439 chown docker:docker /home/docker/.ssh/authorized_keys]
I1119 22:19:50.864742 248121 cli_runner.go:164] Run: docker container inspect no-preload-638439 --format={{.State.Status}}
I1119 22:19:50.878812 248121 cache.go:157] /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1 exists
I1119 22:19:50.878846 248121 cache.go:96] cache image "registry.k8s.io/kube-apiserver:v1.34.1" -> "/home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1" took 1.709887948s
I1119 22:19:50.878866 248121 cache.go:80] save to tar file registry.k8s.io/kube-apiserver:v1.34.1 -> /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1 succeeded
I1119 22:19:50.883024 248121 cache.go:157] /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1 exists
I1119 22:19:50.883052 248121 cache.go:96] cache image "registry.k8s.io/kube-controller-manager:v1.34.1" -> "/home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1" took 1.714530905s
I1119 22:19:50.883067 248121 cache.go:80] save to tar file registry.k8s.io/kube-controller-manager:v1.34.1 -> /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1 succeeded
I1119 22:19:50.889090 248121 cache.go:157] /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1 exists
I1119 22:19:50.889119 248121 cache.go:96] cache image "registry.k8s.io/kube-scheduler:v1.34.1" -> "/home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1" took 1.72053761s
I1119 22:19:50.889134 248121 cache.go:80] save to tar file registry.k8s.io/kube-scheduler:v1.34.1 -> /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1 succeeded
I1119 22:19:50.890545 248121 machine.go:94] provisionDockerMachine start ...
I1119 22:19:50.890654 248121 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-638439
I1119 22:19:50.917029 248121 main.go:143] libmachine: Using SSH client type: native
I1119 22:19:50.917372 248121 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x8416e0] 0x8443c0 <nil> [] 0s} 127.0.0.1 33063 <nil> <nil>}
I1119 22:19:50.917394 248121 main.go:143] libmachine: About to run SSH command:
hostname
I1119 22:19:50.918143 248121 main.go:143] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:41082->127.0.0.1:33063: read: connection reset by peer
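The dial failure above is expected: sshd inside the just-started container is not accepting connections yet, and provisioning simply retries until the hostname command finally answers at 22:19:54 (below). A sketch of the retry, with illustrative timeouts rather than minikube's actual values:

    package main

    import (
        "fmt"
        "net"
        "time"
    )

    // dialWithRetry keeps attempting a TCP connection to the forwarded SSH
    // port until it succeeds or the deadline passes, mirroring how the
    // provisioner recovers from the "connection reset by peer" above.
    func dialWithRetry(addr string, deadline time.Duration) (net.Conn, error) {
        end := time.Now().Add(deadline)
        for {
            conn, err := net.DialTimeout("tcp", addr, 2*time.Second)
            if err == nil {
                return conn, nil
            }
            if time.Now().After(end) {
                return nil, fmt.Errorf("ssh port never came up: %w", err)
            }
            time.Sleep(500 * time.Millisecond)
        }
    }

    func main() {
        conn, err := dialWithRetry("127.0.0.1:33063", 30*time.Second)
        if err == nil {
            conn.Close()
        }
        fmt.Println(err)
    }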
I1119 22:19:50.954753 248121 cache.go:157] /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1 exists
I1119 22:19:50.954786 248121 cache.go:96] cache image "registry.k8s.io/coredns/coredns:v1.12.1" -> "/home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1" took 1.785730546s
I1119 22:19:50.954801 248121 cache.go:80] save to tar file registry.k8s.io/coredns/coredns:v1.12.1 -> /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1 succeeded
I1119 22:19:51.295575 248121 cache.go:157] /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0 exists
I1119 22:19:51.295602 248121 cache.go:96] cache image "registry.k8s.io/etcd:3.6.4-0" -> "/home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0" took 2.126530323s
I1119 22:19:51.295614 248121 cache.go:80] save to tar file registry.k8s.io/etcd:3.6.4-0 -> /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0 succeeded
I1119 22:19:51.295629 248121 cache.go:87] Successfully saved all images to host disk.
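Each "cache image ... took" / "save to tar file ... succeeded" pair above is a check-then-fetch: if the tar already exists on the host the image counts as cached, otherwise it is pulled and exported. A sketch of that pattern, with fetchImage as a hypothetical stand-in for the real pull-and-export:

    package main

    import (
        "fmt"
        "os"
        "time"
    )

    // saveToTar skips the download when the tar already exists and reports
    // how long the decision took, as the cache.go lines above do.
    func saveToTar(image, path string, fetchImage func(string, string) error) error {
        start := time.Now()
        if _, err := os.Stat(path); err == nil {
            fmt.Printf("cache image %q -> %q took %s (exists)\n", image, path, time.Since(start))
            return nil
        }
        if err := fetchImage(image, path); err != nil {
            return err
        }
        fmt.Printf("save to tar file %s -> %s succeeded\n", image, path)
        return nil
    }

    func main() {
        noop := func(string, string) error { return nil }
        _ = saveToTar("registry.k8s.io/pause:3.10.1", "/tmp/pause_3.10.1", noop)
    }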
I1119 22:19:53.340728 244005 kubeadm.go:319] [apiclient] All control plane components are healthy after 5.002509 seconds
I1119 22:19:53.340920 244005 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1119 22:19:53.353852 244005 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1119 22:19:53.877436 244005 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1119 22:19:53.877630 244005 kubeadm.go:319] [mark-control-plane] Marking the node old-k8s-version-975700 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1119 22:19:54.388156 244005 kubeadm.go:319] [bootstrap-token] Using token: cb0uuv.ole7whobrm4tnmeu
I1119 22:19:54.389814 244005 out.go:252] - Configuring RBAC rules ...
I1119 22:19:54.389996 244005 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1119 22:19:54.396226 244005 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1119 22:19:54.404040 244005 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1119 22:19:54.407336 244005 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1119 22:19:54.410095 244005 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1119 22:19:54.412761 244005 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1119 22:19:54.424912 244005 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1119 22:19:54.627091 244005 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1119 22:19:54.803149 244005 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1119 22:19:54.807538 244005 kubeadm.go:319]
I1119 22:19:54.807624 244005 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1119 22:19:54.807631 244005 kubeadm.go:319]
I1119 22:19:54.807719 244005 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1119 22:19:54.807724 244005 kubeadm.go:319]
I1119 22:19:54.807753 244005 kubeadm.go:319] mkdir -p $HOME/.kube
I1119 22:19:54.807821 244005 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1119 22:19:54.807898 244005 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1119 22:19:54.807905 244005 kubeadm.go:319]
I1119 22:19:54.807968 244005 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1119 22:19:54.807973 244005 kubeadm.go:319]
I1119 22:19:54.808037 244005 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1119 22:19:54.808042 244005 kubeadm.go:319]
I1119 22:19:54.808105 244005 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1119 22:19:54.808197 244005 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1119 22:19:54.808278 244005 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1119 22:19:54.808283 244005 kubeadm.go:319]
I1119 22:19:54.808378 244005 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1119 22:19:54.808482 244005 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1119 22:19:54.808488 244005 kubeadm.go:319]
I1119 22:19:54.808581 244005 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token cb0uuv.ole7whobrm4tnmeu \
I1119 22:19:54.808697 244005 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:6123875ff628fb9eedbd72f2253477865aa197083b84a1d60cb6c00de308bc63 \
I1119 22:19:54.808745 244005 kubeadm.go:319] --control-plane
I1119 22:19:54.808753 244005 kubeadm.go:319]
I1119 22:19:54.808860 244005 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1119 22:19:54.808867 244005 kubeadm.go:319]
I1119 22:19:54.808978 244005 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token cb0uuv.ole7whobrm4tnmeu \
I1119 22:19:54.809119 244005 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:6123875ff628fb9eedbd72f2253477865aa197083b84a1d60cb6c00de308bc63
I1119 22:19:54.812703 244005 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1043-gcp\n", err: exit status 1
I1119 22:19:54.812825 244005 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1119 22:19:54.812852 244005 cni.go:84] Creating CNI manager for ""
I1119 22:19:54.812906 244005 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1119 22:19:54.814910 244005 out.go:179] * Configuring CNI (Container Networking Interface) ...
I1119 22:19:52.733247 216336 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1119 22:19:52.733770 216336 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1119 22:19:52.733821 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1119 22:19:52.733900 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1119 22:19:52.766790 216336 cri.go:89] found id: "0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0"
I1119 22:19:52.766819 216336 cri.go:89] found id: "b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42"
I1119 22:19:52.766824 216336 cri.go:89] found id: ""
I1119 22:19:52.766834 216336 logs.go:282] 2 containers: [0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0 b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42]
I1119 22:19:52.766917 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:52.771725 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:52.776283 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1119 22:19:52.776357 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1119 22:19:52.808152 216336 cri.go:89] found id: "4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc"
I1119 22:19:52.808179 216336 cri.go:89] found id: ""
I1119 22:19:52.808190 216336 logs.go:282] 1 containers: [4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc]
I1119 22:19:52.808260 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:52.812851 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1119 22:19:52.812954 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1119 22:19:52.844459 216336 cri.go:89] found id: ""
I1119 22:19:52.844483 216336 logs.go:282] 0 containers: []
W1119 22:19:52.844492 216336 logs.go:284] No container was found matching "coredns"
I1119 22:19:52.844499 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1119 22:19:52.844560 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1119 22:19:52.875911 216336 cri.go:89] found id: "599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0"
I1119 22:19:52.875939 216336 cri.go:89] found id: ""
I1119 22:19:52.875948 216336 logs.go:282] 1 containers: [599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0]
I1119 22:19:52.876008 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:52.880449 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1119 22:19:52.880526 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1119 22:19:52.913101 216336 cri.go:89] found id: ""
I1119 22:19:52.913139 216336 logs.go:282] 0 containers: []
W1119 22:19:52.913150 216336 logs.go:284] No container was found matching "kube-proxy"
I1119 22:19:52.913158 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1119 22:19:52.913240 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1119 22:19:52.945143 216336 cri.go:89] found id: "1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2"
I1119 22:19:52.945172 216336 cri.go:89] found id: ""
I1119 22:19:52.945182 216336 logs.go:282] 1 containers: [1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2]
I1119 22:19:52.945240 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:52.949921 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1119 22:19:52.950006 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1119 22:19:52.984180 216336 cri.go:89] found id: ""
I1119 22:19:52.984214 216336 logs.go:282] 0 containers: []
W1119 22:19:52.984225 216336 logs.go:284] No container was found matching "kindnet"
I1119 22:19:52.984233 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1119 22:19:52.984296 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1119 22:19:53.016636 216336 cri.go:89] found id: ""
I1119 22:19:53.016661 216336 logs.go:282] 0 containers: []
W1119 22:19:53.016671 216336 logs.go:284] No container was found matching "storage-provisioner"
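The cri.go sequence above discovers containers per component with `crictl ps -a --quiet --name=X`, which prints one container ID per line; an empty result produces the "No container was found matching" warning (coredns, kube-proxy, kindnet and storage-provisioner have nothing running while the apiserver is down). A sketch of the ID parsing:

    package main

    import (
        "fmt"
        "os/exec"
        "strings"
    )

    // listContainerIDs mirrors the loop above: the --quiet listing yields one
    // ID per line, and an empty listing means the component has no container.
    func listContainerIDs(name string) ([]string, error) {
        out, err := exec.Command("sudo", "crictl", "ps", "-a", "--quiet", "--name="+name).Output()
        if err != nil {
            return nil, err
        }
        var ids []string
        for _, line := range strings.Split(strings.TrimSpace(string(out)), "\n") {
            if line != "" {
                ids = append(ids, line)
            }
        }
        return ids, nil
    }

    func main() {
        ids, err := listContainerIDs("kube-apiserver")
        fmt.Println(len(ids), "containers:", ids, err)
    }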
I1119 22:19:53.016691 216336 logs.go:123] Gathering logs for kube-apiserver [b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42] ...
I1119 22:19:53.016707 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42"
I1119 22:19:53.053700 216336 logs.go:123] Gathering logs for kube-scheduler [599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0] ...
I1119 22:19:53.053730 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0"
I1119 22:19:53.088889 216336 logs.go:123] Gathering logs for dmesg ...
I1119 22:19:53.088922 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1119 22:19:53.104350 216336 logs.go:123] Gathering logs for describe nodes ...
I1119 22:19:53.104378 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1119 22:19:53.165418 216336 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1119 22:19:53.165442 216336 logs.go:123] Gathering logs for kube-apiserver [0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0] ...
I1119 22:19:53.165460 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0"
I1119 22:19:53.197214 216336 logs.go:123] Gathering logs for etcd [4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc] ...
I1119 22:19:53.197252 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc"
I1119 22:19:53.228109 216336 logs.go:123] Gathering logs for kube-controller-manager [1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2] ...
I1119 22:19:53.228145 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2"
I1119 22:19:53.261694 216336 logs.go:123] Gathering logs for containerd ...
I1119 22:19:53.261727 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1119 22:19:53.302850 216336 logs.go:123] Gathering logs for container status ...
I1119 22:19:53.302891 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1119 22:19:53.333442 216336 logs.go:123] Gathering logs for kubelet ...
I1119 22:19:53.333466 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1119 22:19:54.046074 248121 main.go:143] libmachine: SSH cmd err, output: <nil>: no-preload-638439
I1119 22:19:54.046106 248121 ubuntu.go:182] provisioning hostname "no-preload-638439"
I1119 22:19:54.046172 248121 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-638439
I1119 22:19:54.065777 248121 main.go:143] libmachine: Using SSH client type: native
I1119 22:19:54.066044 248121 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x8416e0] 0x8443c0 <nil> [] 0s} 127.0.0.1 33063 <nil> <nil>}
I1119 22:19:54.066060 248121 main.go:143] libmachine: About to run SSH command:
sudo hostname no-preload-638439 && echo "no-preload-638439" | sudo tee /etc/hostname
I1119 22:19:54.204707 248121 main.go:143] libmachine: SSH cmd err, output: <nil>: no-preload-638439
I1119 22:19:54.204779 248121 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-638439
I1119 22:19:54.223401 248121 main.go:143] libmachine: Using SSH client type: native
I1119 22:19:54.223669 248121 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x8416e0] 0x8443c0 <nil> [] 0s} 127.0.0.1 33063 <nil> <nil>}
I1119 22:19:54.223696 248121 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sno-preload-638439' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 no-preload-638439/g' /etc/hosts;
else
echo '127.0.1.1 no-preload-638439' | sudo tee -a /etc/hosts;
fi
fi
I1119 22:19:54.352178 248121 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1119 22:19:54.352206 248121 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21918-9296/.minikube CaCertPath:/home/jenkins/minikube-integration/21918-9296/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21918-9296/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21918-9296/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21918-9296/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21918-9296/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21918-9296/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21918-9296/.minikube}
I1119 22:19:54.352222 248121 ubuntu.go:190] setting up certificates
I1119 22:19:54.352230 248121 provision.go:84] configureAuth start
I1119 22:19:54.352301 248121 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" no-preload-638439
I1119 22:19:54.371286 248121 provision.go:143] copyHostCerts
I1119 22:19:54.371354 248121 exec_runner.go:144] found /home/jenkins/minikube-integration/21918-9296/.minikube/ca.pem, removing ...
I1119 22:19:54.371370 248121 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21918-9296/.minikube/ca.pem
I1119 22:19:54.371451 248121 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21918-9296/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21918-9296/.minikube/ca.pem (1078 bytes)
I1119 22:19:54.371570 248121 exec_runner.go:144] found /home/jenkins/minikube-integration/21918-9296/.minikube/cert.pem, removing ...
I1119 22:19:54.371582 248121 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21918-9296/.minikube/cert.pem
I1119 22:19:54.371623 248121 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21918-9296/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21918-9296/.minikube/cert.pem (1123 bytes)
I1119 22:19:54.371701 248121 exec_runner.go:144] found /home/jenkins/minikube-integration/21918-9296/.minikube/key.pem, removing ...
I1119 22:19:54.371710 248121 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21918-9296/.minikube/key.pem
I1119 22:19:54.371748 248121 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21918-9296/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21918-9296/.minikube/key.pem (1679 bytes)
I1119 22:19:54.371818 248121 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21918-9296/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21918-9296/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21918-9296/.minikube/certs/ca-key.pem org=jenkins.no-preload-638439 san=[127.0.0.1 192.168.103.2 localhost minikube no-preload-638439]
I1119 22:19:54.471021 248121 provision.go:177] copyRemoteCerts
I1119 22:19:54.471092 248121 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1119 22:19:54.471126 248121 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-638439
I1119 22:19:54.492235 248121 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21918-9296/.minikube/machines/no-preload-638439/id_rsa Username:docker}
I1119 22:19:54.594331 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I1119 22:19:54.619378 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/machines/server.pem --> /etc/docker/server.pem (1220 bytes)
I1119 22:19:54.640347 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1119 22:19:54.663269 248121 provision.go:87] duration metric: took 311.007703ms to configureAuth
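configureAuth above refreshes the host copies of ca/cert/key, generates a server certificate whose SANs cover every address the daemon may be reached on (127.0.0.1, the static IP, localhost, minikube, the machine name), then scps the server pem/key into /etc/docker. A sketch of how such a SAN list maps onto an x509 template; the field choices here are illustrative, not minikube's exact certificate code:

    package main

    import (
        "crypto/x509"
        "crypto/x509/pkix"
        "fmt"
        "math/big"
        "net"
        "time"
    )

    // serverCertTemplate sketches the SAN handling above: IP-shaped entries
    // go into IPAddresses, everything else into DNSNames. The org string
    // follows the "org=jenkins.no-preload-638439" field in the log.
    func serverCertTemplate(sans []string, org string) *x509.Certificate {
        tmpl := &x509.Certificate{
            SerialNumber: big.NewInt(1),
            Subject:      pkix.Name{Organization: []string{org}},
            NotBefore:    time.Now(),
            NotAfter:     time.Now().Add(26280 * time.Hour), // CertExpiration from the profile config
            ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
        }
        for _, s := range sans {
            if ip := net.ParseIP(s); ip != nil {
                tmpl.IPAddresses = append(tmpl.IPAddresses, ip)
            } else {
                tmpl.DNSNames = append(tmpl.DNSNames, s)
            }
        }
        return tmpl
    }

    func main() {
        t := serverCertTemplate([]string{"127.0.0.1", "192.168.103.2", "localhost", "minikube", "no-preload-638439"}, "jenkins.no-preload-638439")
        fmt.Println(t.DNSNames, t.IPAddresses)
    }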
I1119 22:19:54.663306 248121 ubuntu.go:206] setting minikube options for container-runtime
I1119 22:19:54.663514 248121 config.go:182] Loaded profile config "no-preload-638439": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1119 22:19:54.663528 248121 machine.go:97] duration metric: took 3.772952055s to provisionDockerMachine
I1119 22:19:54.663538 248121 client.go:176] duration metric: took 5.459757711s to LocalClient.Create
I1119 22:19:54.663558 248121 start.go:167] duration metric: took 5.459889493s to libmachine.API.Create "no-preload-638439"
I1119 22:19:54.663572 248121 start.go:293] postStartSetup for "no-preload-638439" (driver="docker")
I1119 22:19:54.663584 248121 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1119 22:19:54.663643 248121 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1119 22:19:54.663702 248121 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-638439
I1119 22:19:54.693309 248121 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21918-9296/.minikube/machines/no-preload-638439/id_rsa Username:docker}
I1119 22:19:54.794533 248121 ssh_runner.go:195] Run: cat /etc/os-release
I1119 22:19:54.799614 248121 main.go:143] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1119 22:19:54.799652 248121 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1119 22:19:54.799667 248121 filesync.go:126] Scanning /home/jenkins/minikube-integration/21918-9296/.minikube/addons for local assets ...
I1119 22:19:54.799750 248121 filesync.go:126] Scanning /home/jenkins/minikube-integration/21918-9296/.minikube/files for local assets ...
I1119 22:19:54.799853 248121 filesync.go:149] local asset: /home/jenkins/minikube-integration/21918-9296/.minikube/files/etc/ssl/certs/128212.pem -> 128212.pem in /etc/ssl/certs
I1119 22:19:54.800010 248121 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1119 22:19:54.811703 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/files/etc/ssl/certs/128212.pem --> /etc/ssl/certs/128212.pem (1708 bytes)
I1119 22:19:54.833815 248121 start.go:296] duration metric: took 170.228401ms for postStartSetup
I1119 22:19:54.834269 248121 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" no-preload-638439
I1119 22:19:54.855648 248121 profile.go:143] Saving config to /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/config.json ...
I1119 22:19:54.855997 248121 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1119 22:19:54.856065 248121 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-638439
I1119 22:19:54.875839 248121 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21918-9296/.minikube/machines/no-preload-638439/id_rsa Username:docker}
I1119 22:19:54.971298 248121 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1119 22:19:54.976558 248121 start.go:128] duration metric: took 5.775804384s to createHost
I1119 22:19:54.976584 248121 start.go:83] releasing machines lock for "no-preload-638439", held for 5.775996243s
I1119 22:19:54.976652 248121 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" no-preload-638439
I1119 22:19:54.996323 248121 ssh_runner.go:195] Run: cat /version.json
I1119 22:19:54.996379 248121 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-638439
I1119 22:19:54.996397 248121 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1119 22:19:54.996468 248121 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-638439
I1119 22:19:55.015498 248121 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21918-9296/.minikube/machines/no-preload-638439/id_rsa Username:docker}
I1119 22:19:55.015796 248121 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21918-9296/.minikube/machines/no-preload-638439/id_rsa Username:docker}
I1119 22:19:55.110222 248121 ssh_runner.go:195] Run: systemctl --version
I1119 22:19:55.167157 248121 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1119 22:19:55.172373 248121 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1119 22:19:55.172445 248121 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1119 22:19:55.200823 248121 cni.go:262] disabled [/etc/cni/net.d/10-crio-bridge.conflist.disabled, /etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
I1119 22:19:55.200849 248121 start.go:496] detecting cgroup driver to use...
I1119 22:19:55.200917 248121 detect.go:190] detected "systemd" cgroup driver on host os
I1119 22:19:55.200971 248121 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1119 22:19:55.216429 248121 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1119 22:19:55.230198 248121 docker.go:218] disabling cri-docker service (if available) ...
I1119 22:19:55.230259 248121 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1119 22:19:55.247760 248121 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1119 22:19:55.266193 248121 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1119 22:19:55.355176 248121 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1119 22:19:55.456550 248121 docker.go:234] disabling docker service ...
I1119 22:19:55.456609 248121 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1119 22:19:55.479653 248121 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1119 22:19:55.493533 248121 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1119 22:19:55.592560 248121 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1119 22:19:55.702080 248121 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
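With containerd selected as the runtime, the block above stops, disables and masks the competing cri-docker and docker units; the stop calls are best-effort, since a unit may not exist in the base image at all. Roughly (the exact stop/disable/mask pairing per unit differs slightly from this sketch):

    package main

    import (
        "fmt"
        "os/exec"
    )

    // disableUnit runs a stop/disable/mask sequence for one systemd unit,
    // ignoring stop errors because the unit may be absent entirely.
    func disableUnit(unit string) error {
        _ = exec.Command("sudo", "systemctl", "stop", "-f", unit).Run() // best effort
        if err := exec.Command("sudo", "systemctl", "disable", unit).Run(); err != nil {
            return fmt.Errorf("disable %s: %w", unit, err)
        }
        return exec.Command("sudo", "systemctl", "mask", unit).Run()
    }

    func main() {
        for _, u := range []string{"cri-docker.socket", "cri-docker.service", "docker.socket", "docker.service"} {
            fmt.Println(u, disableUnit(u))
        }
    }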
I1119 22:19:55.719351 248121 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1119 22:19:55.735307 248121 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I1119 22:19:55.748222 248121 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1119 22:19:55.759552 248121 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I1119 22:19:55.759604 248121 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I1119 22:19:55.771633 248121 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1119 22:19:55.782179 248121 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1119 22:19:55.791940 248121 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1119 22:19:55.801486 248121 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1119 22:19:55.810671 248121 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1119 22:19:55.820637 248121 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1119 22:19:55.830057 248121 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1119 22:19:55.839605 248121 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1119 22:19:55.847930 248121 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1119 22:19:55.856300 248121 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1119 22:19:55.943868 248121 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1119 22:19:56.031481 248121 start.go:543] Will wait 60s for socket path /run/containerd/containerd.sock
I1119 22:19:56.031555 248121 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I1119 22:19:56.036560 248121 start.go:564] Will wait 60s for crictl version
I1119 22:19:56.036619 248121 ssh_runner.go:195] Run: which crictl
I1119 22:19:56.040772 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1119 22:19:56.068661 248121 start.go:580] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: v2.1.5
RuntimeApiVersion: v1
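After `systemctl restart containerd`, start.go waits up to 60s for the socket to reappear and another 60s for crictl to answer a version query, as logged above. A sketch of that polling:

    package main

    import (
        "fmt"
        "os"
        "os/exec"
        "time"
    )

    // waitForSocket polls for the containerd socket path, then confirms the
    // runtime answers over CRI, matching the two 60s waits in the log.
    func waitForSocket(path string, timeout time.Duration) error {
        deadline := time.Now().Add(timeout)
        for {
            if _, err := os.Stat(path); err == nil {
                return exec.Command("sudo", "crictl", "version").Run()
            }
            if time.Now().After(deadline) {
                return fmt.Errorf("socket %s never appeared", path)
            }
            time.Sleep(500 * time.Millisecond)
        }
    }

    func main() {
        fmt.Println(waitForSocket("/run/containerd/containerd.sock", 60*time.Second))
    }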
I1119 22:19:56.068728 248121 ssh_runner.go:195] Run: containerd --version
I1119 22:19:56.092486 248121 ssh_runner.go:195] Run: containerd --version
I1119 22:19:56.118002 248121 out.go:179] * Preparing Kubernetes v1.34.1 on containerd 2.1.5 ...
I1119 22:19:54.816277 244005 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I1119 22:19:54.820558 244005 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.28.0/kubectl ...
I1119 22:19:54.820581 244005 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I1119 22:19:54.833857 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I1119 22:19:55.525202 244005 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1119 22:19:55.525370 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:19:55.525485 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes old-k8s-version-975700 minikube.k8s.io/updated_at=2025_11_19T22_19_55_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=08454a179ffa60c8ae500105aac58654b5cdef58 minikube.k8s.io/name=old-k8s-version-975700 minikube.k8s.io/primary=true
I1119 22:19:55.543472 244005 ops.go:34] apiserver oom_adj: -16
I1119 22:19:55.632765 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:19:56.133706 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:19:56.632860 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:19:57.133046 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
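The repeated `kubectl get sa default` runs above poll for the default service account at roughly half-second intervals (judging by the timestamps); its appearance signals that the controller-manager's service-account machinery works, so RBAC and addon steps can proceed. A sketch, with the node-side kubectl and kubeconfig paths taken from the log:

    package main

    import (
        "fmt"
        "os/exec"
        "time"
    )

    // waitForDefaultSA polls `kubectl get sa default` until it succeeds,
    // mirroring the half-second cadence visible in the timestamps above.
    func waitForDefaultSA(kubectlPath, kubeconfig string, timeout time.Duration) error {
        deadline := time.Now().Add(timeout)
        for {
            cmd := exec.Command("sudo", kubectlPath, "get", "sa", "default", "--kubeconfig="+kubeconfig)
            if cmd.Run() == nil {
                return nil
            }
            if time.Now().After(deadline) {
                return fmt.Errorf("default service account never appeared")
            }
            time.Sleep(500 * time.Millisecond)
        }
    }

    func main() {
        err := waitForDefaultSA("/var/lib/minikube/binaries/v1.28.0/kubectl", "/var/lib/minikube/kubeconfig", 2*time.Minute)
        fmt.Println(err)
    }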
I1119 22:19:56.119594 248121 cli_runner.go:164] Run: docker network inspect no-preload-638439 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1119 22:19:56.139074 248121 ssh_runner.go:195] Run: grep 192.168.103.1 host.minikube.internal$ /etc/hosts
I1119 22:19:56.143662 248121 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.103.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1119 22:19:56.156640  248121 kubeadm.go:884] updating cluster {Name:no-preload-638439 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-638439 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.103.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1119 22:19:56.156774 248121 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
I1119 22:19:56.156835 248121 ssh_runner.go:195] Run: sudo crictl images --output json
I1119 22:19:56.185228 248121 containerd.go:623] couldn't find preloaded image for "registry.k8s.io/kube-apiserver:v1.34.1". assuming images are not preloaded.
I1119 22:19:56.185258 248121 cache_images.go:90] LoadCachedImages start: [registry.k8s.io/kube-apiserver:v1.34.1 registry.k8s.io/kube-controller-manager:v1.34.1 registry.k8s.io/kube-scheduler:v1.34.1 registry.k8s.io/kube-proxy:v1.34.1 registry.k8s.io/pause:3.10.1 registry.k8s.io/etcd:3.6.4-0 registry.k8s.io/coredns/coredns:v1.12.1 gcr.io/k8s-minikube/storage-provisioner:v5]
I1119 22:19:56.185326 248121 image.go:138] retrieving image: registry.k8s.io/kube-apiserver:v1.34.1
I1119 22:19:56.185359 248121 image.go:138] retrieving image: registry.k8s.io/coredns/coredns:v1.12.1
I1119 22:19:56.185391 248121 image.go:138] retrieving image: registry.k8s.io/pause:3.10.1
I1119 22:19:56.185403 248121 image.go:138] retrieving image: registry.k8s.io/etcd:3.6.4-0
I1119 22:19:56.185415 248121 image.go:138] retrieving image: registry.k8s.io/kube-controller-manager:v1.34.1
I1119 22:19:56.185453 248121 image.go:138] retrieving image: registry.k8s.io/kube-proxy:v1.34.1
I1119 22:19:56.185334 248121 image.go:138] retrieving image: gcr.io/k8s-minikube/storage-provisioner:v5
I1119 22:19:56.185400 248121 image.go:138] retrieving image: registry.k8s.io/kube-scheduler:v1.34.1
I1119 22:19:56.186856 248121 image.go:181] daemon lookup for registry.k8s.io/coredns/coredns:v1.12.1: Error response from daemon: No such image: registry.k8s.io/coredns/coredns:v1.12.1
I1119 22:19:56.186874 248121 image.go:181] daemon lookup for gcr.io/k8s-minikube/storage-provisioner:v5: Error response from daemon: No such image: gcr.io/k8s-minikube/storage-provisioner:v5
I1119 22:19:56.186979 248121 image.go:181] daemon lookup for registry.k8s.io/kube-controller-manager:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-controller-manager:v1.34.1
I1119 22:19:56.186979 248121 image.go:181] daemon lookup for registry.k8s.io/kube-apiserver:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-apiserver:v1.34.1
I1119 22:19:56.187070 248121 image.go:181] daemon lookup for registry.k8s.io/pause:3.10.1: Error response from daemon: No such image: registry.k8s.io/pause:3.10.1
I1119 22:19:56.187094 248121 image.go:181] daemon lookup for registry.k8s.io/kube-proxy:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-proxy:v1.34.1
I1119 22:19:56.187129 248121 image.go:181] daemon lookup for registry.k8s.io/etcd:3.6.4-0: Error response from daemon: No such image: registry.k8s.io/etcd:3.6.4-0
I1119 22:19:56.187150 248121 image.go:181] daemon lookup for registry.k8s.io/kube-scheduler:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-scheduler:v1.34.1
I1119 22:19:56.332716 248121 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-controller-manager:v1.34.1" and sha "c80c8dbafe7dd71fc21527912a6dd20ccd1b71f3e561a5c28337388d0619538f"
I1119 22:19:56.332783 248121 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-controller-manager:v1.34.1
I1119 22:19:56.332809 248121 containerd.go:267] Checking existence of image with name "registry.k8s.io/coredns/coredns:v1.12.1" and sha "52546a367cc9e0d924aa3b190596a9167fa6e53245023b5b5baf0f07e5443969"
I1119 22:19:56.332864 248121 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/coredns/coredns:v1.12.1
I1119 22:19:56.335699 248121 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-apiserver:v1.34.1" and sha "c3994bc6961024917ec0aeee02e62828108c21a52d87648e30f3080d9cbadc97"
I1119 22:19:56.335755 248121 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-apiserver:v1.34.1
I1119 22:19:56.343400 248121 containerd.go:267] Checking existence of image with name "registry.k8s.io/etcd:3.6.4-0" and sha "5f1f5298c888daa46c4409ff4cefe5ca9d16e479419f94cdb5f5d5563dac0115"
I1119 22:19:56.343484 248121 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/etcd:3.6.4-0
I1119 22:19:56.354423 248121 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-scheduler:v1.34.1" and sha "7dd6aaa1717ab7eaae4578503e4c4d9965fcf5a249e8155fe16379ee9b6cb813"
I1119 22:19:56.354489 248121 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-scheduler:v1.34.1
I1119 22:19:56.357606 248121 cache_images.go:118] "registry.k8s.io/kube-controller-manager:v1.34.1" needs transfer: "registry.k8s.io/kube-controller-manager:v1.34.1" does not exist at hash "c80c8dbafe7dd71fc21527912a6dd20ccd1b71f3e561a5c28337388d0619538f" in container runtime
I1119 22:19:56.357630 248121 cache_images.go:118] "registry.k8s.io/coredns/coredns:v1.12.1" needs transfer: "registry.k8s.io/coredns/coredns:v1.12.1" does not exist at hash "52546a367cc9e0d924aa3b190596a9167fa6e53245023b5b5baf0f07e5443969" in container runtime
I1119 22:19:56.357659 248121 cri.go:218] Removing image: registry.k8s.io/kube-controller-manager:v1.34.1
I1119 22:19:56.357662 248121 cri.go:218] Removing image: registry.k8s.io/coredns/coredns:v1.12.1
I1119 22:19:56.357709 248121 ssh_runner.go:195] Run: which crictl
I1119 22:19:56.357709 248121 ssh_runner.go:195] Run: which crictl
I1119 22:19:56.359708 248121 cache_images.go:118] "registry.k8s.io/kube-apiserver:v1.34.1" needs transfer: "registry.k8s.io/kube-apiserver:v1.34.1" does not exist at hash "c3994bc6961024917ec0aeee02e62828108c21a52d87648e30f3080d9cbadc97" in container runtime
I1119 22:19:56.359750 248121 cri.go:218] Removing image: registry.k8s.io/kube-apiserver:v1.34.1
I1119 22:19:56.359792 248121 ssh_runner.go:195] Run: which crictl
I1119 22:19:56.365141 248121 containerd.go:267] Checking existence of image with name "registry.k8s.io/pause:3.10.1" and sha "cd073f4c5f6a8e9dc6f3125ba00cf60819cae95c1ec84a1f146ee4a9cf9e803f"
I1119 22:19:56.365211 248121 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/pause:3.10.1
I1119 22:19:56.370262 248121 cache_images.go:118] "registry.k8s.io/etcd:3.6.4-0" needs transfer: "registry.k8s.io/etcd:3.6.4-0" does not exist at hash "5f1f5298c888daa46c4409ff4cefe5ca9d16e479419f94cdb5f5d5563dac0115" in container runtime
I1119 22:19:56.370317 248121 cri.go:218] Removing image: registry.k8s.io/etcd:3.6.4-0
I1119 22:19:56.370368 248121 ssh_runner.go:195] Run: which crictl
I1119 22:19:56.380909 248121 cache_images.go:118] "registry.k8s.io/kube-scheduler:v1.34.1" needs transfer: "registry.k8s.io/kube-scheduler:v1.34.1" does not exist at hash "7dd6aaa1717ab7eaae4578503e4c4d9965fcf5a249e8155fe16379ee9b6cb813" in container runtime
I1119 22:19:56.380976 248121 cri.go:218] Removing image: registry.k8s.io/kube-scheduler:v1.34.1
I1119 22:19:56.381006 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.12.1
I1119 22:19:56.381021 248121 ssh_runner.go:195] Run: which crictl
I1119 22:19:56.381050 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.34.1
I1119 22:19:56.381079 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.34.1
I1119 22:19:56.387736 248121 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-proxy:v1.34.1" and sha "fc25172553d79197ecd840ec8dba1fba68330079355e974b04c1a441e6a4a0b7"
I1119 22:19:56.387826 248121 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-proxy:v1.34.1
I1119 22:19:56.388049 248121 cache_images.go:118] "registry.k8s.io/pause:3.10.1" needs transfer: "registry.k8s.io/pause:3.10.1" does not exist at hash "cd073f4c5f6a8e9dc6f3125ba00cf60819cae95c1ec84a1f146ee4a9cf9e803f" in container runtime
I1119 22:19:56.388093 248121 cri.go:218] Removing image: registry.k8s.io/pause:3.10.1
I1119 22:19:56.388134 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.34.1
I1119 22:19:56.388139 248121 ssh_runner.go:195] Run: which crictl
I1119 22:19:56.388097 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.6.4-0
I1119 22:19:56.419491 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.34.1
I1119 22:19:56.419632 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.34.1
I1119 22:19:56.422653 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.12.1
I1119 22:19:56.424802 248121 cache_images.go:118] "registry.k8s.io/kube-proxy:v1.34.1" needs transfer: "registry.k8s.io/kube-proxy:v1.34.1" does not exist at hash "fc25172553d79197ecd840ec8dba1fba68330079355e974b04c1a441e6a4a0b7" in container runtime
I1119 22:19:56.424851 248121 cri.go:218] Removing image: registry.k8s.io/kube-proxy:v1.34.1
I1119 22:19:56.424918 248121 ssh_runner.go:195] Run: which crictl
I1119 22:19:56.426559 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.6.4-0
I1119 22:19:56.426657 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.34.1
I1119 22:19:56.426745 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1119 22:19:56.457323 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.34.1
I1119 22:19:56.459754 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.34.1
I1119 22:19:56.459823 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.12.1
I1119 22:19:56.459928 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.34.1
I1119 22:19:56.464385 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.6.4-0
I1119 22:19:56.464524 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1119 22:19:56.464526 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.34.1
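The burst of commands above is minikube's image pre-check: for each pinned image it lists containerd's k8s.io namespace, compares what is stored against the expected sha, and removes any mismatch with crictl rmi before re-loading from cache (the "needs transfer" lines). A minimal sketch of the same check by hand, reusing an image name from the log (the namespace and paths are carried over from the lines above, not independently verified):

    # Reproduce the existence check shown in the log.
    IMG="registry.k8s.io/kube-scheduler:v1.34.1"
    if sudo ctr -n=k8s.io images ls -q "name==${IMG}" | grep -q .; then
      echo "${IMG} present in containerd"
    else
      echo "${IMG} needs transfer"
    fi
    # On a hash mismatch, minikube removes the stale copy before re-importing:
    sudo crictl rmi "${IMG}"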
I1119 22:19:56.499739 248121 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1
I1119 22:19:56.499837 248121 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-controller-manager_v1.34.1
I1119 22:19:56.504038 248121 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1
I1119 22:19:56.504120 248121 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-apiserver_v1.34.1
I1119 22:19:56.504047 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.34.1
I1119 22:19:56.504087 248121 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1
I1119 22:19:56.504256 248121 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/coredns_v1.12.1
I1119 22:19:56.507722 248121 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0
I1119 22:19:56.507817 248121 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/etcd_3.6.4-0
I1119 22:19:56.507959 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1119 22:19:56.508035 248121 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-controller-manager_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-controller-manager_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-controller-manager_v1.34.1': No such file or directory
I1119 22:19:56.508064 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1 --> /var/lib/minikube/images/kube-controller-manager_v1.34.1 (22831104 bytes)
I1119 22:19:56.508205 248121 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1
I1119 22:19:56.508348 248121 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-scheduler_v1.34.1
I1119 22:19:56.515236 248121 ssh_runner.go:352] existence check for /var/lib/minikube/images/etcd_3.6.4-0: stat -c "%s %y" /var/lib/minikube/images/etcd_3.6.4-0: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/etcd_3.6.4-0': No such file or directory
I1119 22:19:56.515270 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0 --> /var/lib/minikube/images/etcd_3.6.4-0 (74320896 bytes)
I1119 22:19:56.555985 248121 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-apiserver_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-apiserver_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-apiserver_v1.34.1': No such file or directory
I1119 22:19:56.556025 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1 --> /var/lib/minikube/images/kube-apiserver_v1.34.1 (27073024 bytes)
I1119 22:19:56.556078 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.34.1
I1119 22:19:56.556101 248121 ssh_runner.go:352] existence check for /var/lib/minikube/images/coredns_v1.12.1: stat -c "%s %y" /var/lib/minikube/images/coredns_v1.12.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/coredns_v1.12.1': No such file or directory
I1119 22:19:56.556122 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1 --> /var/lib/minikube/images/coredns_v1.12.1 (22394368 bytes)
I1119 22:19:56.571156 248121 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-scheduler_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-scheduler_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-scheduler_v1.34.1': No such file or directory
I1119 22:19:56.571205 248121 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1
I1119 22:19:56.571220 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1 --> /var/lib/minikube/images/kube-scheduler_v1.34.1 (17396736 bytes)
I1119 22:19:56.571322 248121 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/pause_3.10.1
I1119 22:19:56.646952 248121 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1
I1119 22:19:56.646960 248121 ssh_runner.go:352] existence check for /var/lib/minikube/images/pause_3.10.1: stat -c "%s %y" /var/lib/minikube/images/pause_3.10.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/pause_3.10.1': No such file or directory
I1119 22:19:56.646995 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 --> /var/lib/minikube/images/pause_3.10.1 (321024 bytes)
I1119 22:19:56.647066 248121 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-proxy_v1.34.1
I1119 22:19:56.713984 248121 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-proxy_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-proxy_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-proxy_v1.34.1': No such file or directory
I1119 22:19:56.714047 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1 --> /var/lib/minikube/images/kube-proxy_v1.34.1 (25966080 bytes)
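Each stat/scp pair above follows one pattern: stat the target under /var/lib/minikube/images; exit status 1 with "No such file or directory" means the tarball is absent, so the cached copy is pushed over SSH. A rough hand-run equivalent (the host alias and user here are placeholders, not values from the log):

    # Copy a cached image tarball only if the node does not already have it.
    SRC="$HOME/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1"
    DST="/var/lib/minikube/images/pause_3.10.1"
    if ! ssh docker@minikube-node stat -c "%s %y" "$DST" >/dev/null 2>&1; then
      scp "$SRC" "docker@minikube-node:$DST"
    fi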
I1119 22:19:56.738791 248121 containerd.go:285] Loading image: /var/lib/minikube/images/pause_3.10.1
I1119 22:19:56.738923 248121 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/pause_3.10.1
I1119 22:19:56.888282 248121 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 from cache
I1119 22:19:56.888324 248121 containerd.go:285] Loading image: /var/lib/minikube/images/kube-scheduler_v1.34.1
I1119 22:19:56.888394 248121 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-scheduler_v1.34.1
I1119 22:19:57.461211 248121 containerd.go:267] Checking existence of image with name "gcr.io/k8s-minikube/storage-provisioner:v5" and sha "6e38f40d628db3002f5617342c8872c935de530d867d0f709a2fbda1a302a562"
I1119 22:19:57.461286 248121 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==gcr.io/k8s-minikube/storage-provisioner:v5
I1119 22:19:57.982686 248121 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-scheduler_v1.34.1: (1.094253154s)
I1119 22:19:57.982716 248121 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1 from cache
I1119 22:19:57.982712 248121 cache_images.go:118] "gcr.io/k8s-minikube/storage-provisioner:v5" needs transfer: "gcr.io/k8s-minikube/storage-provisioner:v5" does not exist at hash "6e38f40d628db3002f5617342c8872c935de530d867d0f709a2fbda1a302a562" in container runtime
I1119 22:19:57.982738 248121 containerd.go:285] Loading image: /var/lib/minikube/images/kube-controller-manager_v1.34.1
I1119 22:19:57.982764 248121 cri.go:218] Removing image: gcr.io/k8s-minikube/storage-provisioner:v5
I1119 22:19:57.982789 248121 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-controller-manager_v1.34.1
I1119 22:19:57.982801 248121 ssh_runner.go:195] Run: which crictl
I1119 22:19:58.943228 248121 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1 from cache
I1119 22:19:58.943276 248121 containerd.go:285] Loading image: /var/lib/minikube/images/coredns_v1.12.1
I1119 22:19:58.943321 248121 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/coredns_v1.12.1
I1119 22:19:58.943326 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
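"Loading image" above means importing the staged tarball into containerd's k8s.io namespace, the namespace the kubelet's CRI reads; "Transferred and loaded ... from cache" is logged once the import returns. The same step by hand, with a quick CRI-side verification (the grep pattern is illustrative):

    # Import a tarball into the namespace the kubelet's CRI uses, then verify.
    sudo ctr -n=k8s.io images import /var/lib/minikube/images/pause_3.10.1
    sudo crictl images | grep pause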
I1119 22:19:55.919868 216336 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1119 22:19:55.920354 216336 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1119 22:19:55.920400 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1119 22:19:55.920445 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1119 22:19:55.949031 216336 cri.go:89] found id: "0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0"
I1119 22:19:55.949059 216336 cri.go:89] found id: "b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42"
I1119 22:19:55.949065 216336 cri.go:89] found id: ""
I1119 22:19:55.949074 216336 logs.go:282] 2 containers: [0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0 b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42]
I1119 22:19:55.949133 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:55.953108 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:55.957378 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1119 22:19:55.957442 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1119 22:19:55.987066 216336 cri.go:89] found id: "4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc"
I1119 22:19:55.987094 216336 cri.go:89] found id: ""
I1119 22:19:55.987104 216336 logs.go:282] 1 containers: [4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc]
I1119 22:19:55.987165 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:55.991215 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1119 22:19:55.991296 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1119 22:19:56.020982 216336 cri.go:89] found id: ""
I1119 22:19:56.021011 216336 logs.go:282] 0 containers: []
W1119 22:19:56.021022 216336 logs.go:284] No container was found matching "coredns"
I1119 22:19:56.021031 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1119 22:19:56.021093 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1119 22:19:56.051114 216336 cri.go:89] found id: "599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0"
I1119 22:19:56.051138 216336 cri.go:89] found id: ""
I1119 22:19:56.051147 216336 logs.go:282] 1 containers: [599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0]
I1119 22:19:56.051210 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:56.056071 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1119 22:19:56.056142 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1119 22:19:56.085375 216336 cri.go:89] found id: ""
I1119 22:19:56.085398 216336 logs.go:282] 0 containers: []
W1119 22:19:56.085405 216336 logs.go:284] No container was found matching "kube-proxy"
I1119 22:19:56.085414 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1119 22:19:56.085457 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1119 22:19:56.114914 216336 cri.go:89] found id: "1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2"
I1119 22:19:56.114941 216336 cri.go:89] found id: ""
I1119 22:19:56.114951 216336 logs.go:282] 1 containers: [1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2]
I1119 22:19:56.115011 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:56.119718 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1119 22:19:56.119785 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1119 22:19:56.148992 216336 cri.go:89] found id: ""
I1119 22:19:56.149019 216336 logs.go:282] 0 containers: []
W1119 22:19:56.149029 216336 logs.go:284] No container was found matching "kindnet"
I1119 22:19:56.149037 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1119 22:19:56.149096 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1119 22:19:56.179135 216336 cri.go:89] found id: ""
I1119 22:19:56.179163 216336 logs.go:282] 0 containers: []
W1119 22:19:56.179173 216336 logs.go:284] No container was found matching "storage-provisioner"
I1119 22:19:56.179190 216336 logs.go:123] Gathering logs for etcd [4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc] ...
I1119 22:19:56.179204 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc"
I1119 22:19:56.216379 216336 logs.go:123] Gathering logs for kube-scheduler [599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0] ...
I1119 22:19:56.216409 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0"
I1119 22:19:56.252073 216336 logs.go:123] Gathering logs for container status ...
I1119 22:19:56.252103 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1119 22:19:56.283542 216336 logs.go:123] Gathering logs for kubelet ...
I1119 22:19:56.283567 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1119 22:19:56.381327 216336 logs.go:123] Gathering logs for dmesg ...
I1119 22:19:56.381359 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1119 22:19:56.399981 216336 logs.go:123] Gathering logs for describe nodes ...
I1119 22:19:56.400019 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1119 22:19:56.493857 216336 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1119 22:19:56.493894 216336 logs.go:123] Gathering logs for kube-controller-manager [1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2] ...
I1119 22:19:56.493913 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2"
I1119 22:19:56.537441 216336 logs.go:123] Gathering logs for containerd ...
I1119 22:19:56.537473 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1119 22:19:56.590041 216336 logs.go:123] Gathering logs for kube-apiserver [0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0] ...
I1119 22:19:56.590076 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0"
I1119 22:19:56.633876 216336 logs.go:123] Gathering logs for kube-apiserver [b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42] ...
I1119 22:19:56.633925 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42"
I1119 22:19:59.179328 216336 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1119 22:19:59.179856 216336 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1119 22:19:59.179947 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1119 22:19:59.180012 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1119 22:19:59.213304 216336 cri.go:89] found id: "0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0"
I1119 22:19:59.213329 216336 cri.go:89] found id: "b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42"
I1119 22:19:59.213336 216336 cri.go:89] found id: ""
I1119 22:19:59.213346 216336 logs.go:282] 2 containers: [0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0 b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42]
I1119 22:19:59.213410 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:59.218953 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:59.223649 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1119 22:19:59.223722 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1119 22:19:59.256070 216336 cri.go:89] found id: "4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc"
I1119 22:19:59.256133 216336 cri.go:89] found id: ""
I1119 22:19:59.256144 216336 logs.go:282] 1 containers: [4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc]
I1119 22:19:59.256211 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:59.261436 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1119 22:19:59.261514 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1119 22:19:59.294827 216336 cri.go:89] found id: ""
I1119 22:19:59.294854 216336 logs.go:282] 0 containers: []
W1119 22:19:59.294864 216336 logs.go:284] No container was found matching "coredns"
I1119 22:19:59.294871 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1119 22:19:59.294944 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1119 22:19:59.328052 216336 cri.go:89] found id: "599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0"
I1119 22:19:59.328078 216336 cri.go:89] found id: ""
I1119 22:19:59.328087 216336 logs.go:282] 1 containers: [599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0]
I1119 22:19:59.328148 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:59.333661 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1119 22:19:59.333745 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1119 22:19:59.367498 216336 cri.go:89] found id: ""
I1119 22:19:59.367525 216336 logs.go:282] 0 containers: []
W1119 22:19:59.367534 216336 logs.go:284] No container was found matching "kube-proxy"
I1119 22:19:59.367543 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1119 22:19:59.367601 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1119 22:19:59.401843 216336 cri.go:89] found id: "1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2"
I1119 22:19:59.401868 216336 cri.go:89] found id: ""
I1119 22:19:59.401877 216336 logs.go:282] 1 containers: [1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2]
I1119 22:19:59.401982 216336 ssh_runner.go:195] Run: which crictl
I1119 22:19:59.406399 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1119 22:19:59.406473 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1119 22:19:59.437867 216336 cri.go:89] found id: ""
I1119 22:19:59.437948 216336 logs.go:282] 0 containers: []
W1119 22:19:59.437957 216336 logs.go:284] No container was found matching "kindnet"
I1119 22:19:59.437963 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1119 22:19:59.438041 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1119 22:19:59.465826 216336 cri.go:89] found id: ""
I1119 22:19:59.465856 216336 logs.go:282] 0 containers: []
W1119 22:19:59.465866 216336 logs.go:284] No container was found matching "storage-provisioner"
I1119 22:19:59.465905 216336 logs.go:123] Gathering logs for container status ...
I1119 22:19:59.465953 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1119 22:19:59.498633 216336 logs.go:123] Gathering logs for kubelet ...
I1119 22:19:59.498670 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1119 22:19:59.586643 216336 logs.go:123] Gathering logs for dmesg ...
I1119 22:19:59.586677 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1119 22:19:59.602123 216336 logs.go:123] Gathering logs for describe nodes ...
I1119 22:19:59.602148 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1119 22:19:59.668657 216336 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1119 22:19:59.668675 216336 logs.go:123] Gathering logs for kube-apiserver [0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0] ...
I1119 22:19:59.668702 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0"
I1119 22:19:59.705026 216336 logs.go:123] Gathering logs for kube-apiserver [b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42] ...
I1119 22:19:59.705060 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42"
I1119 22:19:59.741520 216336 logs.go:123] Gathering logs for kube-scheduler [599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0] ...
I1119 22:19:59.741550 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0"
I1119 22:19:59.780920 216336 logs.go:123] Gathering logs for etcd [4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc] ...
I1119 22:19:59.780952 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc"
I1119 22:19:59.819532 216336 logs.go:123] Gathering logs for kube-controller-manager [1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2] ...
I1119 22:19:59.819572 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2"
I1119 22:19:59.861394 216336 logs.go:123] Gathering logs for containerd ...
I1119 22:19:59.861428 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
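The two cycles above are the same post-mortem loop: the healthz probe against 192.168.76.2:8443 is refused, so minikube enumerates each control-plane component's containers, running or exited, and tails their logs through crictl, plus kubelet and containerd through journalctl. The core of that loop reduced to a sketch (kube-apiserver is one component name taken from the log):

    # Tail the last 400 log lines of every kube-apiserver container, as the loop does.
    for id in $(sudo crictl ps -a --quiet --name=kube-apiserver); do
      sudo crictl logs --tail 400 "$id"
    done
    sudo journalctl -u kubelet -n 400
    sudo journalctl -u containerd -n 400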
I1119 22:19:57.633270 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:19:58.133177 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:19:58.633156 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:19:59.133958 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:19:59.632816 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:00.133904 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:00.633510 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:01.132810 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:01.632963 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:02.132866 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
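The 500ms cadence of "kubectl get sa default" above is a wait loop: the command keeps failing until kube-controller-manager has created the "default" ServiceAccount, which is how minikube detects that kube-system privileges are ready (reported further down as elevateKubeSystemPrivileges). A minimal version of the poll, assuming the node-local kubectl path shown in the log:

    # Poll until the default ServiceAccount exists, then proceed.
    until sudo /var/lib/minikube/binaries/v1.28.0/kubectl \
        --kubeconfig=/var/lib/minikube/kubeconfig get sa default >/dev/null 2>&1; do
      sleep 0.5
    done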
I1119 22:20:00.209856 248121 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/coredns_v1.12.1: (1.266503638s)
I1119 22:20:00.209924 248121 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1 from cache
I1119 22:20:00.209943 248121 ssh_runner.go:235] Completed: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5: (1.266589504s)
I1119 22:20:00.209953 248121 containerd.go:285] Loading image: /var/lib/minikube/images/kube-apiserver_v1.34.1
I1119 22:20:00.210022 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1119 22:20:00.210039 248121 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-apiserver_v1.34.1
I1119 22:20:01.315659 248121 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-apiserver_v1.34.1: (1.105588091s)
I1119 22:20:01.315688 248121 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1 from cache
I1119 22:20:01.315709 248121 containerd.go:285] Loading image: /var/lib/minikube/images/kube-proxy_v1.34.1
I1119 22:20:01.315726 248121 ssh_runner.go:235] Completed: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5: (1.105675845s)
I1119 22:20:01.315757 248121 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-proxy_v1.34.1
I1119 22:20:01.315796 248121 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1119 22:20:02.564406 248121 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-proxy_v1.34.1: (1.248612967s)
I1119 22:20:02.564435 248121 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1 from cache
I1119 22:20:02.564452 248121 ssh_runner.go:235] Completed: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5: (1.248631025s)
I1119 22:20:02.564470 248121 containerd.go:285] Loading image: /var/lib/minikube/images/etcd_3.6.4-0
I1119 22:20:02.564502 248121 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5
I1119 22:20:02.564519 248121 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/etcd_3.6.4-0
I1119 22:20:02.564590 248121 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v5
I1119 22:20:02.568829 248121 ssh_runner.go:352] existence check for /var/lib/minikube/images/storage-provisioner_v5: stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v5: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/storage-provisioner_v5': No such file or directory
I1119 22:20:02.568862 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 --> /var/lib/minikube/images/storage-provisioner_v5 (9060352 bytes)
I1119 22:20:02.417703 216336 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1119 22:20:02.418103 216336 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1119 22:20:02.418159 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1119 22:20:02.418203 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1119 22:20:02.450244 216336 cri.go:89] found id: "0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0"
I1119 22:20:02.450266 216336 cri.go:89] found id: "b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42"
I1119 22:20:02.450271 216336 cri.go:89] found id: ""
I1119 22:20:02.450280 216336 logs.go:282] 2 containers: [0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0 b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42]
I1119 22:20:02.450336 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:02.455477 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:02.460188 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1119 22:20:02.460263 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1119 22:20:02.491317 216336 cri.go:89] found id: "4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc"
I1119 22:20:02.491341 216336 cri.go:89] found id: ""
I1119 22:20:02.491351 216336 logs.go:282] 1 containers: [4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc]
I1119 22:20:02.491409 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:02.495754 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1119 22:20:02.495837 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1119 22:20:02.526395 216336 cri.go:89] found id: ""
I1119 22:20:02.526421 216336 logs.go:282] 0 containers: []
W1119 22:20:02.526433 216336 logs.go:284] No container was found matching "coredns"
I1119 22:20:02.526441 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1119 22:20:02.526509 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1119 22:20:02.556596 216336 cri.go:89] found id: "599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0"
I1119 22:20:02.556619 216336 cri.go:89] found id: ""
I1119 22:20:02.556629 216336 logs.go:282] 1 containers: [599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0]
I1119 22:20:02.556686 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:02.561029 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1119 22:20:02.561102 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1119 22:20:02.593442 216336 cri.go:89] found id: ""
I1119 22:20:02.593468 216336 logs.go:282] 0 containers: []
W1119 22:20:02.593480 216336 logs.go:284] No container was found matching "kube-proxy"
I1119 22:20:02.593488 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1119 22:20:02.593547 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1119 22:20:02.626155 216336 cri.go:89] found id: "1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2"
I1119 22:20:02.626181 216336 cri.go:89] found id: ""
I1119 22:20:02.626191 216336 logs.go:282] 1 containers: [1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2]
I1119 22:20:02.626239 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:02.630831 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1119 22:20:02.630910 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1119 22:20:02.663060 216336 cri.go:89] found id: ""
I1119 22:20:02.663088 216336 logs.go:282] 0 containers: []
W1119 22:20:02.663098 216336 logs.go:284] No container was found matching "kindnet"
I1119 22:20:02.663106 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1119 22:20:02.663159 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1119 22:20:02.692104 216336 cri.go:89] found id: ""
I1119 22:20:02.692132 216336 logs.go:282] 0 containers: []
W1119 22:20:02.692142 216336 logs.go:284] No container was found matching "storage-provisioner"
I1119 22:20:02.692159 216336 logs.go:123] Gathering logs for kube-controller-manager [1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2] ...
I1119 22:20:02.692172 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2"
I1119 22:20:02.730157 216336 logs.go:123] Gathering logs for container status ...
I1119 22:20:02.730198 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1119 22:20:02.764408 216336 logs.go:123] Gathering logs for kubelet ...
I1119 22:20:02.764435 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1119 22:20:02.871409 216336 logs.go:123] Gathering logs for kube-apiserver [0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0] ...
I1119 22:20:02.871460 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0"
I1119 22:20:02.912737 216336 logs.go:123] Gathering logs for kube-apiserver [b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42] ...
I1119 22:20:02.912778 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42"
I1119 22:20:02.958177 216336 logs.go:123] Gathering logs for kube-scheduler [599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0] ...
I1119 22:20:02.958229 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0"
I1119 22:20:03.003908 216336 logs.go:123] Gathering logs for containerd ...
I1119 22:20:03.003950 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1119 22:20:03.062041 216336 logs.go:123] Gathering logs for dmesg ...
I1119 22:20:03.062076 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1119 22:20:03.080938 216336 logs.go:123] Gathering logs for describe nodes ...
I1119 22:20:03.080972 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1119 22:20:03.153154 216336 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1119 22:20:03.153177 216336 logs.go:123] Gathering logs for etcd [4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc] ...
I1119 22:20:03.153191 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc"
I1119 22:20:02.633509 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:03.132907 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:03.633598 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:04.133836 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:04.632911 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:05.133740 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:05.633397 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:06.133422 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:06.633053 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:07.133122 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:07.632971 244005 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:07.709877 244005 kubeadm.go:1114] duration metric: took 12.184544724s to wait for elevateKubeSystemPrivileges
I1119 22:20:07.709929 244005 kubeadm.go:403] duration metric: took 23.328681682s to StartCluster
I1119 22:20:07.709949 244005 settings.go:142] acquiring lock: {Name:mk3c795849984e82ee99295088dd85252bd75f6f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:20:07.710024 244005 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21918-9296/kubeconfig
I1119 22:20:07.711281 244005 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21918-9296/kubeconfig: {Name:mk5b9093863cb8ca8629eea9fd861356875781d2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:20:07.726769 244005 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1119 22:20:07.726909 244005 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.94.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1119 22:20:07.727036 244005 config.go:182] Loaded profile config "old-k8s-version-975700": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1119 22:20:07.727028 244005 addons.go:512] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1119 22:20:07.727107 244005 addons.go:70] Setting storage-provisioner=true in profile "old-k8s-version-975700"
I1119 22:20:07.727154 244005 addons.go:239] Setting addon storage-provisioner=true in "old-k8s-version-975700"
I1119 22:20:07.727201 244005 host.go:66] Checking if "old-k8s-version-975700" exists ...
I1119 22:20:07.727269 244005 addons.go:70] Setting default-storageclass=true in profile "old-k8s-version-975700"
I1119 22:20:07.727331 244005 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "old-k8s-version-975700"
I1119 22:20:07.727652 244005 cli_runner.go:164] Run: docker container inspect old-k8s-version-975700 --format={{.State.Status}}
I1119 22:20:07.727759 244005 cli_runner.go:164] Run: docker container inspect old-k8s-version-975700 --format={{.State.Status}}
I1119 22:20:07.759624 244005 out.go:179] * Verifying Kubernetes components...
I1119 22:20:07.760449 244005 addons.go:239] Setting addon default-storageclass=true in "old-k8s-version-975700"
I1119 22:20:07.760487 244005 host.go:66] Checking if "old-k8s-version-975700" exists ...
I1119 22:20:07.760848 244005 cli_runner.go:164] Run: docker container inspect old-k8s-version-975700 --format={{.State.Status}}
I1119 22:20:07.781264 244005 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1119 22:20:07.781292 244005 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1119 22:20:07.781358 244005 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-975700
I1119 22:20:07.790624 244005 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1119 22:20:07.790705 244005 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1119 22:20:07.805293 244005 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33058 SSHKeyPath:/home/jenkins/minikube-integration/21918-9296/.minikube/machines/old-k8s-version-975700/id_rsa Username:docker}
I1119 22:20:07.811125 244005 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1119 22:20:07.811152 244005 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1119 22:20:07.811221 244005 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-975700
I1119 22:20:07.839037 244005 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33058 SSHKeyPath:/home/jenkins/minikube-integration/21918-9296/.minikube/machines/old-k8s-version-975700/id_rsa Username:docker}
I1119 22:20:07.927378 244005 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1119 22:20:07.930474 244005 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1119 22:20:07.930565 244005 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.94.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I1119 22:20:07.945012 244005 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1119 22:20:08.325616 244005 start.go:977] {"host.minikube.internal": 192.168.94.1} host record injected into CoreDNS's ConfigMap
I1119 22:20:08.326981 244005 node_ready.go:35] waiting up to 6m0s for node "old-k8s-version-975700" to be "Ready" ...
I1119 22:20:08.525071 244005 out.go:179] * Enabled addons: default-storageclass, storage-provisioner
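Addon enablement above is two steps per addon: the manifest is copied to /etc/kubernetes/addons on the node ("scp memory" denotes an embedded asset rather than a file on disk), then applied with the node-local kubectl against the node's kubeconfig. The apply step as it appears in the log:

    # Apply an addon manifest with the node-local kubectl and kubeconfig.
    sudo KUBECONFIG=/var/lib/minikube/kubeconfig \
      /var/lib/minikube/binaries/v1.28.0/kubectl apply \
      -f /etc/kubernetes/addons/storage-provisioner.yaml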
I1119 22:20:05.409665 248121 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/etcd_3.6.4-0: (2.845117956s)
I1119 22:20:05.409701 248121 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0 from cache
I1119 22:20:05.409742 248121 containerd.go:285] Loading image: /var/lib/minikube/images/storage-provisioner_v5
I1119 22:20:05.409813 248121 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/storage-provisioner_v5
I1119 22:20:05.827105 248121 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21918-9296/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 from cache
I1119 22:20:05.827149 248121 cache_images.go:125] Successfully loaded all cached images
I1119 22:20:05.827155 248121 cache_images.go:94] duration metric: took 9.641883158s to LoadCachedImages
I1119 22:20:05.827169 248121 kubeadm.go:935] updating node { 192.168.103.2 8443 v1.34.1 containerd true true} ...
I1119 22:20:05.827281 248121 kubeadm.go:947] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=no-preload-638439 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.103.2
[Install]
config:
{KubernetesVersion:v1.34.1 ClusterName:no-preload-638439 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
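The empty "ExecStart=" followed by a full "ExecStart=/var/lib/minikube/binaries/..." in the unit above is the standard systemd drop-in idiom: the blank assignment clears the ExecStart inherited from the base kubelet.service so the override replaces it instead of adding a second command. Once the drop-in is installed (the scp of 10-kubeadm.conf appears further down), the merged unit can be inspected with:

    # Show the base unit plus all drop-ins, exactly as systemd will run it.
    systemctl cat kubelet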
I1119 22:20:05.827350 248121 ssh_runner.go:195] Run: sudo crictl info
I1119 22:20:05.854538 248121 cni.go:84] Creating CNI manager for ""
I1119 22:20:05.854565 248121 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1119 22:20:05.854580 248121 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1119 22:20:05.854605 248121 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.103.2 APIServerPort:8443 KubernetesVersion:v1.34.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:no-preload-638439 NodeName:no-preload-638439 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.103.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.103.2 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1119 22:20:05.854728 248121 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.103.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///run/containerd/containerd.sock
name: "no-preload-638439"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.103.2"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.103.2"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.1
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
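The YAML above is a single multi-document stream covering all four kubeadm config kinds: InitConfiguration (node registration and API endpoint), ClusterConfiguration (component extra args, certSANs, subnets), KubeletConfiguration, and KubeProxyConfiguration. The log later copies it to /var/tmp/minikube/kubeadm.yaml.new; a file like it would be consumed roughly as follows (a sketch, assuming the .new file is promoted to kubeadm.yaml before init):

    # Initialize the control plane from a combined kubeadm config file.
    sudo /var/lib/minikube/binaries/v1.34.1/kubeadm init \
      --config /var/tmp/minikube/kubeadm.yaml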
I1119 22:20:05.854794 248121 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.1
I1119 22:20:05.863483 248121 binaries.go:54] Didn't find k8s binaries: sudo ls /var/lib/minikube/binaries/v1.34.1: Process exited with status 2
stdout:
stderr:
ls: cannot access '/var/lib/minikube/binaries/v1.34.1': No such file or directory
Initiating transfer...
I1119 22:20:05.863536 248121 ssh_runner.go:195] Run: sudo mkdir -p /var/lib/minikube/binaries/v1.34.1
I1119 22:20:05.871942 248121 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubectl?checksum=file:https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubectl.sha256
I1119 22:20:05.871968 248121 download.go:108] Downloading: https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubelet?checksum=file:https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubelet.sha256 -> /home/jenkins/minikube-integration/21918-9296/.minikube/cache/linux/amd64/v1.34.1/kubelet
I1119 22:20:05.871947 248121 download.go:108] Downloading: https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubeadm?checksum=file:https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubeadm.sha256 -> /home/jenkins/minikube-integration/21918-9296/.minikube/cache/linux/amd64/v1.34.1/kubeadm
I1119 22:20:05.872035 248121 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubectl
I1119 22:20:05.876399 248121 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.34.1/kubectl: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubectl: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.34.1/kubectl': No such file or directory
I1119 22:20:05.876433 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/cache/linux/amd64/v1.34.1/kubectl --> /var/lib/minikube/binaries/v1.34.1/kubectl (60559544 bytes)
I1119 22:20:07.043592 248121 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1119 22:20:07.058665 248121 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubelet
I1119 22:20:07.063097 248121 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.34.1/kubelet: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubelet: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.34.1/kubelet': No such file or directory
I1119 22:20:07.063136 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/cache/linux/amd64/v1.34.1/kubelet --> /var/lib/minikube/binaries/v1.34.1/kubelet (59195684 bytes)
I1119 22:20:07.259328 248121 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubeadm
I1119 22:20:07.263904 248121 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.34.1/kubeadm: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubeadm: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.34.1/kubeadm': No such file or directory
I1119 22:20:07.263944 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/cache/linux/amd64/v1.34.1/kubeadm --> /var/lib/minikube/binaries/v1.34.1/kubeadm (74027192 bytes)
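(Annotation: the "?checksum=file:...sha256" URLs above mean each binary is hashed and compared against a published digest before use. A minimal sketch of that download-and-verify step, assuming the stated mechanism — this is not minikube's download package:)

```go
// Fetch a file over HTTP, hash it with SHA-256 while writing it to
// disk, and reject it if the digest does not match the expected value.
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"net/http"
	"os"
)

func downloadVerified(url, dst, wantHex string) error {
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	out, err := os.Create(dst)
	if err != nil {
		return err
	}
	defer out.Close()

	h := sha256.New()
	// Tee the response body into both the file and the hash.
	if _, err := io.Copy(io.MultiWriter(out, h), resp.Body); err != nil {
		return err
	}
	if got := hex.EncodeToString(h.Sum(nil)); got != wantHex {
		return fmt.Errorf("checksum mismatch: got %s, want %s", got, wantHex)
	}
	return nil
}

func main() {
	if len(os.Args) != 4 {
		fmt.Fprintln(os.Stderr, "usage: fetch <url> <dst> <sha256-hex>")
		os.Exit(1)
	}
	if err := downloadVerified(os.Args[1], os.Args[2], os.Args[3]); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```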
I1119 22:20:07.467537 248121 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1119 22:20:07.476103 248121 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (322 bytes)
I1119 22:20:07.489039 248121 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1119 22:20:07.504456 248121 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2232 bytes)
I1119 22:20:07.517675 248121 ssh_runner.go:195] Run: grep 192.168.103.2 control-plane.minikube.internal$ /etc/hosts
I1119 22:20:07.521966 248121 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.103.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
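(Annotation: the grep/echo/cp pipeline above drops any stale line ending in control-plane.minikube.internal from /etc/hosts and appends the current IP mapping. An in-process sketch of the same edit — purely illustrative; minikube performs it through the remote shell, as logged:)

```go
// Remove stale hosts-file entries for a name, then append the new one.
package main

import (
	"os"
	"strings"
)

func setHostRecord(path, ip, host string) error {
	data, err := os.ReadFile(path)
	if err != nil {
		return err
	}
	var kept []string
	for _, line := range strings.Split(string(data), "\n") {
		// Matches the grep -v '\t<host>$' filter (also tolerate spaces).
		if strings.HasSuffix(line, "\t"+host) || strings.HasSuffix(line, " "+host) {
			continue
		}
		kept = append(kept, line)
	}
	kept = append(kept, ip+" "+host)
	return os.WriteFile(path, []byte(strings.Join(kept, "\n")+"\n"), 0644)
}

func main() {
	// Demonstrate on a scratch file rather than the real /etc/hosts.
	_ = os.WriteFile("hosts.test", []byte("127.0.0.1 localhost\n"), 0644)
	if err := setHostRecord("hosts.test", "192.168.103.2", "control-plane.minikube.internal"); err != nil {
		panic(err)
	}
}
```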
I1119 22:20:07.532448 248121 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1119 22:20:07.616669 248121 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1119 22:20:07.647854 248121 certs.go:69] Setting up /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439 for IP: 192.168.103.2
I1119 22:20:07.647911 248121 certs.go:195] generating shared ca certs ...
I1119 22:20:07.647941 248121 certs.go:227] acquiring lock for ca certs: {Name:mkfe62d1b64cfdbe1c6a3d1f38aa0edc5b9ec419 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:20:07.648100 248121 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21918-9296/.minikube/ca.key
I1119 22:20:07.648156 248121 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21918-9296/.minikube/proxy-client-ca.key
I1119 22:20:07.648169 248121 certs.go:257] generating profile certs ...
I1119 22:20:07.648233 248121 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/client.key
I1119 22:20:07.648249 248121 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/client.crt with IP's: []
I1119 22:20:08.248835 248121 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/client.crt ...
I1119 22:20:08.248872 248121 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/client.crt: {Name:mk71551595bc691ff029aa4f22d8136d735c86c8 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:20:08.249095 248121 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/client.key ...
I1119 22:20:08.249107 248121 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/client.key: {Name:mk7714d393e738013c7abe0f1689bcf490e26b5b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:20:08.249250 248121 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/apiserver.key.6e1d1cff
I1119 22:20:08.249267 248121 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/apiserver.crt.6e1d1cff with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.103.2]
I1119 22:20:09.018572 248121 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/apiserver.crt.6e1d1cff ...
I1119 22:20:09.018603 248121 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/apiserver.crt.6e1d1cff: {Name:mk1a2db3ea3ff5c82c4c822f2131fbadbd39c724 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:20:09.018790 248121 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/apiserver.key.6e1d1cff ...
I1119 22:20:09.018808 248121 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/apiserver.key.6e1d1cff: {Name:mk13f089d71bdc7abee8608285249f8ab5ad14b2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:20:09.018926 248121 certs.go:382] copying /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/apiserver.crt.6e1d1cff -> /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/apiserver.crt
I1119 22:20:09.019033 248121 certs.go:386] copying /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/apiserver.key.6e1d1cff -> /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/apiserver.key
I1119 22:20:09.019118 248121 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/proxy-client.key
I1119 22:20:09.019145 248121 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/proxy-client.crt with IP's: []
I1119 22:20:09.141320 248121 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/proxy-client.crt ...
I1119 22:20:09.141353 248121 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/proxy-client.crt: {Name:mke73db150d5fe88961c2b7ca7e43e6cb8c1e87f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:20:09.141532 248121 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/proxy-client.key ...
I1119 22:20:09.141550 248121 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/proxy-client.key: {Name:mk65b56a4bcd9d60fdf62f046abf7a5abe0e729f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
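(Annotation: the certs.go/crypto.go lines above generate profile certificates signed by the shared minikubeCA, with IP SANs such as 10.96.0.1 and 192.168.103.2. A simplified sketch of that pattern using the standard crypto/x509 package — RSA keys, fixed serials, errors elided for brevity; not the actual minikube code:)

```go
// Create a CA, then issue a leaf certificate with IP SANs signed by it.
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"math/big"
	"net"
	"os"
	"time"
)

func main() {
	// Self-signed CA standing in for minikubeCA (errors elided).
	caKey, _ := rsa.GenerateKey(rand.Reader, 2048)
	caTmpl := &x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{CommonName: "minikubeCA"},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().Add(24 * time.Hour),
		IsCA:                  true,
		KeyUsage:              x509.KeyUsageCertSign,
		BasicConstraintsValid: true,
	}
	caDER, _ := x509.CreateCertificate(rand.Reader, caTmpl, caTmpl, &caKey.PublicKey, caKey)
	caCert, _ := x509.ParseCertificate(caDER)

	// Leaf cert with IP SANs, like the apiserver cert in the log.
	leafKey, _ := rsa.GenerateKey(rand.Reader, 2048)
	leafTmpl := &x509.Certificate{
		SerialNumber: big.NewInt(2),
		Subject:      pkix.Name{CommonName: "minikube"},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(24 * time.Hour),
		IPAddresses:  []net.IP{net.ParseIP("192.168.103.2"), net.ParseIP("10.96.0.1")},
		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
	}
	leafDER, _ := x509.CreateCertificate(rand.Reader, leafTmpl, caCert, &leafKey.PublicKey, caKey)
	pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: leafDER})
}
```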
I1119 22:20:09.141750 248121 certs.go:484] found cert: /home/jenkins/minikube-integration/21918-9296/.minikube/certs/12821.pem (1338 bytes)
W1119 22:20:09.141799 248121 certs.go:480] ignoring /home/jenkins/minikube-integration/21918-9296/.minikube/certs/12821_empty.pem, impossibly tiny 0 bytes
I1119 22:20:09.141812 248121 certs.go:484] found cert: /home/jenkins/minikube-integration/21918-9296/.minikube/certs/ca-key.pem (1675 bytes)
I1119 22:20:09.141845 248121 certs.go:484] found cert: /home/jenkins/minikube-integration/21918-9296/.minikube/certs/ca.pem (1078 bytes)
I1119 22:20:09.141894 248121 certs.go:484] found cert: /home/jenkins/minikube-integration/21918-9296/.minikube/certs/cert.pem (1123 bytes)
I1119 22:20:09.141928 248121 certs.go:484] found cert: /home/jenkins/minikube-integration/21918-9296/.minikube/certs/key.pem (1679 bytes)
I1119 22:20:09.141984 248121 certs.go:484] found cert: /home/jenkins/minikube-integration/21918-9296/.minikube/files/etc/ssl/certs/128212.pem (1708 bytes)
I1119 22:20:09.142554 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1119 22:20:09.161569 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1119 22:20:09.180990 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1119 22:20:09.199264 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I1119 22:20:09.217135 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
I1119 22:20:09.236364 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I1119 22:20:09.255084 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1119 22:20:09.274604 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/profiles/no-preload-638439/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I1119 22:20:09.293451 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1119 22:20:09.315834 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/certs/12821.pem --> /usr/share/ca-certificates/12821.pem (1338 bytes)
I1119 22:20:09.336567 248121 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21918-9296/.minikube/files/etc/ssl/certs/128212.pem --> /usr/share/ca-certificates/128212.pem (1708 bytes)
I1119 22:20:09.354248 248121 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1119 22:20:09.367868 248121 ssh_runner.go:195] Run: openssl version
I1119 22:20:09.374260 248121 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1119 22:20:09.383332 248121 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1119 22:20:09.387801 248121 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 19 21:48 /usr/share/ca-certificates/minikubeCA.pem
I1119 22:20:09.387864 248121 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1119 22:20:09.424342 248121 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1119 22:20:09.433605 248121 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/12821.pem && ln -fs /usr/share/ca-certificates/12821.pem /etc/ssl/certs/12821.pem"
I1119 22:20:09.442478 248121 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/12821.pem
I1119 22:20:09.446634 248121 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 19 21:53 /usr/share/ca-certificates/12821.pem
I1119 22:20:09.446694 248121 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/12821.pem
I1119 22:20:09.481876 248121 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/12821.pem /etc/ssl/certs/51391683.0"
I1119 22:20:09.491181 248121 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/128212.pem && ln -fs /usr/share/ca-certificates/128212.pem /etc/ssl/certs/128212.pem"
I1119 22:20:09.499823 248121 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/128212.pem
I1119 22:20:09.503986 248121 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 19 21:53 /usr/share/ca-certificates/128212.pem
I1119 22:20:09.504043 248121 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/128212.pem
I1119 22:20:09.539481 248121 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/128212.pem /etc/ssl/certs/3ec20f2e.0"
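(Annotation: the openssl/ln sequences above follow the OpenSSL hashed-directory convention: TLS libraries look up a CA in /etc/ssl/certs via a symlink named <subject-hash>.0, which is why each cert's `openssl x509 -hash` output becomes a link name such as b5213941.0. A sketch of the same two steps, assuming openssl is on PATH:)

```go
// Compute a certificate's OpenSSL subject hash and create the
// <hash>.0 symlink that crypto libraries use to find it.
package main

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
)

func linkByHash(certPath, certsDir string) error {
	out, err := exec.Command("openssl", "x509", "-hash", "-noout", "-in", certPath).Output()
	if err != nil {
		return err
	}
	hash := strings.TrimSpace(string(out)) // e.g. "b5213941"
	link := filepath.Join(certsDir, hash+".0")
	os.Remove(link) // replace any stale link, like ln -fs
	return os.Symlink(certPath, link)
}

func main() {
	if err := linkByHash("/usr/share/ca-certificates/minikubeCA.pem", "/etc/ssl/certs"); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```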
I1119 22:20:09.548630 248121 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1119 22:20:09.552649 248121 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1119 22:20:09.552709 248121 kubeadm.go:401] StartCluster: {Name:no-preload-638439 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763561786-21918@sha256:7d857ffd31ff83715b29c3208933c3dc8deb87751fbabf3dc1f90cf1a3da6865 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-638439 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.103.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1119 22:20:09.552800 248121 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I1119 22:20:09.552841 248121 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I1119 22:20:09.580504 248121 cri.go:89] found id: ""
I1119 22:20:09.580577 248121 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1119 22:20:09.588825 248121 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1119 22:20:09.597263 248121 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I1119 22:20:09.597312 248121 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1119 22:20:09.605431 248121 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1119 22:20:09.605448 248121 kubeadm.go:158] found existing configuration files:
I1119 22:20:09.605505 248121 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1119 22:20:09.613580 248121 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1119 22:20:09.613647 248121 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1119 22:20:09.621432 248121 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1119 22:20:09.629381 248121 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1119 22:20:09.629444 248121 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1119 22:20:09.637498 248121 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1119 22:20:09.645457 248121 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1119 22:20:09.645500 248121 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1119 22:20:09.653775 248121 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1119 22:20:09.662581 248121 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1119 22:20:09.662631 248121 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1119 22:20:09.670267 248121 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1119 22:20:09.705969 248121 kubeadm.go:319] [init] Using Kubernetes version: v1.34.1
I1119 22:20:09.706049 248121 kubeadm.go:319] [preflight] Running pre-flight checks
I1119 22:20:09.725461 248121 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I1119 22:20:09.725557 248121 kubeadm.go:319] KERNEL_VERSION: 6.8.0-1043-gcp
I1119 22:20:09.725619 248121 kubeadm.go:319] OS: Linux
I1119 22:20:09.725688 248121 kubeadm.go:319] CGROUPS_CPU: enabled
I1119 22:20:09.725759 248121 kubeadm.go:319] CGROUPS_CPUSET: enabled
I1119 22:20:09.725823 248121 kubeadm.go:319] CGROUPS_DEVICES: enabled
I1119 22:20:09.725926 248121 kubeadm.go:319] CGROUPS_FREEZER: enabled
I1119 22:20:09.726011 248121 kubeadm.go:319] CGROUPS_MEMORY: enabled
I1119 22:20:09.726090 248121 kubeadm.go:319] CGROUPS_PIDS: enabled
I1119 22:20:09.726169 248121 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I1119 22:20:09.726247 248121 kubeadm.go:319] CGROUPS_IO: enabled
I1119 22:20:09.785631 248121 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1119 22:20:09.785785 248121 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1119 22:20:09.785930 248121 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I1119 22:20:09.790816 248121 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1119 22:20:05.698391 216336 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1119 22:20:08.526183 244005 addons.go:515] duration metric: took 799.151282ms for enable addons: enabled=[default-storageclass storage-provisioner]
I1119 22:20:08.830648 244005 kapi.go:214] "coredns" deployment in "kube-system" namespace and "old-k8s-version-975700" context rescaled to 1 replicas
W1119 22:20:10.330548 244005 node_ready.go:57] node "old-k8s-version-975700" has "Ready":"False" status (will retry)
W1119 22:20:12.330688 244005 node_ready.go:57] node "old-k8s-version-975700" has "Ready":"False" status (will retry)
I1119 22:20:09.792948 248121 out.go:252] - Generating certificates and keys ...
I1119 22:20:09.793051 248121 kubeadm.go:319] [certs] Using existing ca certificate authority
I1119 22:20:09.793149 248121 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1119 22:20:10.738826 248121 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I1119 22:20:10.908170 248121 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I1119 22:20:11.291841 248121 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1119 22:20:11.623960 248121 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1119 22:20:11.828384 248121 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1119 22:20:11.828565 248121 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [localhost no-preload-638439] and IPs [192.168.103.2 127.0.0.1 ::1]
I1119 22:20:12.233215 248121 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1119 22:20:12.233354 248121 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [localhost no-preload-638439] and IPs [192.168.103.2 127.0.0.1 ::1]
I1119 22:20:12.358552 248121 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1119 22:20:12.567027 248121 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1119 22:20:12.649341 248121 kubeadm.go:319] [certs] Generating "sa" key and public key
I1119 22:20:12.649468 248121 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1119 22:20:12.821942 248121 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1119 22:20:13.184331 248121 kubeadm.go:319] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I1119 22:20:13.249251 248121 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1119 22:20:13.507036 248121 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1119 22:20:13.992391 248121 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1119 22:20:13.992949 248121 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1119 22:20:14.073515 248121 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1119 22:20:10.699588 216336 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1119 22:20:10.699656 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1119 22:20:10.699719 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1119 22:20:10.736721 216336 cri.go:89] found id: "7ad4b982b6755076027d5e5a0dbbc765f8f8a005fc34051b36948b140a060ce7"
I1119 22:20:10.736747 216336 cri.go:89] found id: "0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0"
I1119 22:20:10.736753 216336 cri.go:89] found id: "b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42"
I1119 22:20:10.736758 216336 cri.go:89] found id: ""
I1119 22:20:10.736767 216336 logs.go:282] 3 containers: [7ad4b982b6755076027d5e5a0dbbc765f8f8a005fc34051b36948b140a060ce7 0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0 b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42]
I1119 22:20:10.736834 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:10.742155 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:10.747306 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:10.752281 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1119 22:20:10.752356 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1119 22:20:10.785664 216336 cri.go:89] found id: "4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc"
I1119 22:20:10.785691 216336 cri.go:89] found id: ""
I1119 22:20:10.785700 216336 logs.go:282] 1 containers: [4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc]
I1119 22:20:10.785758 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:10.791037 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1119 22:20:10.791107 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1119 22:20:10.827690 216336 cri.go:89] found id: ""
I1119 22:20:10.827736 216336 logs.go:282] 0 containers: []
W1119 22:20:10.827749 216336 logs.go:284] No container was found matching "coredns"
I1119 22:20:10.827781 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1119 22:20:10.827856 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1119 22:20:10.860463 216336 cri.go:89] found id: "599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0"
I1119 22:20:10.860489 216336 cri.go:89] found id: ""
I1119 22:20:10.860499 216336 logs.go:282] 1 containers: [599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0]
I1119 22:20:10.860557 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:10.865818 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1119 22:20:10.865902 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1119 22:20:10.896395 216336 cri.go:89] found id: ""
I1119 22:20:10.896425 216336 logs.go:282] 0 containers: []
W1119 22:20:10.896457 216336 logs.go:284] No container was found matching "kube-proxy"
I1119 22:20:10.896464 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1119 22:20:10.896524 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1119 22:20:10.927065 216336 cri.go:89] found id: "1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2"
I1119 22:20:10.927091 216336 cri.go:89] found id: ""
I1119 22:20:10.927100 216336 logs.go:282] 1 containers: [1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2]
I1119 22:20:10.927157 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:10.931718 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1119 22:20:10.931789 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1119 22:20:10.960849 216336 cri.go:89] found id: ""
I1119 22:20:10.960892 216336 logs.go:282] 0 containers: []
W1119 22:20:10.960903 216336 logs.go:284] No container was found matching "kindnet"
I1119 22:20:10.960910 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1119 22:20:10.960962 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1119 22:20:10.993029 216336 cri.go:89] found id: ""
I1119 22:20:10.993057 216336 logs.go:282] 0 containers: []
W1119 22:20:10.993067 216336 logs.go:284] No container was found matching "storage-provisioner"
I1119 22:20:10.993080 216336 logs.go:123] Gathering logs for kube-apiserver [0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0] ...
I1119 22:20:10.993094 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0"
I1119 22:20:11.027974 216336 logs.go:123] Gathering logs for kube-apiserver [b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42] ...
I1119 22:20:11.028010 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42"
I1119 22:20:11.062086 216336 logs.go:123] Gathering logs for kube-scheduler [599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0] ...
I1119 22:20:11.062120 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0"
I1119 22:20:11.103210 216336 logs.go:123] Gathering logs for kube-controller-manager [1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2] ...
I1119 22:20:11.103250 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2"
I1119 22:20:11.145837 216336 logs.go:123] Gathering logs for containerd ...
I1119 22:20:11.145872 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1119 22:20:11.199841 216336 logs.go:123] Gathering logs for container status ...
I1119 22:20:11.199937 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1119 22:20:11.236586 216336 logs.go:123] Gathering logs for dmesg ...
I1119 22:20:11.236618 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1119 22:20:11.253432 216336 logs.go:123] Gathering logs for kube-apiserver [7ad4b982b6755076027d5e5a0dbbc765f8f8a005fc34051b36948b140a060ce7] ...
I1119 22:20:11.253487 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 7ad4b982b6755076027d5e5a0dbbc765f8f8a005fc34051b36948b140a060ce7"
I1119 22:20:11.295903 216336 logs.go:123] Gathering logs for etcd [4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc] ...
I1119 22:20:11.295943 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc"
I1119 22:20:11.337708 216336 logs.go:123] Gathering logs for kubelet ...
I1119 22:20:11.337745 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1119 22:20:11.452249 216336 logs.go:123] Gathering logs for describe nodes ...
I1119 22:20:11.452285 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1119 22:20:14.830008 244005 node_ready.go:57] node "old-k8s-version-975700" has "Ready":"False" status (will retry)
W1119 22:20:16.830268 244005 node_ready.go:57] node "old-k8s-version-975700" has "Ready":"False" status (will retry)
I1119 22:20:14.075591 248121 out.go:252] - Booting up control plane ...
I1119 22:20:14.075701 248121 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1119 22:20:14.075795 248121 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1119 22:20:14.076511 248121 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1119 22:20:14.092600 248121 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1119 22:20:14.092767 248121 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I1119 22:20:14.099651 248121 kubeadm.go:319] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I1119 22:20:14.099786 248121 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1119 22:20:14.099865 248121 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1119 22:20:14.205620 248121 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I1119 22:20:14.205784 248121 kubeadm.go:319] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I1119 22:20:14.707136 248121 kubeadm.go:319] [kubelet-check] The kubelet is healthy after 501.67843ms
I1119 22:20:14.711176 248121 kubeadm.go:319] [control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
I1119 22:20:14.711406 248121 kubeadm.go:319] [control-plane-check] Checking kube-apiserver at https://192.168.103.2:8443/livez
I1119 22:20:14.711556 248121 kubeadm.go:319] [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
I1119 22:20:14.711669 248121 kubeadm.go:319] [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
I1119 22:20:16.370429 248121 kubeadm.go:319] [control-plane-check] kube-controller-manager is healthy after 1.659105526s
I1119 22:20:16.919263 248121 kubeadm.go:319] [control-plane-check] kube-scheduler is healthy after 2.208262146s
I1119 22:20:18.712413 248121 kubeadm.go:319] [control-plane-check] kube-apiserver is healthy after 4.001122323s
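(Annotation: the control-plane-check lines above poll each component's HTTPS health endpoint until it answers or a 4m0s deadline passes. A minimal sketch of such a loop — certificate verification is skipped only because the self-signed cluster certs are not in the system trust store; this is illustrative, not kubeadm's code:)

```go
// Poll an HTTPS health endpoint until it returns 200 OK or times out.
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

func waitHealthy(url string, timeout time.Duration) error {
	client := &http.Client{
		Timeout:   2 * time.Second,
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		resp, err := client.Get(url)
		if err == nil {
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				return nil
			}
		}
		time.Sleep(500 * time.Millisecond)
	}
	return fmt.Errorf("%s not healthy after %s", url, timeout)
}

func main() {
	// Endpoint matches the kube-apiserver check logged above.
	fmt.Println(waitHealthy("https://192.168.103.2:8443/livez", 4*time.Minute))
}
```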
I1119 22:20:18.724319 248121 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1119 22:20:18.734195 248121 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1119 22:20:18.743489 248121 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1119 22:20:18.743707 248121 kubeadm.go:319] [mark-control-plane] Marking the node no-preload-638439 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1119 22:20:18.749843 248121 kubeadm.go:319] [bootstrap-token] Using token: tkvbyg.4blpqvlc8c0koqab
I1119 22:20:18.751541 248121 out.go:252] - Configuring RBAC rules ...
I1119 22:20:18.751647 248121 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1119 22:20:18.754347 248121 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1119 22:20:18.760461 248121 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1119 22:20:18.763019 248121 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1119 22:20:18.765434 248121 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1119 22:20:18.768021 248121 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1119 22:20:19.119568 248121 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1119 22:20:19.537037 248121 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1119 22:20:20.119469 248121 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1119 22:20:20.120399 248121 kubeadm.go:319]
I1119 22:20:20.120467 248121 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1119 22:20:20.120472 248121 kubeadm.go:319]
I1119 22:20:20.120605 248121 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1119 22:20:20.120632 248121 kubeadm.go:319]
I1119 22:20:20.120661 248121 kubeadm.go:319] mkdir -p $HOME/.kube
I1119 22:20:20.120719 248121 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1119 22:20:20.120765 248121 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1119 22:20:20.120772 248121 kubeadm.go:319]
I1119 22:20:20.120845 248121 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1119 22:20:20.120857 248121 kubeadm.go:319]
I1119 22:20:20.121004 248121 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1119 22:20:20.121029 248121 kubeadm.go:319]
I1119 22:20:20.121103 248121 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1119 22:20:20.121207 248121 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1119 22:20:20.121271 248121 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1119 22:20:20.121297 248121 kubeadm.go:319]
I1119 22:20:20.121444 248121 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1119 22:20:20.121523 248121 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1119 22:20:20.121533 248121 kubeadm.go:319]
I1119 22:20:20.121611 248121 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token tkvbyg.4blpqvlc8c0koqab \
I1119 22:20:20.121712 248121 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:6123875ff628fb9eedbd72f2253477865aa197083b84a1d60cb6c00de308bc63 \
I1119 22:20:20.121734 248121 kubeadm.go:319] --control-plane
I1119 22:20:20.121738 248121 kubeadm.go:319]
I1119 22:20:20.121810 248121 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1119 22:20:20.121816 248121 kubeadm.go:319]
I1119 22:20:20.121927 248121 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token tkvbyg.4blpqvlc8c0koqab \
I1119 22:20:20.122034 248121 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:6123875ff628fb9eedbd72f2253477865aa197083b84a1d60cb6c00de308bc63
I1119 22:20:20.124555 248121 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1043-gcp\n", err: exit status 1
I1119 22:20:20.124740 248121 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1119 22:20:20.124773 248121 cni.go:84] Creating CNI manager for ""
I1119 22:20:20.124786 248121 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1119 22:20:20.127350 248121 out.go:179] * Configuring CNI (Container Networking Interface) ...
W1119 22:20:19.330624 244005 node_ready.go:57] node "old-k8s-version-975700" has "Ready":"False" status (will retry)
W1119 22:20:21.830427 244005 node_ready.go:57] node "old-k8s-version-975700" has "Ready":"False" status (will retry)
I1119 22:20:22.330516 244005 node_ready.go:49] node "old-k8s-version-975700" is "Ready"
I1119 22:20:22.330545 244005 node_ready.go:38] duration metric: took 14.003533581s for node "old-k8s-version-975700" to be "Ready" ...
I1119 22:20:22.330557 244005 api_server.go:52] waiting for apiserver process to appear ...
I1119 22:20:22.330607 244005 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1119 22:20:22.343206 244005 api_server.go:72] duration metric: took 14.6162161s to wait for apiserver process to appear ...
I1119 22:20:22.343236 244005 api_server.go:88] waiting for apiserver healthz status ...
I1119 22:20:22.343259 244005 api_server.go:253] Checking apiserver healthz at https://192.168.94.2:8443/healthz ...
I1119 22:20:22.347053 244005 api_server.go:279] https://192.168.94.2:8443/healthz returned 200:
ok
I1119 22:20:22.348151 244005 api_server.go:141] control plane version: v1.28.0
I1119 22:20:22.348175 244005 api_server.go:131] duration metric: took 4.933094ms to wait for apiserver health ...
I1119 22:20:22.348183 244005 system_pods.go:43] waiting for kube-system pods to appear ...
I1119 22:20:22.351821 244005 system_pods.go:59] 8 kube-system pods found
I1119 22:20:22.351849 244005 system_pods.go:61] "coredns-5dd5756b68-8hdh7" [a4057bf2-fe2e-42db-83e9-bc625724c61c] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1119 22:20:22.351854 244005 system_pods.go:61] "etcd-old-k8s-version-975700" [12a76858-b7be-4963-8323-fe57ca12a08d] Running
I1119 22:20:22.351860 244005 system_pods.go:61] "kindnet-mlzfc" [e2532f4d-a32b-45a0-b846-1d2ecea1f926] Running
I1119 22:20:22.351864 244005 system_pods.go:61] "kube-apiserver-old-k8s-version-975700" [28d03966-c950-4e9c-bbd5-4aeb08bb3363] Running
I1119 22:20:22.351869 244005 system_pods.go:61] "kube-controller-manager-old-k8s-version-975700" [b2f2d323-34b1-47a7-945e-73086e2e6887] Running
I1119 22:20:22.351873 244005 system_pods.go:61] "kube-proxy-rnxxf" [f06c0f26-a6bc-4dcb-a9f4-c64b43b4cc1d] Running
I1119 22:20:22.351877 244005 system_pods.go:61] "kube-scheduler-old-k8s-version-975700" [65c95750-3a2f-4847-a93d-4e54bc709449] Running
I1119 22:20:22.351892 244005 system_pods.go:61] "storage-provisioner" [6c937194-8889-47a0-b05f-7af799e18044] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1119 22:20:22.351898 244005 system_pods.go:74] duration metric: took 3.709193ms to wait for pod list to return data ...
I1119 22:20:22.351906 244005 default_sa.go:34] waiting for default service account to be created ...
I1119 22:20:22.353863 244005 default_sa.go:45] found service account: "default"
I1119 22:20:22.353906 244005 default_sa.go:55] duration metric: took 1.968518ms for default service account to be created ...
I1119 22:20:22.353917 244005 system_pods.go:116] waiting for k8s-apps to be running ...
I1119 22:20:22.356763 244005 system_pods.go:86] 8 kube-system pods found
I1119 22:20:22.356787 244005 system_pods.go:89] "coredns-5dd5756b68-8hdh7" [a4057bf2-fe2e-42db-83e9-bc625724c61c] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1119 22:20:22.356792 244005 system_pods.go:89] "etcd-old-k8s-version-975700" [12a76858-b7be-4963-8323-fe57ca12a08d] Running
I1119 22:20:22.356799 244005 system_pods.go:89] "kindnet-mlzfc" [e2532f4d-a32b-45a0-b846-1d2ecea1f926] Running
I1119 22:20:22.356803 244005 system_pods.go:89] "kube-apiserver-old-k8s-version-975700" [28d03966-c950-4e9c-bbd5-4aeb08bb3363] Running
I1119 22:20:22.356810 244005 system_pods.go:89] "kube-controller-manager-old-k8s-version-975700" [b2f2d323-34b1-47a7-945e-73086e2e6887] Running
I1119 22:20:22.356813 244005 system_pods.go:89] "kube-proxy-rnxxf" [f06c0f26-a6bc-4dcb-a9f4-c64b43b4cc1d] Running
I1119 22:20:22.356817 244005 system_pods.go:89] "kube-scheduler-old-k8s-version-975700" [65c95750-3a2f-4847-a93d-4e54bc709449] Running
I1119 22:20:22.356822 244005 system_pods.go:89] "storage-provisioner" [6c937194-8889-47a0-b05f-7af799e18044] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1119 22:20:22.356838 244005 retry.go:31] will retry after 295.130955ms: missing components: kube-dns
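(Annotation: the retry.go lines above ("will retry after 295.130955ms", then ~290ms, ~460ms, ~513ms) come from a retry loop with growing, jittered waits. A minimal stand-in for that pattern — not minikube's actual retry package:)

```go
// Call fn until it succeeds or attempts run out, sleeping a jittered,
// growing interval between tries.
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func retry(attempts int, base time.Duration, fn func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = fn(); err == nil {
			return nil
		}
		// Grow the wait and add jitter, like "will retry after 295ms".
		wait := base*time.Duration(1<<i) + time.Duration(rand.Int63n(int64(base)))
		fmt.Printf("will retry after %s: %v\n", wait, err)
		time.Sleep(wait)
	}
	return err
}

func main() {
	calls := 0
	_ = retry(5, 200*time.Millisecond, func() error {
		calls++
		if calls < 3 {
			return fmt.Errorf("missing components: kube-dns")
		}
		return nil
	})
}
```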
I1119 22:20:20.128552 248121 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I1119 22:20:20.133893 248121 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.34.1/kubectl ...
I1119 22:20:20.133928 248121 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I1119 22:20:20.148247 248121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I1119 22:20:20.366418 248121 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1119 22:20:20.366472 248121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:20.366530 248121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes no-preload-638439 minikube.k8s.io/updated_at=2025_11_19T22_20_20_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=08454a179ffa60c8ae500105aac58654b5cdef58 minikube.k8s.io/name=no-preload-638439 minikube.k8s.io/primary=true
I1119 22:20:20.472760 248121 ops.go:34] apiserver oom_adj: -16
I1119 22:20:20.472956 248121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:20.973815 248121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:21.473583 248121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:21.973622 248121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:22.473704 248121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:22.973336 248121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:23.473849 248121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:23.973455 248121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:24.472997 248121 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1119 22:20:24.537110 248121 kubeadm.go:1114] duration metric: took 4.170685845s to wait for elevateKubeSystemPrivileges
I1119 22:20:24.537150 248121 kubeadm.go:403] duration metric: took 14.984446293s to StartCluster
I1119 22:20:24.537173 248121 settings.go:142] acquiring lock: {Name:mk3c795849984e82ee99295088dd85252bd75f6f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:20:24.537243 248121 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21918-9296/kubeconfig
I1119 22:20:24.539105 248121 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21918-9296/kubeconfig: {Name:mk5b9093863cb8ca8629eea9fd861356875781d2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1119 22:20:24.539319 248121 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1119 22:20:24.539342 248121 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.103.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1119 22:20:24.539397 248121 addons.go:512] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1119 22:20:24.539519 248121 addons.go:70] Setting storage-provisioner=true in profile "no-preload-638439"
I1119 22:20:24.539532 248121 config.go:182] Loaded profile config "no-preload-638439": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1119 22:20:24.539540 248121 addons.go:239] Setting addon storage-provisioner=true in "no-preload-638439"
I1119 22:20:24.539552 248121 addons.go:70] Setting default-storageclass=true in profile "no-preload-638439"
I1119 22:20:24.539577 248121 host.go:66] Checking if "no-preload-638439" exists ...
I1119 22:20:24.539588 248121 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "no-preload-638439"
I1119 22:20:24.539936 248121 cli_runner.go:164] Run: docker container inspect no-preload-638439 --format={{.State.Status}}
I1119 22:20:24.540134 248121 cli_runner.go:164] Run: docker container inspect no-preload-638439 --format={{.State.Status}}
I1119 22:20:24.541288 248121 out.go:179] * Verifying Kubernetes components...
I1119 22:20:24.543039 248121 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1119 22:20:24.564207 248121 addons.go:239] Setting addon default-storageclass=true in "no-preload-638439"
I1119 22:20:24.564253 248121 host.go:66] Checking if "no-preload-638439" exists ...
I1119 22:20:24.564597 248121 cli_runner.go:164] Run: docker container inspect no-preload-638439 --format={{.State.Status}}
I1119 22:20:24.564680 248121 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1119 22:20:24.568527 248121 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1119 22:20:24.568546 248121 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1119 22:20:24.568596 248121 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-638439
I1119 22:20:24.597385 248121 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21918-9296/.minikube/machines/no-preload-638439/id_rsa Username:docker}
I1119 22:20:24.599498 248121 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1119 22:20:24.599523 248121 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1119 22:20:24.599582 248121 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-638439
I1119 22:20:24.624046 248121 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21918-9296/.minikube/machines/no-preload-638439/id_rsa Username:docker}
I1119 22:20:24.628608 248121 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.103.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I1119 22:20:24.684697 248121 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1119 22:20:24.711970 248121 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1119 22:20:24.742786 248121 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1119 22:20:24.836401 248121 start.go:977] {"host.minikube.internal": 192.168.103.1} host record injected into CoreDNS's ConfigMap
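(Annotation: the sed pipeline a few lines up injects a "hosts" block into the CoreDNS Corefile, just before the forward plugin stanza, so host.minikube.internal resolves to the gateway IP inside the cluster. A string-level sketch of that edit — the real flow round-trips the ConfigMap through kubectl, as logged:)

```go
// Insert a CoreDNS "hosts" block ahead of the forward stanza.
package main

import (
	"fmt"
	"strings"
)

func injectHosts(corefile, ip string) string {
	block := fmt.Sprintf("    hosts {\n       %s host.minikube.internal\n       fallthrough\n    }\n", ip)
	var b strings.Builder
	for _, line := range strings.SplitAfter(corefile, "\n") {
		// Place the hosts block just before the forward plugin line.
		if strings.HasPrefix(strings.TrimSpace(line), "forward . /etc/resolv.conf") {
			b.WriteString(block)
		}
		b.WriteString(line)
	}
	return b.String()
}

func main() {
	corefile := ".:53 {\n    errors\n    forward . /etc/resolv.conf\n}\n"
	fmt.Print(injectHosts(corefile, "192.168.103.1"))
}
```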
I1119 22:20:24.837864 248121 node_ready.go:35] waiting up to 6m0s for node "no-preload-638439" to be "Ready" ...
I1119 22:20:25.026785 248121 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
I1119 22:20:21.527976 216336 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": (10.075664087s)
W1119 22:20:21.528025 216336 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
Unable to connect to the server: net/http: TLS handshake timeout
output:
** stderr **
Unable to connect to the server: net/http: TLS handshake timeout
** /stderr **
I1119 22:20:24.028516 216336 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1119 22:20:22.657454 244005 system_pods.go:86] 8 kube-system pods found
I1119 22:20:22.657490 244005 system_pods.go:89] "coredns-5dd5756b68-8hdh7" [a4057bf2-fe2e-42db-83e9-bc625724c61c] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1119 22:20:22.657499 244005 system_pods.go:89] "etcd-old-k8s-version-975700" [12a76858-b7be-4963-8323-fe57ca12a08d] Running
I1119 22:20:22.657508 244005 system_pods.go:89] "kindnet-mlzfc" [e2532f4d-a32b-45a0-b846-1d2ecea1f926] Running
I1119 22:20:22.657513 244005 system_pods.go:89] "kube-apiserver-old-k8s-version-975700" [28d03966-c950-4e9c-bbd5-4aeb08bb3363] Running
I1119 22:20:22.657520 244005 system_pods.go:89] "kube-controller-manager-old-k8s-version-975700" [b2f2d323-34b1-47a7-945e-73086e2e6887] Running
I1119 22:20:22.657526 244005 system_pods.go:89] "kube-proxy-rnxxf" [f06c0f26-a6bc-4dcb-a9f4-c64b43b4cc1d] Running
I1119 22:20:22.657534 244005 system_pods.go:89] "kube-scheduler-old-k8s-version-975700" [65c95750-3a2f-4847-a93d-4e54bc709449] Running
I1119 22:20:22.657541 244005 system_pods.go:89] "storage-provisioner" [6c937194-8889-47a0-b05f-7af799e18044] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1119 22:20:22.657562 244005 retry.go:31] will retry after 290.603952ms: missing components: kube-dns
I1119 22:20:22.951933 244005 system_pods.go:86] 8 kube-system pods found
I1119 22:20:22.951963 244005 system_pods.go:89] "coredns-5dd5756b68-8hdh7" [a4057bf2-fe2e-42db-83e9-bc625724c61c] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1119 22:20:22.951969 244005 system_pods.go:89] "etcd-old-k8s-version-975700" [12a76858-b7be-4963-8323-fe57ca12a08d] Running
I1119 22:20:22.951974 244005 system_pods.go:89] "kindnet-mlzfc" [e2532f4d-a32b-45a0-b846-1d2ecea1f926] Running
I1119 22:20:22.951978 244005 system_pods.go:89] "kube-apiserver-old-k8s-version-975700" [28d03966-c950-4e9c-bbd5-4aeb08bb3363] Running
I1119 22:20:22.951983 244005 system_pods.go:89] "kube-controller-manager-old-k8s-version-975700" [b2f2d323-34b1-47a7-945e-73086e2e6887] Running
I1119 22:20:22.951988 244005 system_pods.go:89] "kube-proxy-rnxxf" [f06c0f26-a6bc-4dcb-a9f4-c64b43b4cc1d] Running
I1119 22:20:22.951992 244005 system_pods.go:89] "kube-scheduler-old-k8s-version-975700" [65c95750-3a2f-4847-a93d-4e54bc709449] Running
I1119 22:20:22.951996 244005 system_pods.go:89] "storage-provisioner" [6c937194-8889-47a0-b05f-7af799e18044] Running
I1119 22:20:22.952009 244005 retry.go:31] will retry after 460.674944ms: missing components: kube-dns
I1119 22:20:23.417271 244005 system_pods.go:86] 8 kube-system pods found
I1119 22:20:23.417302 244005 system_pods.go:89] "coredns-5dd5756b68-8hdh7" [a4057bf2-fe2e-42db-83e9-bc625724c61c] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1119 22:20:23.417309 244005 system_pods.go:89] "etcd-old-k8s-version-975700" [12a76858-b7be-4963-8323-fe57ca12a08d] Running
I1119 22:20:23.417314 244005 system_pods.go:89] "kindnet-mlzfc" [e2532f4d-a32b-45a0-b846-1d2ecea1f926] Running
I1119 22:20:23.417320 244005 system_pods.go:89] "kube-apiserver-old-k8s-version-975700" [28d03966-c950-4e9c-bbd5-4aeb08bb3363] Running
I1119 22:20:23.417326 244005 system_pods.go:89] "kube-controller-manager-old-k8s-version-975700" [b2f2d323-34b1-47a7-945e-73086e2e6887] Running
I1119 22:20:23.417331 244005 system_pods.go:89] "kube-proxy-rnxxf" [f06c0f26-a6bc-4dcb-a9f4-c64b43b4cc1d] Running
I1119 22:20:23.417336 244005 system_pods.go:89] "kube-scheduler-old-k8s-version-975700" [65c95750-3a2f-4847-a93d-4e54bc709449] Running
I1119 22:20:23.417341 244005 system_pods.go:89] "storage-provisioner" [6c937194-8889-47a0-b05f-7af799e18044] Running
I1119 22:20:23.417365 244005 retry.go:31] will retry after 513.116078ms: missing components: kube-dns
I1119 22:20:23.935257 244005 system_pods.go:86] 8 kube-system pods found
I1119 22:20:23.935284 244005 system_pods.go:89] "coredns-5dd5756b68-8hdh7" [a4057bf2-fe2e-42db-83e9-bc625724c61c] Running
I1119 22:20:23.935290 244005 system_pods.go:89] "etcd-old-k8s-version-975700" [12a76858-b7be-4963-8323-fe57ca12a08d] Running
I1119 22:20:23.935294 244005 system_pods.go:89] "kindnet-mlzfc" [e2532f4d-a32b-45a0-b846-1d2ecea1f926] Running
I1119 22:20:23.935297 244005 system_pods.go:89] "kube-apiserver-old-k8s-version-975700" [28d03966-c950-4e9c-bbd5-4aeb08bb3363] Running
I1119 22:20:23.935301 244005 system_pods.go:89] "kube-controller-manager-old-k8s-version-975700" [b2f2d323-34b1-47a7-945e-73086e2e6887] Running
I1119 22:20:23.935304 244005 system_pods.go:89] "kube-proxy-rnxxf" [f06c0f26-a6bc-4dcb-a9f4-c64b43b4cc1d] Running
I1119 22:20:23.935308 244005 system_pods.go:89] "kube-scheduler-old-k8s-version-975700" [65c95750-3a2f-4847-a93d-4e54bc709449] Running
I1119 22:20:23.935311 244005 system_pods.go:89] "storage-provisioner" [6c937194-8889-47a0-b05f-7af799e18044] Running
I1119 22:20:23.935318 244005 system_pods.go:126] duration metric: took 1.581396028s to wait for k8s-apps to be running ...
I1119 22:20:23.935324 244005 system_svc.go:44] waiting for kubelet service to be running ....
I1119 22:20:23.935362 244005 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1119 22:20:23.948529 244005 system_svc.go:56] duration metric: took 13.192475ms WaitForService to wait for kubelet
I1119 22:20:23.948562 244005 kubeadm.go:587] duration metric: took 16.221575338s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1119 22:20:23.948584 244005 node_conditions.go:102] verifying NodePressure condition ...
I1119 22:20:23.951344 244005 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I1119 22:20:23.951368 244005 node_conditions.go:123] node cpu capacity is 8
I1119 22:20:23.951381 244005 node_conditions.go:105] duration metric: took 2.792615ms to run NodePressure ...
I1119 22:20:23.951394 244005 start.go:242] waiting for startup goroutines ...
I1119 22:20:23.951400 244005 start.go:247] waiting for cluster config update ...
I1119 22:20:23.951411 244005 start.go:256] writing updated cluster config ...
I1119 22:20:23.951671 244005 ssh_runner.go:195] Run: rm -f paused
I1119 22:20:23.955724 244005 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1119 22:20:23.960403 244005 pod_ready.go:83] waiting for pod "coredns-5dd5756b68-8hdh7" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:20:23.964724 244005 pod_ready.go:94] pod "coredns-5dd5756b68-8hdh7" is "Ready"
I1119 22:20:23.964745 244005 pod_ready.go:86] duration metric: took 4.323941ms for pod "coredns-5dd5756b68-8hdh7" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:20:23.969212 244005 pod_ready.go:83] waiting for pod "etcd-old-k8s-version-975700" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:20:23.977143 244005 pod_ready.go:94] pod "etcd-old-k8s-version-975700" is "Ready"
I1119 22:20:23.977172 244005 pod_ready.go:86] duration metric: took 7.932702ms for pod "etcd-old-k8s-version-975700" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:20:23.984279 244005 pod_ready.go:83] waiting for pod "kube-apiserver-old-k8s-version-975700" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:20:23.990403 244005 pod_ready.go:94] pod "kube-apiserver-old-k8s-version-975700" is "Ready"
I1119 22:20:23.990436 244005 pod_ready.go:86] duration metric: took 6.116437ms for pod "kube-apiserver-old-k8s-version-975700" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:20:23.994759 244005 pod_ready.go:83] waiting for pod "kube-controller-manager-old-k8s-version-975700" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:20:24.360199 244005 pod_ready.go:94] pod "kube-controller-manager-old-k8s-version-975700" is "Ready"
I1119 22:20:24.360227 244005 pod_ready.go:86] duration metric: took 365.436099ms for pod "kube-controller-manager-old-k8s-version-975700" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:20:24.562023 244005 pod_ready.go:83] waiting for pod "kube-proxy-rnxxf" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:20:24.960397 244005 pod_ready.go:94] pod "kube-proxy-rnxxf" is "Ready"
I1119 22:20:24.960428 244005 pod_ready.go:86] duration metric: took 398.37739ms for pod "kube-proxy-rnxxf" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:20:25.161533 244005 pod_ready.go:83] waiting for pod "kube-scheduler-old-k8s-version-975700" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:20:25.560960 244005 pod_ready.go:94] pod "kube-scheduler-old-k8s-version-975700" is "Ready"
I1119 22:20:25.560992 244005 pod_ready.go:86] duration metric: took 399.43384ms for pod "kube-scheduler-old-k8s-version-975700" in "kube-system" namespace to be "Ready" or be gone ...
I1119 22:20:25.561003 244005 pod_ready.go:40] duration metric: took 1.605243985s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1119 22:20:25.605359 244005 start.go:628] kubectl: 1.34.2, cluster: 1.28.0 (minor skew: 6)
I1119 22:20:25.607589 244005 out.go:203]
W1119 22:20:25.608986 244005 out.go:285] ! /usr/local/bin/kubectl is version 1.34.2, which may have incompatibilities with Kubernetes 1.28.0.
I1119 22:20:25.610519 244005 out.go:179] - Want kubectl v1.28.0? Try 'minikube kubectl -- get pods -A'
I1119 22:20:25.612224 244005 out.go:179] * Done! kubectl is now configured to use "old-k8s-version-975700" cluster and "default" namespace by default
I1119 22:20:25.028260 248121 addons.go:515] duration metric: took 488.871855ms for enable addons: enabled=[storage-provisioner default-storageclass]
I1119 22:20:25.340186 248121 kapi.go:214] "coredns" deployment in "kube-system" namespace and "no-preload-638439" context rescaled to 1 replicas
W1119 22:20:26.840695 248121 node_ready.go:57] node "no-preload-638439" has "Ready":"False" status (will retry)
W1119 22:20:28.841182 248121 node_ready.go:57] node "no-preload-638439" has "Ready":"False" status (will retry)
I1119 22:20:26.041396 216336 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": read tcp 192.168.76.1:42420->192.168.76.2:8443: read: connection reset by peer
I1119 22:20:26.041468 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1119 22:20:26.041590 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1119 22:20:26.074121 216336 cri.go:89] found id: "7ad4b982b6755076027d5e5a0dbbc765f8f8a005fc34051b36948b140a060ce7"
I1119 22:20:26.074147 216336 cri.go:89] found id: "0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0"
I1119 22:20:26.074156 216336 cri.go:89] found id: "b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42"
I1119 22:20:26.074161 216336 cri.go:89] found id: ""
I1119 22:20:26.074169 216336 logs.go:282] 3 containers: [7ad4b982b6755076027d5e5a0dbbc765f8f8a005fc34051b36948b140a060ce7 0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0 b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42]
I1119 22:20:26.074227 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:26.080252 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:26.086170 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:26.090514 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1119 22:20:26.090588 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1119 22:20:26.119338 216336 cri.go:89] found id: "4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc"
I1119 22:20:26.119365 216336 cri.go:89] found id: ""
I1119 22:20:26.119375 216336 logs.go:282] 1 containers: [4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc]
I1119 22:20:26.119431 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:26.123237 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1119 22:20:26.123308 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1119 22:20:26.150429 216336 cri.go:89] found id: ""
I1119 22:20:26.150465 216336 logs.go:282] 0 containers: []
W1119 22:20:26.150475 216336 logs.go:284] No container was found matching "coredns"
I1119 22:20:26.150488 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1119 22:20:26.150553 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1119 22:20:26.180127 216336 cri.go:89] found id: "599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0"
I1119 22:20:26.180150 216336 cri.go:89] found id: ""
I1119 22:20:26.180167 216336 logs.go:282] 1 containers: [599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0]
I1119 22:20:26.180222 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:26.185074 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1119 22:20:26.185141 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1119 22:20:26.216334 216336 cri.go:89] found id: ""
I1119 22:20:26.216362 216336 logs.go:282] 0 containers: []
W1119 22:20:26.216373 216336 logs.go:284] No container was found matching "kube-proxy"
I1119 22:20:26.216381 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1119 22:20:26.216440 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1119 22:20:26.246928 216336 cri.go:89] found id: "1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2"
I1119 22:20:26.246952 216336 cri.go:89] found id: ""
I1119 22:20:26.246962 216336 logs.go:282] 1 containers: [1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2]
I1119 22:20:26.247027 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:26.252210 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1119 22:20:26.252281 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1119 22:20:26.283008 216336 cri.go:89] found id: ""
I1119 22:20:26.283052 216336 logs.go:282] 0 containers: []
W1119 22:20:26.283086 216336 logs.go:284] No container was found matching "kindnet"
I1119 22:20:26.283101 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1119 22:20:26.283160 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1119 22:20:26.311983 216336 cri.go:89] found id: ""
I1119 22:20:26.312016 216336 logs.go:282] 0 containers: []
W1119 22:20:26.312026 216336 logs.go:284] No container was found matching "storage-provisioner"
I1119 22:20:26.312040 216336 logs.go:123] Gathering logs for describe nodes ...
I1119 22:20:26.312059 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1119 22:20:26.372080 216336 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1119 22:20:26.372108 216336 logs.go:123] Gathering logs for kube-apiserver [7ad4b982b6755076027d5e5a0dbbc765f8f8a005fc34051b36948b140a060ce7] ...
I1119 22:20:26.372123 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 7ad4b982b6755076027d5e5a0dbbc765f8f8a005fc34051b36948b140a060ce7"
I1119 22:20:26.410125 216336 logs.go:123] Gathering logs for kube-apiserver [0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0] ...
I1119 22:20:26.410156 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 0de7a80fd8d36adf98c40ede94a9bc05ff5a19ea1f7de9d22cfe4fab02ee04d0"
I1119 22:20:26.445052 216336 logs.go:123] Gathering logs for containerd ...
I1119 22:20:26.445081 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1119 22:20:26.488314 216336 logs.go:123] Gathering logs for container status ...
I1119 22:20:26.488348 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1119 22:20:26.519759 216336 logs.go:123] Gathering logs for kubelet ...
I1119 22:20:26.519786 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1119 22:20:26.607720 216336 logs.go:123] Gathering logs for dmesg ...
I1119 22:20:26.607753 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1119 22:20:26.622164 216336 logs.go:123] Gathering logs for kube-apiserver [b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42] ...
I1119 22:20:26.622196 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42"
I1119 22:20:26.658569 216336 logs.go:123] Gathering logs for etcd [4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc] ...
I1119 22:20:26.658598 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc"
I1119 22:20:26.690380 216336 logs.go:123] Gathering logs for kube-scheduler [599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0] ...
I1119 22:20:26.690410 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0"
I1119 22:20:26.723334 216336 logs.go:123] Gathering logs for kube-controller-manager [1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2] ...
I1119 22:20:26.723368 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2"
I1119 22:20:29.254435 216336 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1119 22:20:29.254927 216336 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1119 22:20:29.254988 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1119 22:20:29.255050 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1119 22:20:29.281477 216336 cri.go:89] found id: "7ad4b982b6755076027d5e5a0dbbc765f8f8a005fc34051b36948b140a060ce7"
I1119 22:20:29.281503 216336 cri.go:89] found id: "b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42"
I1119 22:20:29.281509 216336 cri.go:89] found id: ""
I1119 22:20:29.281518 216336 logs.go:282] 2 containers: [7ad4b982b6755076027d5e5a0dbbc765f8f8a005fc34051b36948b140a060ce7 b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42]
I1119 22:20:29.281576 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:29.285991 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:29.289786 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1119 22:20:29.289841 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1119 22:20:29.315177 216336 cri.go:89] found id: "4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc"
I1119 22:20:29.315199 216336 cri.go:89] found id: ""
I1119 22:20:29.315208 216336 logs.go:282] 1 containers: [4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc]
I1119 22:20:29.315264 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:29.319376 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1119 22:20:29.319444 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1119 22:20:29.346951 216336 cri.go:89] found id: ""
I1119 22:20:29.346973 216336 logs.go:282] 0 containers: []
W1119 22:20:29.346980 216336 logs.go:284] No container was found matching "coredns"
I1119 22:20:29.346998 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1119 22:20:29.347043 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1119 22:20:29.374529 216336 cri.go:89] found id: "599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0"
I1119 22:20:29.374549 216336 cri.go:89] found id: ""
I1119 22:20:29.374556 216336 logs.go:282] 1 containers: [599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0]
I1119 22:20:29.374608 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:29.378833 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1119 22:20:29.378918 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1119 22:20:29.409418 216336 cri.go:89] found id: ""
I1119 22:20:29.409456 216336 logs.go:282] 0 containers: []
W1119 22:20:29.409468 216336 logs.go:284] No container was found matching "kube-proxy"
I1119 22:20:29.409476 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1119 22:20:29.409542 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1119 22:20:29.439747 216336 cri.go:89] found id: "1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2"
I1119 22:20:29.439767 216336 cri.go:89] found id: ""
I1119 22:20:29.439775 216336 logs.go:282] 1 containers: [1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2]
I1119 22:20:29.439832 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:29.443967 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1119 22:20:29.444041 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1119 22:20:29.469669 216336 cri.go:89] found id: ""
I1119 22:20:29.469695 216336 logs.go:282] 0 containers: []
W1119 22:20:29.469705 216336 logs.go:284] No container was found matching "kindnet"
I1119 22:20:29.469712 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1119 22:20:29.469769 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1119 22:20:29.496972 216336 cri.go:89] found id: ""
I1119 22:20:29.497000 216336 logs.go:282] 0 containers: []
W1119 22:20:29.497009 216336 logs.go:284] No container was found matching "storage-provisioner"
I1119 22:20:29.497026 216336 logs.go:123] Gathering logs for kubelet ...
I1119 22:20:29.497039 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1119 22:20:29.585833 216336 logs.go:123] Gathering logs for dmesg ...
I1119 22:20:29.585865 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1119 22:20:29.600450 216336 logs.go:123] Gathering logs for kube-scheduler [599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0] ...
I1119 22:20:29.600488 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0"
I1119 22:20:29.634599 216336 logs.go:123] Gathering logs for describe nodes ...
I1119 22:20:29.634632 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1119 22:20:29.694751 216336 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1119 22:20:29.694785 216336 logs.go:123] Gathering logs for kube-apiserver [7ad4b982b6755076027d5e5a0dbbc765f8f8a005fc34051b36948b140a060ce7] ...
I1119 22:20:29.694799 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 7ad4b982b6755076027d5e5a0dbbc765f8f8a005fc34051b36948b140a060ce7"
I1119 22:20:29.728982 216336 logs.go:123] Gathering logs for kube-apiserver [b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42] ...
I1119 22:20:29.729009 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42"
I1119 22:20:29.762543 216336 logs.go:123] Gathering logs for etcd [4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc] ...
I1119 22:20:29.762572 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc"
I1119 22:20:29.794342 216336 logs.go:123] Gathering logs for kube-controller-manager [1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2] ...
I1119 22:20:29.794374 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2"
I1119 22:20:29.828582 216336 logs.go:123] Gathering logs for containerd ...
I1119 22:20:29.828610 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1119 22:20:29.874642 216336 logs.go:123] Gathering logs for container status ...
I1119 22:20:29.874672 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
W1119 22:20:31.341227 248121 node_ready.go:57] node "no-preload-638439" has "Ready":"False" status (will retry)
W1119 22:20:33.840869 248121 node_ready.go:57] node "no-preload-638439" has "Ready":"False" status (will retry)
I1119 22:20:32.406487 216336 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1119 22:20:32.406952 216336 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1119 22:20:32.407019 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1119 22:20:32.407075 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1119 22:20:32.436319 216336 cri.go:89] found id: "7ad4b982b6755076027d5e5a0dbbc765f8f8a005fc34051b36948b140a060ce7"
I1119 22:20:32.436348 216336 cri.go:89] found id: "b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42"
I1119 22:20:32.436355 216336 cri.go:89] found id: ""
I1119 22:20:32.436368 216336 logs.go:282] 2 containers: [7ad4b982b6755076027d5e5a0dbbc765f8f8a005fc34051b36948b140a060ce7 b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42]
I1119 22:20:32.436424 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:32.440717 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:32.444717 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1119 22:20:32.444781 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1119 22:20:32.470631 216336 cri.go:89] found id: "4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc"
I1119 22:20:32.470655 216336 cri.go:89] found id: ""
I1119 22:20:32.470666 216336 logs.go:282] 1 containers: [4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc]
I1119 22:20:32.470725 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:32.474820 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1119 22:20:32.474893 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1119 22:20:32.504076 216336 cri.go:89] found id: ""
I1119 22:20:32.504104 216336 logs.go:282] 0 containers: []
W1119 22:20:32.504115 216336 logs.go:284] No container was found matching "coredns"
I1119 22:20:32.504125 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1119 22:20:32.504185 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1119 22:20:32.533110 216336 cri.go:89] found id: "599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0"
I1119 22:20:32.533135 216336 cri.go:89] found id: ""
I1119 22:20:32.533143 216336 logs.go:282] 1 containers: [599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0]
I1119 22:20:32.533215 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:32.537455 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1119 22:20:32.537523 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1119 22:20:32.564625 216336 cri.go:89] found id: ""
I1119 22:20:32.564647 216336 logs.go:282] 0 containers: []
W1119 22:20:32.564655 216336 logs.go:284] No container was found matching "kube-proxy"
I1119 22:20:32.564661 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1119 22:20:32.564719 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1119 22:20:32.591414 216336 cri.go:89] found id: "1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2"
I1119 22:20:32.591443 216336 cri.go:89] found id: ""
I1119 22:20:32.591455 216336 logs.go:282] 1 containers: [1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2]
I1119 22:20:32.591535 216336 ssh_runner.go:195] Run: which crictl
I1119 22:20:32.595459 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1119 22:20:32.595529 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1119 22:20:32.621765 216336 cri.go:89] found id: ""
I1119 22:20:32.621792 216336 logs.go:282] 0 containers: []
W1119 22:20:32.621801 216336 logs.go:284] No container was found matching "kindnet"
I1119 22:20:32.621807 216336 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1119 22:20:32.621862 216336 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1119 22:20:32.647922 216336 cri.go:89] found id: ""
I1119 22:20:32.647948 216336 logs.go:282] 0 containers: []
W1119 22:20:32.647958 216336 logs.go:284] No container was found matching "storage-provisioner"
I1119 22:20:32.647978 216336 logs.go:123] Gathering logs for kube-controller-manager [1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2] ...
I1119 22:20:32.648005 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1711567faf2a254de8ee6773eb8356d5c9397538723da4b355699cea8ea8aec2"
I1119 22:20:32.680718 216336 logs.go:123] Gathering logs for containerd ...
I1119 22:20:32.680745 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1119 22:20:32.726055 216336 logs.go:123] Gathering logs for container status ...
I1119 22:20:32.726088 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1119 22:20:32.757760 216336 logs.go:123] Gathering logs for kubelet ...
I1119 22:20:32.757794 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1119 22:20:32.848763 216336 logs.go:123] Gathering logs for dmesg ...
I1119 22:20:32.848797 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1119 22:20:32.862591 216336 logs.go:123] Gathering logs for describe nodes ...
I1119 22:20:32.862631 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1119 22:20:32.922769 216336 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1119 22:20:32.922788 216336 logs.go:123] Gathering logs for etcd [4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc] ...
I1119 22:20:32.922800 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4aecac92720a6940a95e679eabf3ee1217afd1b910e328854b6a49a460e2f9dc"
I1119 22:20:32.956142 216336 logs.go:123] Gathering logs for kube-scheduler [599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0] ...
I1119 22:20:32.956171 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 599fa15a1b47a9c7bd619d50d6c3ef5df360d8fed59c2ae5ca959bfbaafb91d0"
I1119 22:20:32.991968 216336 logs.go:123] Gathering logs for kube-apiserver [7ad4b982b6755076027d5e5a0dbbc765f8f8a005fc34051b36948b140a060ce7] ...
I1119 22:20:32.992001 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 7ad4b982b6755076027d5e5a0dbbc765f8f8a005fc34051b36948b140a060ce7"
I1119 22:20:33.026022 216336 logs.go:123] Gathering logs for kube-apiserver [b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42] ...
I1119 22:20:33.026050 216336 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 b0bca29d87e3482a47e70637e6082ed5b7723b5f5b5c446ee7e2bec7d66edf42"
==> container status <==
CONTAINER       IMAGE           CREATED          STATE     NAME                      ATTEMPT   POD ID          POD                                              NAMESPACE
d5768828ca04f   56cc512116c8f   9 seconds ago    Running   busybox                   0         36bf64ba3c00d   busybox                                          default
dcb27a5492378   ead0a4a53df89   15 seconds ago   Running   coredns                   0         6a75c4192812f   coredns-5dd5756b68-8hdh7                         kube-system
537c778c87f9d   6e38f40d628db   15 seconds ago   Running   storage-provisioner       0         8fa22b8d20a3f   storage-provisioner                              kube-system
9f637c51ffa43   409467f978b4a   26 seconds ago   Running   kindnet-cni               0         cb55d544de2ea   kindnet-mlzfc                                    kube-system
bfde9418adc9d   ea1030da44aa1   29 seconds ago   Running   kube-proxy                0         4ca7d14c5d50a   kube-proxy-rnxxf                                 kube-system
814e6989c6431   f6f496300a2ae   48 seconds ago   Running   kube-scheduler            0         f5ceb3a12bb84   kube-scheduler-old-k8s-version-975700            kube-system
1870cf3b3c44b   bb5e0dde9054c   48 seconds ago   Running   kube-apiserver            0         52831c15e2557   kube-apiserver-old-k8s-version-975700            kube-system
97883579e01ac   73deb9a3f7025   48 seconds ago   Running   etcd                      0         e63e84e034d31   etcd-old-k8s-version-975700                      kube-system
f4532683638eb   4be79c38a4bab   48 seconds ago   Running   kube-controller-manager   0         250cc7adfeba7   kube-controller-manager-old-k8s-version-975700   kube-system
==> containerd <==
Nov 19 22:20:22 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:22.712366614Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-5dd5756b68-8hdh7,Uid:a4057bf2-fe2e-42db-83e9-bc625724c61c,Namespace:kube-system,Attempt:0,} returns sandbox id \"6a75c4192812faee0e855fcba490a6d63eeaa3e8229ace4b9a3a2b128e801116\""
Nov 19 22:20:22 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:22.715553681Z" level=info msg="CreateContainer within sandbox \"6a75c4192812faee0e855fcba490a6d63eeaa3e8229ace4b9a3a2b128e801116\" for container &ContainerMetadata{Name:coredns,Attempt:0,}"
Nov 19 22:20:22 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:22.722344581Z" level=info msg="Container dcb27a5492378c9249ef7c6af871ff41c7849ef2087b13036c4112f3826f90bf: CDI devices from CRI Config.CDIDevices: []"
Nov 19 22:20:22 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:22.728923728Z" level=info msg="CreateContainer within sandbox \"6a75c4192812faee0e855fcba490a6d63eeaa3e8229ace4b9a3a2b128e801116\" for &ContainerMetadata{Name:coredns,Attempt:0,} returns container id \"dcb27a5492378c9249ef7c6af871ff41c7849ef2087b13036c4112f3826f90bf\""
Nov 19 22:20:22 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:22.729475146Z" level=info msg="StartContainer for \"dcb27a5492378c9249ef7c6af871ff41c7849ef2087b13036c4112f3826f90bf\""
Nov 19 22:20:22 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:22.730499329Z" level=info msg="connecting to shim dcb27a5492378c9249ef7c6af871ff41c7849ef2087b13036c4112f3826f90bf" address="unix:///run/containerd/s/34a674b328f7f600d36cfd77d784cd14517a5b33bcc634daaca7b6dd09032aa9" protocol=ttrpc version=3
Nov 19 22:20:22 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:22.757547812Z" level=info msg="StartContainer for \"537c778c87f9d8c20894001938b5632c0e5dcc6b1095fb4d266fd4b3995811b2\" returns successfully"
Nov 19 22:20:22 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:22.786711759Z" level=info msg="StartContainer for \"dcb27a5492378c9249ef7c6af871ff41c7849ef2087b13036c4112f3826f90bf\" returns successfully"
Nov 19 22:20:26 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:26.134603361Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:b49caea0-80e8-4473-ac1f-f9bd327c3754,Namespace:default,Attempt:0,}"
Nov 19 22:20:26 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:26.185916874Z" level=info msg="connecting to shim 36bf64ba3c00d9e0c7f71f899e9cd21577248641d207dcfc98340d1d6b3cb0d0" address="unix:///run/containerd/s/c0d7613134ce7e47335ad17357d4a66a2ab52af6386e2abf7c0d2ac536b7f638" namespace=k8s.io protocol=ttrpc version=3
Nov 19 22:20:26 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:26.262497493Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:b49caea0-80e8-4473-ac1f-f9bd327c3754,Namespace:default,Attempt:0,} returns sandbox id \"36bf64ba3c00d9e0c7f71f899e9cd21577248641d207dcfc98340d1d6b3cb0d0\""
Nov 19 22:20:26 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:26.264162086Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\""
Nov 19 22:20:28 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:28.373146514Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 19 22:20:28 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:28.374074587Z" level=info msg="stop pulling image gcr.io/k8s-minikube/busybox:1.28.4-glibc: active requests=0, bytes read=2396641"
Nov 19 22:20:28 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:28.375650212Z" level=info msg="ImageCreate event name:\"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 19 22:20:28 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:28.378263887Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 19 22:20:28 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:28.378735365Z" level=info msg="Pulled image \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" with image id \"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\", repo tag \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\", repo digest \"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\", size \"2395207\" in 2.114534001s"
Nov 19 22:20:28 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:28.378776793Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" returns image reference \"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\""
Nov 19 22:20:28 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:28.380562536Z" level=info msg="CreateContainer within sandbox \"36bf64ba3c00d9e0c7f71f899e9cd21577248641d207dcfc98340d1d6b3cb0d0\" for container &ContainerMetadata{Name:busybox,Attempt:0,}"
Nov 19 22:20:28 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:28.389100774Z" level=info msg="Container d5768828ca04f9295bf18e3fc30308deb6547c5a50a2782f1e71634c15ae7e9a: CDI devices from CRI Config.CDIDevices: []"
Nov 19 22:20:28 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:28.397616150Z" level=info msg="CreateContainer within sandbox \"36bf64ba3c00d9e0c7f71f899e9cd21577248641d207dcfc98340d1d6b3cb0d0\" for &ContainerMetadata{Name:busybox,Attempt:0,} returns container id \"d5768828ca04f9295bf18e3fc30308deb6547c5a50a2782f1e71634c15ae7e9a\""
Nov 19 22:20:28 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:28.398260870Z" level=info msg="StartContainer for \"d5768828ca04f9295bf18e3fc30308deb6547c5a50a2782f1e71634c15ae7e9a\""
Nov 19 22:20:28 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:28.399512803Z" level=info msg="connecting to shim d5768828ca04f9295bf18e3fc30308deb6547c5a50a2782f1e71634c15ae7e9a" address="unix:///run/containerd/s/c0d7613134ce7e47335ad17357d4a66a2ab52af6386e2abf7c0d2ac536b7f638" protocol=ttrpc version=3
Nov 19 22:20:28 old-k8s-version-975700 containerd[666]: time="2025-11-19T22:20:28.458456492Z" level=info msg="StartContainer for \"d5768828ca04f9295bf18e3fc30308deb6547c5a50a2782f1e71634c15ae7e9a\" returns successfully"
Nov 19 22:20:34 old-k8s-version-975700 containerd[666]: E1119 22:20:34.905114 666 websocket.go:100] "Unhandled Error" err="unable to upgrade websocket connection: websocket server finished before becoming ready" logger="UnhandledError"
==> coredns [dcb27a5492378c9249ef7c6af871ff41c7849ef2087b13036c4112f3826f90bf] <==
.:53
[INFO] plugin/reload: Running configuration SHA512 = 4c7f44b73086be760ec9e64204f63c5cc5a952c8c1c55ba0b41d8fc3315ce3c7d0259d04847cb8b4561043d4549603f3bccfd9b397eeb814eef159d244d26f39
CoreDNS-1.10.1
linux/amd64, go1.20, 055b2c3
[INFO] 127.0.0.1:48436 - 61 "HINFO IN 2387730691433537035.6546186387081931462. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.161284203s
==> describe nodes <==
Name:               old-k8s-version-975700
Roles:              control-plane
Labels:             beta.kubernetes.io/arch=amd64
                    beta.kubernetes.io/os=linux
                    kubernetes.io/arch=amd64
                    kubernetes.io/hostname=old-k8s-version-975700
                    kubernetes.io/os=linux
                    minikube.k8s.io/commit=08454a179ffa60c8ae500105aac58654b5cdef58
                    minikube.k8s.io/name=old-k8s-version-975700
                    minikube.k8s.io/primary=true
                    minikube.k8s.io/updated_at=2025_11_19T22_19_55_0700
                    minikube.k8s.io/version=v1.37.0
                    node-role.kubernetes.io/control-plane=
                    node.kubernetes.io/exclude-from-external-load-balancers=
Annotations:        kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock
                    node.alpha.kubernetes.io/ttl: 0
                    volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp:  Wed, 19 Nov 2025 22:19:51 +0000
Taints:             <none>
Unschedulable:      false
Lease:
  HolderIdentity:  old-k8s-version-975700
  AcquireTime:     <unset>
  RenewTime:       Wed, 19 Nov 2025 22:20:35 +0000
Conditions:
  Type            Status  LastHeartbeatTime                LastTransitionTime               Reason                      Message
  ----            ------  -----------------                ------------------               ------                      -------
  MemoryPressure  False   Wed, 19 Nov 2025 22:20:25 +0000  Wed, 19 Nov 2025 22:19:50 +0000  KubeletHasSufficientMemory  kubelet has sufficient memory available
  DiskPressure    False   Wed, 19 Nov 2025 22:20:25 +0000  Wed, 19 Nov 2025 22:19:50 +0000  KubeletHasNoDiskPressure    kubelet has no disk pressure
  PIDPressure     False   Wed, 19 Nov 2025 22:20:25 +0000  Wed, 19 Nov 2025 22:19:50 +0000  KubeletHasSufficientPID     kubelet has sufficient PID available
  Ready           True    Wed, 19 Nov 2025 22:20:25 +0000  Wed, 19 Nov 2025 22:20:22 +0000  KubeletReady                kubelet is posting ready status
Addresses:
  InternalIP:  192.168.94.2
  Hostname:    old-k8s-version-975700
Capacity:
  cpu:                8
  ephemeral-storage:  304681132Ki
  hugepages-1Gi:      0
  hugepages-2Mi:      0
  memory:             32863356Ki
  pods:               110
Allocatable:
  cpu:                8
  ephemeral-storage:  304681132Ki
  hugepages-1Gi:      0
  hugepages-2Mi:      0
  memory:             32863356Ki
  pods:               110
System Info:
  Machine ID:                 2a4f1f11dedb3fb2ad8898bb691dcfbb
  System UUID:                3fcee5dd-d370-4209-8cfb-b52e4110e73b
  Boot ID:                    f21fb8e8-9754-4dc5-a8d9-ce41ba5f6057
  Kernel Version:             6.8.0-1043-gcp
  OS Image:                   Debian GNU/Linux 12 (bookworm)
  Operating System:           linux
  Architecture:               amd64
  Container Runtime Version:  containerd://2.1.5
  Kubelet Version:            v1.28.0
  Kube-Proxy Version:         v1.28.0
PodCIDR:                      10.244.0.0/24
PodCIDRs:                     10.244.0.0/24
Non-terminated Pods:          (9 in total)
  Namespace    Name                                             CPU Requests  CPU Limits  Memory Requests  Memory Limits  Age
  ---------    ----                                             ------------  ----------  ---------------  -------------  ---
  default      busybox                                          0 (0%)        0 (0%)      0 (0%)           0 (0%)         12s
  kube-system  coredns-5dd5756b68-8hdh7                         100m (1%)     0 (0%)      70Mi (0%)        170Mi (0%)     30s
  kube-system  etcd-old-k8s-version-975700                      100m (1%)     0 (0%)      100Mi (0%)       0 (0%)         43s
  kube-system  kindnet-mlzfc                                    100m (1%)     100m (1%)   50Mi (0%)        50Mi (0%)      30s
  kube-system  kube-apiserver-old-k8s-version-975700            250m (3%)     0 (0%)      0 (0%)           0 (0%)         45s
  kube-system  kube-controller-manager-old-k8s-version-975700   200m (2%)     0 (0%)      0 (0%)           0 (0%)         43s
  kube-system  kube-proxy-rnxxf                                 0 (0%)        0 (0%)      0 (0%)           0 (0%)         30s
  kube-system  kube-scheduler-old-k8s-version-975700            100m (1%)     0 (0%)      0 (0%)           0 (0%)         44s
  kube-system  storage-provisioner                              0 (0%)        0 (0%)      0 (0%)           0 (0%)         29s
Allocated resources:
  (Total limits may be over 100 percent, i.e., overcommitted.)
  Resource           Requests    Limits
  --------           --------    ------
  cpu                850m (10%)  100m (1%)
  memory             220Mi (0%)  220Mi (0%)
  ephemeral-storage  0 (0%)      0 (0%)
  hugepages-1Gi      0 (0%)      0 (0%)
  hugepages-2Mi      0 (0%)      0 (0%)
Events:
  Type    Reason                   Age   From             Message
  ----    ------                   ----  ----             -------
  Normal  Starting                 29s   kube-proxy
  Normal  Starting                 43s   kubelet          Starting kubelet.
  Normal  NodeAllocatableEnforced  43s   kubelet          Updated Node Allocatable limit across pods
  Normal  NodeHasSufficientMemory  43s   kubelet          Node old-k8s-version-975700 status is now: NodeHasSufficientMemory
  Normal  NodeHasNoDiskPressure    43s   kubelet          Node old-k8s-version-975700 status is now: NodeHasNoDiskPressure
  Normal  NodeHasSufficientPID     43s   kubelet          Node old-k8s-version-975700 status is now: NodeHasSufficientPID
  Normal  RegisteredNode           31s   node-controller  Node old-k8s-version-975700 event: Registered Node old-k8s-version-975700 in Controller
  Normal  NodeReady                15s   kubelet          Node old-k8s-version-975700 status is now: NodeReady
==> dmesg <==
[Nov19 21:17] MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.
[ +0.001836] TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.
[ +0.000999] MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.
[ +0.089012] acpi PNP0A03:00: fail to add MMCONFIG information, can't access extended configuration space under this bridge
[ +0.424964] i8042: Warning: Keylock active
[ +0.011946] device-mapper: core: CONFIG_IMA_DISABLE_HTABLE is disabled. Duplicate IMA measurements will not be recorded in the IMA log.
[ +0.499038] block sda: the capability attribute has been deprecated.
[ +0.090446] systemd[1]: Configuration file /run/systemd/system/netplan-ovs-cleanup.service is marked world-inaccessible. This has no effect as configuration data is accessible via APIs without restrictions. Proceeding anyway.
[ +0.026259] systemd[1]: /lib/systemd/system/snapd.service:23: Unknown key name 'RestartMode' in section 'Service', ignoring.
[ +4.862736] kauditd_printk_skb: 47 callbacks suppressed
==> etcd [97883579e01acd8bc2695b07f55c948f3a46c160bf534f88de73606eaba10069] <==
{"level":"info","ts":"2025-11-19T22:19:49.465492Z","caller":"embed/etcd.go:597","msg":"serving peer traffic","address":"192.168.94.2:2380"}
{"level":"info","ts":"2025-11-19T22:19:49.465528Z","caller":"embed/etcd.go:569","msg":"cmux::serve","address":"192.168.94.2:2380"}
{"level":"info","ts":"2025-11-19T22:19:50.345522Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"dfc97eb0aae75b33 is starting a new election at term 1"}
{"level":"info","ts":"2025-11-19T22:19:50.345562Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"dfc97eb0aae75b33 became pre-candidate at term 1"}
{"level":"info","ts":"2025-11-19T22:19:50.345577Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"dfc97eb0aae75b33 received MsgPreVoteResp from dfc97eb0aae75b33 at term 1"}
{"level":"info","ts":"2025-11-19T22:19:50.345588Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"dfc97eb0aae75b33 became candidate at term 2"}
{"level":"info","ts":"2025-11-19T22:19:50.345593Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"dfc97eb0aae75b33 received MsgVoteResp from dfc97eb0aae75b33 at term 2"}
{"level":"info","ts":"2025-11-19T22:19:50.345601Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"dfc97eb0aae75b33 became leader at term 2"}
{"level":"info","ts":"2025-11-19T22:19:50.345607Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: dfc97eb0aae75b33 elected leader dfc97eb0aae75b33 at term 2"}
{"level":"info","ts":"2025-11-19T22:19:50.346237Z","caller":"etcdserver/server.go:2571","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-19T22:19:50.346786Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-19T22:19:50.346778Z","caller":"etcdserver/server.go:2062","msg":"published local member to cluster through raft","local-member-id":"dfc97eb0aae75b33","local-member-attributes":"{Name:old-k8s-version-975700 ClientURLs:[https://192.168.94.2:2379]}","request-path":"/0/members/dfc97eb0aae75b33/attributes","cluster-id":"da400bbece288f5a","publish-timeout":"7s"}
{"level":"info","ts":"2025-11-19T22:19:50.346819Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-19T22:19:50.34703Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"da400bbece288f5a","local-member-id":"dfc97eb0aae75b33","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-19T22:19:50.347114Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2025-11-19T22:19:50.347198Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2025-11-19T22:19:50.347172Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-19T22:19:50.347229Z","caller":"etcdserver/server.go:2595","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-19T22:19:50.34807Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2025-11-19T22:19:50.348559Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.94.2:2379"}
{"level":"warn","ts":"2025-11-19T22:19:52.006287Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"208.664484ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/leases/kube-system/apiserver-pcqkfx5qiyeeley4bpw5zibjhu\" ","response":"range_response_count:0 size:4"}
{"level":"info","ts":"2025-11-19T22:19:52.0064Z","caller":"traceutil/trace.go:171","msg":"trace[898828708] range","detail":"{range_begin:/registry/leases/kube-system/apiserver-pcqkfx5qiyeeley4bpw5zibjhu; range_end:; response_count:0; response_revision:69; }","duration":"208.799616ms","start":"2025-11-19T22:19:51.797579Z","end":"2025-11-19T22:19:52.006378Z","steps":["trace[898828708] 'range keys from in-memory index tree' (duration: 208.571934ms)"],"step_count":1}
{"level":"warn","ts":"2025-11-19T22:20:07.925909Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"124.040627ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/configmaps/kube-system/coredns\" ","response":"range_response_count:1 size:612"}
{"level":"info","ts":"2025-11-19T22:20:07.925985Z","caller":"traceutil/trace.go:171","msg":"trace[1355111703] range","detail":"{range_begin:/registry/configmaps/kube-system/coredns; range_end:; response_count:1; response_revision:350; }","duration":"124.145953ms","start":"2025-11-19T22:20:07.801823Z","end":"2025-11-19T22:20:07.925969Z","steps":["trace[1355111703] 'range keys from in-memory index tree' (duration: 123.893977ms)"],"step_count":1}
{"level":"info","ts":"2025-11-19T22:20:07.945114Z","caller":"traceutil/trace.go:171","msg":"trace[986567943] transaction","detail":"{read_only:false; response_revision:351; number_of_response:1; }","duration":"142.590181ms","start":"2025-11-19T22:20:07.802499Z","end":"2025-11-19T22:20:07.945089Z","steps":["trace[986567943] 'process raft request' (duration: 142.419431ms)"],"step_count":1}
==> kernel <==
22:20:37 up 1:02, 0 user, load average: 4.27, 3.36, 2.11
Linux old-k8s-version-975700 6.8.0-1043-gcp #46~22.04.1-Ubuntu SMP Wed Oct 22 19:00:03 UTC 2025 x86_64 GNU/Linux
PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"
==> kindnet [9f637c51ffa434a826f6584d8a7faf4701e1f09be3a0f36a1d28e02a37c6fb8d] <==
I1119 22:20:11.957590 1 main.go:109] connected to apiserver: https://10.96.0.1:443
I1119 22:20:11.957822 1 main.go:139] hostIP = 192.168.94.2
podIP = 192.168.94.2
I1119 22:20:11.958041 1 main.go:148] setting mtu 1500 for CNI
I1119 22:20:11.958058 1 main.go:178] kindnetd IP family: "ipv4"
I1119 22:20:11.958074 1 main.go:182] noMask IPv4 subnets: [10.244.0.0/16]
time="2025-11-19T22:20:12Z" level=info msg="Created plugin 10-kube-network-policies (kindnetd, handles RunPodSandbox,RemovePodSandbox)"
I1119 22:20:12.159373 1 controller.go:377] "Starting controller" name="kube-network-policies"
I1119 22:20:12.159514 1 controller.go:381] "Waiting for informer caches to sync"
I1119 22:20:12.159531 1 shared_informer.go:350] "Waiting for caches to sync" controller="kube-network-policies"
I1119 22:20:12.159716 1 controller.go:390] nri plugin exited: failed to connect to NRI service: dial unix /var/run/nri/nri.sock: connect: no such file or directory
I1119 22:20:12.538063 1 shared_informer.go:357] "Caches are synced" controller="kube-network-policies"
I1119 22:20:12.538126 1 metrics.go:72] Registering metrics
I1119 22:20:12.538374 1 controller.go:711] "Syncing nftables rules"
I1119 22:20:22.164952 1 main.go:297] Handling node with IPs: map[192.168.94.2:{}]
I1119 22:20:22.165012 1 main.go:301] handling current node
I1119 22:20:32.161088 1 main.go:297] Handling node with IPs: map[192.168.94.2:{}]
I1119 22:20:32.161124 1 main.go:301] handling current node
==> kube-apiserver [1870cf3b3c44ba81df1590d986f8a70efb48ac5a464f0a3d4d757b18fc420709] <==
I1119 22:19:51.591405 1 apf_controller.go:377] Running API Priority and Fairness config worker
I1119 22:19:51.591414 1 apf_controller.go:380] Running API Priority and Fairness periodic rebalancing process
I1119 22:19:51.591407 1 cache.go:32] Waiting for caches to sync for autoregister controller
I1119 22:19:51.591438 1 cache.go:39] Caches are synced for autoregister controller
I1119 22:19:51.591387 1 shared_informer.go:318] Caches are synced for configmaps
I1119 22:19:51.593118 1 controller.go:624] quota admission added evaluator for: namespaces
E1119 22:19:51.595601 1 controller.go:146] "Failed to ensure lease exists, will retry" err="namespaces \"kube-system\" not found" interval="200ms"
I1119 22:19:51.608554 1 shared_informer.go:318] Caches are synced for node_authorizer
I1119 22:19:52.008399 1 controller.go:624] quota admission added evaluator for: leases.coordination.k8s.io
I1119 22:19:52.497067 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I1119 22:19:52.500707 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I1119 22:19:52.500727 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I1119 22:19:52.938966 1 controller.go:624] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1119 22:19:52.979169 1 controller.go:624] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1119 22:19:53.101027 1 alloc.go:330] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W1119 22:19:53.107157 1 lease.go:263] Resetting endpoints for master service "kubernetes" to [192.168.94.2]
I1119 22:19:53.108241 1 controller.go:624] quota admission added evaluator for: endpoints
I1119 22:19:53.112503 1 controller.go:624] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1119 22:19:53.552446 1 controller.go:624] quota admission added evaluator for: serviceaccounts
I1119 22:19:54.613121 1 controller.go:624] quota admission added evaluator for: deployments.apps
I1119 22:19:54.625563 1 alloc.go:330] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I1119 22:19:54.635960 1 controller.go:624] quota admission added evaluator for: daemonsets.apps
I1119 22:20:06.459115 1 controller.go:624] quota admission added evaluator for: replicasets.apps
I1119 22:20:07.162080 1 controller.go:624] quota admission added evaluator for: controllerrevisions.apps
==> kube-controller-manager [f4532683638eb7620857fe45f4fd3c3ed09ef48600c71e8fb4fb0f9dae88bfb2] <==
I1119 22:20:06.563934 1 event.go:307] "Event occurred" object="kube-system/kube-scheduler-old-k8s-version-975700" fieldPath="" kind="Pod" apiVersion="v1" type="Warning" reason="NodeNotReady" message="Node is not ready"
I1119 22:20:06.565627 1 event.go:307] "Event occurred" object="kube-system/etcd-old-k8s-version-975700" fieldPath="" kind="Pod" apiVersion="v1" type="Warning" reason="NodeNotReady" message="Node is not ready"
I1119 22:20:06.565755 1 event.go:307] "Event occurred" object="kube-system/kube-apiserver-old-k8s-version-975700" fieldPath="" kind="Pod" apiVersion="v1" type="Warning" reason="NodeNotReady" message="Node is not ready"
I1119 22:20:06.609574 1 shared_informer.go:318] Caches are synced for resource quota
I1119 22:20:06.927535 1 shared_informer.go:318] Caches are synced for garbage collector
I1119 22:20:07.000472 1 shared_informer.go:318] Caches are synced for garbage collector
I1119 22:20:07.000512 1 garbagecollector.go:166] "All resource monitors have synced. Proceeding to collect garbage"
I1119 22:20:07.173283 1 event.go:307] "Event occurred" object="kube-system/kube-proxy" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-rnxxf"
I1119 22:20:07.176815 1 event.go:307] "Event occurred" object="kube-system/kindnet" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kindnet-mlzfc"
I1119 22:20:07.368445 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-vbfhv"
I1119 22:20:07.377915 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-8hdh7"
I1119 22:20:07.385341 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="921.876981ms"
I1119 22:20:07.403436 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="18.02637ms"
I1119 22:20:07.403590 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="97µs"
I1119 22:20:08.346162 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set coredns-5dd5756b68 to 1 from 2"
I1119 22:20:08.357372 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: coredns-5dd5756b68-vbfhv"
I1119 22:20:08.366742 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="21.01104ms"
I1119 22:20:08.373376 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="6.555995ms"
I1119 22:20:08.373523 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="103.519µs"
I1119 22:20:22.284386 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="125.689µs"
I1119 22:20:22.302759 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="82.916µs"
I1119 22:20:23.804590 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="9.984643ms"
I1119 22:20:23.825468 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="9.454615ms"
I1119 22:20:23.825553 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="44.257µs"
I1119 22:20:26.560333 1 node_lifecycle_controller.go:1048] "Controller detected that some Nodes are Ready. Exiting master disruption mode"
==> kube-proxy [bfde9418adc9d7aba501fe3c84086b7de3e6632fdd8aabb2eb31e57c6302f8a1] <==
I1119 22:20:08.542091 1 server_others.go:69] "Using iptables proxy"
I1119 22:20:08.554521 1 node.go:141] Successfully retrieved node IP: 192.168.94.2
I1119 22:20:08.579485 1 server.go:632] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I1119 22:20:08.581958 1 server_others.go:152] "Using iptables Proxier"
I1119 22:20:08.581998 1 server_others.go:421] "Detect-local-mode set to ClusterCIDR, but no cluster CIDR for family" ipFamily="IPv6"
I1119 22:20:08.582008 1 server_others.go:438] "Defaulting to no-op detect-local"
I1119 22:20:08.582058 1 proxier.go:251] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
I1119 22:20:08.582375 1 server.go:846] "Version info" version="v1.28.0"
I1119 22:20:08.582389 1 server.go:848] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1119 22:20:08.584350 1 config.go:315] "Starting node config controller"
I1119 22:20:08.584377 1 shared_informer.go:311] Waiting for caches to sync for node config
I1119 22:20:08.584426 1 config.go:188] "Starting service config controller"
I1119 22:20:08.584459 1 shared_informer.go:311] Waiting for caches to sync for service config
I1119 22:20:08.584486 1 config.go:97] "Starting endpoint slice config controller"
I1119 22:20:08.584491 1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
I1119 22:20:08.684578 1 shared_informer.go:318] Caches are synced for service config
I1119 22:20:08.684601 1 shared_informer.go:318] Caches are synced for endpoint slice config
I1119 22:20:08.684577 1 shared_informer.go:318] Caches are synced for node config
==> kube-scheduler [814e6989c64319d934f5f210646b29c75985c3fe82e3642066c6cced56537e32] <==
W1119 22:19:51.558017 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E1119 22:19:51.558302 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
W1119 22:19:51.557982 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E1119 22:19:51.558323 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
W1119 22:19:51.558217 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E1119 22:19:51.558365 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
W1119 22:19:52.378035 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E1119 22:19:52.378068 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
W1119 22:19:52.502983 1 reflector.go:535] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E1119 22:19:52.503017 1 reflector.go:147] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
W1119 22:19:52.577347 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E1119 22:19:52.577387 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
W1119 22:19:52.620635 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E1119 22:19:52.620663 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
W1119 22:19:52.621642 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E1119 22:19:52.621673 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
W1119 22:19:52.622811 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E1119 22:19:52.622838 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
W1119 22:19:52.655572 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E1119 22:19:52.655637 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
W1119 22:19:52.670809 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E1119 22:19:52.670851 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
W1119 22:19:52.738351 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E1119 22:19:52.738419 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
I1119 22:19:55.553708 1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Nov 19 22:20:07 old-k8s-version-975700 kubelet[1560]: I1119 22:20:07.254431 1560 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/e2532f4d-a32b-45a0-b846-1d2ecea1f926-lib-modules\") pod \"kindnet-mlzfc\" (UID: \"e2532f4d-a32b-45a0-b846-1d2ecea1f926\") " pod="kube-system/kindnet-mlzfc"
Nov 19 22:20:07 old-k8s-version-975700 kubelet[1560]: I1119 22:20:07.254510 1560 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9fnz9\" (UniqueName: \"kubernetes.io/projected/f06c0f26-a6bc-4dcb-a9f4-c64b43b4cc1d-kube-api-access-9fnz9\") pod \"kube-proxy-rnxxf\" (UID: \"f06c0f26-a6bc-4dcb-a9f4-c64b43b4cc1d\") " pod="kube-system/kube-proxy-rnxxf"
Nov 19 22:20:07 old-k8s-version-975700 kubelet[1560]: I1119 22:20:07.254561 1560 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-cfg\" (UniqueName: \"kubernetes.io/host-path/e2532f4d-a32b-45a0-b846-1d2ecea1f926-cni-cfg\") pod \"kindnet-mlzfc\" (UID: \"e2532f4d-a32b-45a0-b846-1d2ecea1f926\") " pod="kube-system/kindnet-mlzfc"
Nov 19 22:20:07 old-k8s-version-975700 kubelet[1560]: I1119 22:20:07.254783 1560 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/f06c0f26-a6bc-4dcb-a9f4-c64b43b4cc1d-kube-proxy\") pod \"kube-proxy-rnxxf\" (UID: \"f06c0f26-a6bc-4dcb-a9f4-c64b43b4cc1d\") " pod="kube-system/kube-proxy-rnxxf"
Nov 19 22:20:07 old-k8s-version-975700 kubelet[1560]: I1119 22:20:07.254836 1560 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/f06c0f26-a6bc-4dcb-a9f4-c64b43b4cc1d-xtables-lock\") pod \"kube-proxy-rnxxf\" (UID: \"f06c0f26-a6bc-4dcb-a9f4-c64b43b4cc1d\") " pod="kube-system/kube-proxy-rnxxf"
Nov 19 22:20:07 old-k8s-version-975700 kubelet[1560]: E1119 22:20:07.363793 1560 projected.go:292] Couldn't get configMap kube-system/kube-root-ca.crt: configmap "kube-root-ca.crt" not found
Nov 19 22:20:07 old-k8s-version-975700 kubelet[1560]: E1119 22:20:07.363834 1560 projected.go:198] Error preparing data for projected volume kube-api-access-rpv66 for pod kube-system/kindnet-mlzfc: configmap "kube-root-ca.crt" not found
Nov 19 22:20:07 old-k8s-version-975700 kubelet[1560]: E1119 22:20:07.363943 1560 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/e2532f4d-a32b-45a0-b846-1d2ecea1f926-kube-api-access-rpv66 podName:e2532f4d-a32b-45a0-b846-1d2ecea1f926 nodeName:}" failed. No retries permitted until 2025-11-19 22:20:07.863913255 +0000 UTC m=+13.276094662 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-rpv66" (UniqueName: "kubernetes.io/projected/e2532f4d-a32b-45a0-b846-1d2ecea1f926-kube-api-access-rpv66") pod "kindnet-mlzfc" (UID: "e2532f4d-a32b-45a0-b846-1d2ecea1f926") : configmap "kube-root-ca.crt" not found
Nov 19 22:20:07 old-k8s-version-975700 kubelet[1560]: E1119 22:20:07.364286 1560 projected.go:292] Couldn't get configMap kube-system/kube-root-ca.crt: configmap "kube-root-ca.crt" not found
Nov 19 22:20:07 old-k8s-version-975700 kubelet[1560]: E1119 22:20:07.364311 1560 projected.go:198] Error preparing data for projected volume kube-api-access-9fnz9 for pod kube-system/kube-proxy-rnxxf: configmap "kube-root-ca.crt" not found
Nov 19 22:20:07 old-k8s-version-975700 kubelet[1560]: E1119 22:20:07.364372 1560 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/f06c0f26-a6bc-4dcb-a9f4-c64b43b4cc1d-kube-api-access-9fnz9 podName:f06c0f26-a6bc-4dcb-a9f4-c64b43b4cc1d nodeName:}" failed. No retries permitted until 2025-11-19 22:20:07.864353345 +0000 UTC m=+13.276534732 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-9fnz9" (UniqueName: "kubernetes.io/projected/f06c0f26-a6bc-4dcb-a9f4-c64b43b4cc1d-kube-api-access-9fnz9") pod "kube-proxy-rnxxf" (UID: "f06c0f26-a6bc-4dcb-a9f4-c64b43b4cc1d") : configmap "kube-root-ca.crt" not found
Nov 19 22:20:08 old-k8s-version-975700 kubelet[1560]: I1119 22:20:08.753381 1560 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kube-proxy-rnxxf" podStartSLOduration=1.753327393 podCreationTimestamp="2025-11-19 22:20:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-19 22:20:08.753080476 +0000 UTC m=+14.165261906" watchObservedRunningTime="2025-11-19 22:20:08.753327393 +0000 UTC m=+14.165508800"
Nov 19 22:20:12 old-k8s-version-975700 kubelet[1560]: I1119 22:20:12.861606 1560 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kindnet-mlzfc" podStartSLOduration=2.782502482 podCreationTimestamp="2025-11-19 22:20:07 +0000 UTC" firstStartedPulling="2025-11-19 22:20:08.564687803 +0000 UTC m=+13.976869202" lastFinishedPulling="2025-11-19 22:20:11.643733018 +0000 UTC m=+17.055914418" observedRunningTime="2025-11-19 22:20:12.861400313 +0000 UTC m=+18.273581719" watchObservedRunningTime="2025-11-19 22:20:12.861547698 +0000 UTC m=+18.273729104"
Nov 19 22:20:22 old-k8s-version-975700 kubelet[1560]: I1119 22:20:22.261744 1560 kubelet_node_status.go:493] "Fast updating node status as it just became ready"
Nov 19 22:20:22 old-k8s-version-975700 kubelet[1560]: I1119 22:20:22.283141 1560 topology_manager.go:215] "Topology Admit Handler" podUID="6c937194-8889-47a0-b05f-7af799e18044" podNamespace="kube-system" podName="storage-provisioner"
Nov 19 22:20:22 old-k8s-version-975700 kubelet[1560]: I1119 22:20:22.284839 1560 topology_manager.go:215] "Topology Admit Handler" podUID="a4057bf2-fe2e-42db-83e9-bc625724c61c" podNamespace="kube-system" podName="coredns-5dd5756b68-8hdh7"
Nov 19 22:20:22 old-k8s-version-975700 kubelet[1560]: I1119 22:20:22.465780 1560 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xbjsb\" (UniqueName: \"kubernetes.io/projected/6c937194-8889-47a0-b05f-7af799e18044-kube-api-access-xbjsb\") pod \"storage-provisioner\" (UID: \"6c937194-8889-47a0-b05f-7af799e18044\") " pod="kube-system/storage-provisioner"
Nov 19 22:20:22 old-k8s-version-975700 kubelet[1560]: I1119 22:20:22.465975 1560 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zd7zm\" (UniqueName: \"kubernetes.io/projected/a4057bf2-fe2e-42db-83e9-bc625724c61c-kube-api-access-zd7zm\") pod \"coredns-5dd5756b68-8hdh7\" (UID: \"a4057bf2-fe2e-42db-83e9-bc625724c61c\") " pod="kube-system/coredns-5dd5756b68-8hdh7"
Nov 19 22:20:22 old-k8s-version-975700 kubelet[1560]: I1119 22:20:22.466031 1560 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/6c937194-8889-47a0-b05f-7af799e18044-tmp\") pod \"storage-provisioner\" (UID: \"6c937194-8889-47a0-b05f-7af799e18044\") " pod="kube-system/storage-provisioner"
Nov 19 22:20:22 old-k8s-version-975700 kubelet[1560]: I1119 22:20:22.466065 1560 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a4057bf2-fe2e-42db-83e9-bc625724c61c-config-volume\") pod \"coredns-5dd5756b68-8hdh7\" (UID: \"a4057bf2-fe2e-42db-83e9-bc625724c61c\") " pod="kube-system/coredns-5dd5756b68-8hdh7"
Nov 19 22:20:22 old-k8s-version-975700 kubelet[1560]: I1119 22:20:22.790518 1560 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=14.790461437 podCreationTimestamp="2025-11-19 22:20:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-19 22:20:22.789226683 +0000 UTC m=+28.201408091" watchObservedRunningTime="2025-11-19 22:20:22.790461437 +0000 UTC m=+28.202642846"
Nov 19 22:20:23 old-k8s-version-975700 kubelet[1560]: I1119 22:20:23.794502 1560 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/coredns-5dd5756b68-8hdh7" podStartSLOduration=16.794448045 podCreationTimestamp="2025-11-19 22:20:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-19 22:20:23.792204756 +0000 UTC m=+29.204386163" watchObservedRunningTime="2025-11-19 22:20:23.794448045 +0000 UTC m=+29.206629453"
Nov 19 22:20:25 old-k8s-version-975700 kubelet[1560]: I1119 22:20:25.822716 1560 topology_manager.go:215] "Topology Admit Handler" podUID="b49caea0-80e8-4473-ac1f-f9bd327c3754" podNamespace="default" podName="busybox"
Nov 19 22:20:25 old-k8s-version-975700 kubelet[1560]: I1119 22:20:25.990052 1560 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-87p55\" (UniqueName: \"kubernetes.io/projected/b49caea0-80e8-4473-ac1f-f9bd327c3754-kube-api-access-87p55\") pod \"busybox\" (UID: \"b49caea0-80e8-4473-ac1f-f9bd327c3754\") " pod="default/busybox"
Nov 19 22:20:28 old-k8s-version-975700 kubelet[1560]: I1119 22:20:28.806269 1560 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="default/busybox" podStartSLOduration=1.691001227 podCreationTimestamp="2025-11-19 22:20:25 +0000 UTC" firstStartedPulling="2025-11-19 22:20:26.263867005 +0000 UTC m=+31.676048399" lastFinishedPulling="2025-11-19 22:20:28.379090043 +0000 UTC m=+33.791271442" observedRunningTime="2025-11-19 22:20:28.805872451 +0000 UTC m=+34.218053858" watchObservedRunningTime="2025-11-19 22:20:28.80622427 +0000 UTC m=+34.218405676"
==> storage-provisioner [537c778c87f9d8c20894001938b5632c0e5dcc6b1095fb4d266fd4b3995811b2] <==
I1119 22:20:22.762742 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1119 22:20:22.772216 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1119 22:20:22.772484 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I1119 22:20:22.782676 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I1119 22:20:22.782729 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"750e6d2d-dbb6-45a4-b78a-de5bffe0f948", APIVersion:"v1", ResourceVersion:"406", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' old-k8s-version-975700_aeb53126-798f-4b08-be45-abf6358cfbca became leader
I1119 22:20:22.782814 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_old-k8s-version-975700_aeb53126-798f-4b08-be45-abf6358cfbca!
I1119 22:20:22.883137 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_old-k8s-version-975700_aeb53126-798f-4b08-be45-abf6358cfbca!
-- /stdout --
helpers_test.go:262: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p old-k8s-version-975700 -n old-k8s-version-975700
helpers_test.go:269: (dbg) Run: kubectl --context old-k8s-version-975700 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:293: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: end of post-mortem logs <<<
helpers_test.go:294: ---------------------/post-mortem---------------------------------
--- FAIL: TestStartStop/group/old-k8s-version/serial/DeployApp (13.09s)