=== RUN TestStartStop/group/old-k8s-version/serial/DeployApp
start_stop_delete_test.go:194: (dbg) Run: kubectl --context old-k8s-version-012258 create -f testdata/busybox.yaml
start_stop_delete_test.go:194: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:352: "busybox" [fa895e52-0bff-4604-8b62-fd0f087015e8] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
helpers_test.go:352: "busybox" [fa895e52-0bff-4604-8b62-fd0f087015e8] Running
start_stop_delete_test.go:194: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: integration-test=busybox healthy within 9.004215918s
start_stop_delete_test.go:194: (dbg) Run: kubectl --context old-k8s-version-012258 exec busybox -- /bin/sh -c "ulimit -n"
start_stop_delete_test.go:194: 'ulimit -n' returned 1024, expected 1048576
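(This line is the actual failure: the soft open-file limit inside the busybox pod came back as 1024 rather than the expected 1048576. A minimal manual re-check against the same profile, reusing the profile and pod names from the log above:

    kubectl --context old-k8s-version-012258 exec busybox -- /bin/sh -c "ulimit -n"   # soft nofile limit inside the pod
    docker exec old-k8s-version-012258 sh -c "ulimit -n"                              # soft nofile limit inside the kic node container

The docker inspect output in the post-mortem below shows "Ulimits": [] on the node container, i.e. no explicit nofile override was set when the container was created, which is consistent with the default limit leaking through.)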
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: docker inspect <======
helpers_test.go:239: (dbg) Run: docker inspect old-k8s-version-012258
helpers_test.go:243: (dbg) docker inspect old-k8s-version-012258:
-- stdout --
[
{
"Id": "b631b0b0e9d5aafe8f15c853910a13c50352a0ccce9accbcd62a4ea770c78c5d",
"Created": "2025-11-21T14:29:18.305605728Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 251679,
"ExitCode": 0,
"Error": "",
"StartedAt": "2025-11-21T14:29:18.348841908Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:a368e3d71517ce17114afb6c9921965419df972dd0e2d32a9973a8946f0910a3",
"ResolvConfPath": "/var/lib/docker/containers/b631b0b0e9d5aafe8f15c853910a13c50352a0ccce9accbcd62a4ea770c78c5d/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/b631b0b0e9d5aafe8f15c853910a13c50352a0ccce9accbcd62a4ea770c78c5d/hostname",
"HostsPath": "/var/lib/docker/containers/b631b0b0e9d5aafe8f15c853910a13c50352a0ccce9accbcd62a4ea770c78c5d/hosts",
"LogPath": "/var/lib/docker/containers/b631b0b0e9d5aafe8f15c853910a13c50352a0ccce9accbcd62a4ea770c78c5d/b631b0b0e9d5aafe8f15c853910a13c50352a0ccce9accbcd62a4ea770c78c5d-json.log",
"Name": "/old-k8s-version-012258",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"old-k8s-version-012258:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {
"max-size": "100m"
}
},
"NetworkMode": "old-k8s-version-012258",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "private",
"Dns": null,
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 3221225472,
"NanoCpus": 0,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 6442450944,
"MemorySwappiness": null,
"OomKillDisable": null,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"ID": "b631b0b0e9d5aafe8f15c853910a13c50352a0ccce9accbcd62a4ea770c78c5d",
"LowerDir": "/var/lib/docker/overlay2/4ea3913a068d8b871d800eefdd7cdd11e4851e7b5031ea166038678d2b0108e1-init/diff:/var/lib/docker/overlay2/a649757dd9587fa5a20ca8a56ec1923099f2a5e912dc7e8e1dfa08e79248b59f/diff",
"MergedDir": "/var/lib/docker/overlay2/4ea3913a068d8b871d800eefdd7cdd11e4851e7b5031ea166038678d2b0108e1/merged",
"UpperDir": "/var/lib/docker/overlay2/4ea3913a068d8b871d800eefdd7cdd11e4851e7b5031ea166038678d2b0108e1/diff",
"WorkDir": "/var/lib/docker/overlay2/4ea3913a068d8b871d800eefdd7cdd11e4851e7b5031ea166038678d2b0108e1/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "old-k8s-version-012258",
"Source": "/var/lib/docker/volumes/old-k8s-version-012258/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "old-k8s-version-012258",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "old-k8s-version-012258",
"name.minikube.sigs.k8s.io": "old-k8s-version-012258",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"SandboxID": "46765a8ec6da2ef06d0a63c5e792b68206b48e74aeaeb299bf506ff70e7dcffd",
"SandboxKey": "/var/run/docker/netns/46765a8ec6da",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33060"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33061"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33064"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33062"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33063"
}
]
},
"Networks": {
"old-k8s-version-012258": {
"IPAMConfig": {
"IPv4Address": "192.168.94.2",
"IPv6Address": ""
},
"Links": null,
"Aliases": null,
"DriverOpts": null,
"GwPriority": 0,
"NetworkID": "ecee753316979a1bb886a50ec401a80f6274b9bc39c4a8bb1732e91064c178b9",
"EndpointID": "c92e22445c114f178de1b5adf2a20b74000e44859ae25f57affa69d30eb60100",
"Gateway": "192.168.94.1",
"IPAddress": "192.168.94.2",
"MacAddress": "9e:cd:46:05:9b:55",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"old-k8s-version-012258",
"b631b0b0e9d5"
]
}
}
}
}
]
-- /stdout --
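(One field in the inspect output above is worth flagging for the failed ulimit assertion: "HostConfig.Ulimits" is empty, meaning Docker applied no per-container nofile override, so the limits seen inside the container descend from the daemon and runtime defaults. To query just that field, assuming the container still exists:

    docker inspect -f '{{json .HostConfig.Ulimits}}' old-k8s-version-012258)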
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p old-k8s-version-012258 -n old-k8s-version-012258
helpers_test.go:252: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-amd64 -p old-k8s-version-012258 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-amd64 -p old-k8s-version-012258 logs -n 25: (1.212323377s)
helpers_test.go:260: TestStartStop/group/old-k8s-version/serial/DeployApp logs:
-- stdout --
==> Audit <==
┌─────────┬────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬──────────────────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼──────────────────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ ssh │ -p cilium-459127 sudo systemctl cat containerd --no-pager │ cilium-459127 │ jenkins │ v1.37.0 │ 21 Nov 25 14:28 UTC │ │
│ ssh │ -p cilium-459127 sudo cat /lib/systemd/system/containerd.service │ cilium-459127 │ jenkins │ v1.37.0 │ 21 Nov 25 14:28 UTC │ │
│ ssh │ -p cilium-459127 sudo cat /etc/containerd/config.toml │ cilium-459127 │ jenkins │ v1.37.0 │ 21 Nov 25 14:28 UTC │ │
│ ssh │ -p cilium-459127 sudo containerd config dump │ cilium-459127 │ jenkins │ v1.37.0 │ 21 Nov 25 14:28 UTC │ │
│ ssh │ -p cilium-459127 sudo systemctl status crio --all --full --no-pager │ cilium-459127 │ jenkins │ v1.37.0 │ 21 Nov 25 14:28 UTC │ │
│ delete │ -p cert-expiration-371956 │ cert-expiration-371956 │ jenkins │ v1.37.0 │ 21 Nov 25 14:28 UTC │ 21 Nov 25 14:28 UTC │
│ ssh │ -p cilium-459127 sudo systemctl cat crio --no-pager │ cilium-459127 │ jenkins │ v1.37.0 │ 21 Nov 25 14:28 UTC │ │
│ ssh │ -p cilium-459127 sudo find /etc/crio -type f -exec sh -c 'echo {}; cat {}' \; │ cilium-459127 │ jenkins │ v1.37.0 │ 21 Nov 25 14:28 UTC │ │
│ ssh │ -p cilium-459127 sudo crio config │ cilium-459127 │ jenkins │ v1.37.0 │ 21 Nov 25 14:28 UTC │ │
│ delete │ -p cilium-459127 │ cilium-459127 │ jenkins │ v1.37.0 │ 21 Nov 25 14:28 UTC │ 21 Nov 25 14:28 UTC │
│ start │ -p cert-options-733993 --memory=3072 --apiserver-ips=127.0.0.1 --apiserver-ips=192.168.15.15 --apiserver-names=localhost --apiserver-names=www.google.com --apiserver-port=8555 --driver=docker --container-runtime=containerd │ cert-options-733993 │ jenkins │ v1.37.0 │ 21 Nov 25 14:28 UTC │ 21 Nov 25 14:29 UTC │
│ start │ -p force-systemd-flag-730471 --memory=3072 --force-systemd --alsologtostderr -v=5 --driver=docker --container-runtime=containerd │ force-systemd-flag-730471 │ jenkins │ v1.37.0 │ 21 Nov 25 14:28 UTC │ 21 Nov 25 14:29 UTC │
│ ssh │ -p NoKubernetes-187733 sudo systemctl is-active --quiet service kubelet │ NoKubernetes-187733 │ jenkins │ v1.37.0 │ 21 Nov 25 14:28 UTC │ │
│ stop │ -p NoKubernetes-187733 │ NoKubernetes-187733 │ jenkins │ v1.37.0 │ 21 Nov 25 14:28 UTC │ 21 Nov 25 14:29 UTC │
│ start │ -p NoKubernetes-187733 --driver=docker --container-runtime=containerd │ NoKubernetes-187733 │ jenkins │ v1.37.0 │ 21 Nov 25 14:29 UTC │ 21 Nov 25 14:29 UTC │
│ ssh │ -p NoKubernetes-187733 sudo systemctl is-active --quiet service kubelet │ NoKubernetes-187733 │ jenkins │ v1.37.0 │ 21 Nov 25 14:29 UTC │ │
│ delete │ -p NoKubernetes-187733 │ NoKubernetes-187733 │ jenkins │ v1.37.0 │ 21 Nov 25 14:29 UTC │ 21 Nov 25 14:29 UTC │
│ start │ -p old-k8s-version-012258 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.28.0 │ old-k8s-version-012258 │ jenkins │ v1.37.0 │ 21 Nov 25 14:29 UTC │ 21 Nov 25 14:30 UTC │
│ ssh │ cert-options-733993 ssh openssl x509 -text -noout -in /var/lib/minikube/certs/apiserver.crt │ cert-options-733993 │ jenkins │ v1.37.0 │ 21 Nov 25 14:29 UTC │ 21 Nov 25 14:29 UTC │
│ ssh │ -p cert-options-733993 -- sudo cat /etc/kubernetes/admin.conf │ cert-options-733993 │ jenkins │ v1.37.0 │ 21 Nov 25 14:29 UTC │ 21 Nov 25 14:29 UTC │
│ delete │ -p cert-options-733993 │ cert-options-733993 │ jenkins │ v1.37.0 │ 21 Nov 25 14:29 UTC │ 21 Nov 25 14:29 UTC │
│ start │ -p no-preload-921956 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.34.1 │ no-preload-921956 │ jenkins │ v1.37.0 │ 21 Nov 25 14:29 UTC │ 21 Nov 25 14:30 UTC │
│ ssh │ force-systemd-flag-730471 ssh cat /etc/containerd/config.toml │ force-systemd-flag-730471 │ jenkins │ v1.37.0 │ 21 Nov 25 14:29 UTC │ 21 Nov 25 14:29 UTC │
│ delete │ -p force-systemd-flag-730471 │ force-systemd-flag-730471 │ jenkins │ v1.37.0 │ 21 Nov 25 14:29 UTC │ 21 Nov 25 14:29 UTC │
│ start │ -p default-k8s-diff-port-376255 --memory=3072 --alsologtostderr --wait=true --apiserver-port=8444 --driver=docker --container-runtime=containerd --kubernetes-version=v1.34.1 │ default-k8s-diff-port-376255 │ jenkins │ v1.37.0 │ 21 Nov 25 14:29 UTC │ 21 Nov 25 14:30 UTC │
└─────────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/11/21 14:29:24
Running on machine: ubuntu-20-agent-6
Binary: Built with gc go1.24.6 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1121 14:29:24.877938 255774 out.go:360] Setting OutFile to fd 1 ...
I1121 14:29:24.878133 255774 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1121 14:29:24.878179 255774 out.go:374] Setting ErrFile to fd 2...
I1121 14:29:24.878200 255774 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1121 14:29:24.879901 255774 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21847-11004/.minikube/bin
I1121 14:29:24.881344 255774 out.go:368] Setting JSON to false
I1121 14:29:24.883254 255774 start.go:133] hostinfo: {"hostname":"ubuntu-20-agent-6","uptime":4307,"bootTime":1763731058,"procs":299,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1044-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I1121 14:29:24.883372 255774 start.go:143] virtualization: kvm guest
I1121 14:29:24.885483 255774 out.go:179] * [default-k8s-diff-port-376255] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
I1121 14:29:24.887201 255774 notify.go:221] Checking for updates...
I1121 14:29:24.887242 255774 out.go:179] - MINIKUBE_LOCATION=21847
I1121 14:29:24.890729 255774 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1121 14:29:24.892963 255774 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21847-11004/kubeconfig
I1121 14:29:24.894677 255774 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21847-11004/.minikube
I1121 14:29:24.897870 255774 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64
I1121 14:29:24.899765 255774 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1121 14:29:24.902854 255774 config.go:182] Loaded profile config "kubernetes-upgrade-797080": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1121 14:29:24.903030 255774 config.go:182] Loaded profile config "no-preload-921956": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1121 14:29:24.903162 255774 config.go:182] Loaded profile config "old-k8s-version-012258": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1121 14:29:24.903312 255774 driver.go:422] Setting default libvirt URI to qemu:///system
I1121 14:29:24.939143 255774 docker.go:124] docker version: linux-29.0.2:Docker Engine - Community
I1121 14:29:24.939248 255774 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1121 14:29:25.025144 255774 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:3 ContainersRunning:3 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:80 OomKillDisable:false NGoroutines:92 SystemTime:2025-11-21 14:29:25.01035373 +0000 UTC LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-1044-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652072448 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-6 Labels:[] ExperimentalBuild:false ServerVersion:29.0.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:fcd43222d6b07379a4be9786bda52438f0dd16a1 Expected:} RuncCommit:{ID:v1.3.3-0-gd842d771 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.30.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.40.3] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v1.0.1] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1121 14:29:25.025295 255774 docker.go:319] overlay module found
I1121 14:29:25.027378 255774 out.go:179] * Using the docker driver based on user configuration
I1121 14:29:22.611340 249617 main.go:143] libmachine: SSH cmd err, output: <nil>: old-k8s-version-012258
I1121 14:29:22.611365 249617 ubuntu.go:182] provisioning hostname "old-k8s-version-012258"
I1121 14:29:22.611426 249617 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-012258
I1121 14:29:22.635589 249617 main.go:143] libmachine: Using SSH client type: native
I1121 14:29:22.635869 249617 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x8416e0] 0x8443c0 <nil> [] 0s} 127.0.0.1 33060 <nil> <nil>}
I1121 14:29:22.635891 249617 main.go:143] libmachine: About to run SSH command:
sudo hostname old-k8s-version-012258 && echo "old-k8s-version-012258" | sudo tee /etc/hostname
I1121 14:29:22.796661 249617 main.go:143] libmachine: SSH cmd err, output: <nil>: old-k8s-version-012258
I1121 14:29:22.796754 249617 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-012258
I1121 14:29:22.822578 249617 main.go:143] libmachine: Using SSH client type: native
I1121 14:29:22.822834 249617 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x8416e0] 0x8443c0 <nil> [] 0s} 127.0.0.1 33060 <nil> <nil>}
I1121 14:29:22.822860 249617 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sold-k8s-version-012258' /etc/hosts; then
  if grep -xq '127.0.1.1\s.*' /etc/hosts; then
    sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 old-k8s-version-012258/g' /etc/hosts;
  else
    echo '127.0.1.1 old-k8s-version-012258' | sudo tee -a /etc/hosts;
  fi
fi
I1121 14:29:22.970644 249617 main.go:143] libmachine: SSH cmd err, output: <nil>:
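(A quick way to confirm the /etc/hosts edit above landed is to grep it back over the profile's ssh path; a hypothetical follow-up, not part of the recorded test flow:

    out/minikube-linux-amd64 -p old-k8s-version-012258 ssh -- grep old-k8s-version-012258 /etc/hosts)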
I1121 14:29:22.970676 249617 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21847-11004/.minikube CaCertPath:/home/jenkins/minikube-integration/21847-11004/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21847-11004/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21847-11004/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21847-11004/.minikube}
I1121 14:29:22.970732 249617 ubuntu.go:190] setting up certificates
I1121 14:29:22.970743 249617 provision.go:84] configureAuth start
I1121 14:29:22.970826 249617 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-012258
I1121 14:29:22.991118 249617 provision.go:143] copyHostCerts
I1121 14:29:22.991183 249617 exec_runner.go:144] found /home/jenkins/minikube-integration/21847-11004/.minikube/cert.pem, removing ...
I1121 14:29:22.991193 249617 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21847-11004/.minikube/cert.pem
I1121 14:29:22.991250 249617 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21847-11004/.minikube/cert.pem (1123 bytes)
I1121 14:29:22.991367 249617 exec_runner.go:144] found /home/jenkins/minikube-integration/21847-11004/.minikube/key.pem, removing ...
I1121 14:29:22.991381 249617 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21847-11004/.minikube/key.pem
I1121 14:29:22.991414 249617 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21847-11004/.minikube/key.pem (1675 bytes)
I1121 14:29:22.991488 249617 exec_runner.go:144] found /home/jenkins/minikube-integration/21847-11004/.minikube/ca.pem, removing ...
I1121 14:29:22.991499 249617 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21847-11004/.minikube/ca.pem
I1121 14:29:22.991526 249617 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21847-11004/.minikube/ca.pem (1078 bytes)
I1121 14:29:22.991627 249617 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21847-11004/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21847-11004/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21847-11004/.minikube/certs/ca-key.pem org=jenkins.old-k8s-version-012258 san=[127.0.0.1 192.168.94.2 localhost minikube old-k8s-version-012258]
I1121 14:29:23.140756 249617 provision.go:177] copyRemoteCerts
I1121 14:29:23.140833 249617 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1121 14:29:23.140885 249617 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-012258
I1121 14:29:23.161751 249617 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33060 SSHKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/machines/old-k8s-version-012258/id_rsa Username:docker}
I1121 14:29:23.269718 249617 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I1121 14:29:23.292619 249617 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/machines/server.pem --> /etc/docker/server.pem (1233 bytes)
I1121 14:29:23.314336 249617 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I1121 14:29:23.337086 249617 provision.go:87] duration metric: took 366.309314ms to configureAuth
I1121 14:29:23.337129 249617 ubuntu.go:206] setting minikube options for container-runtime
I1121 14:29:23.337306 249617 config.go:182] Loaded profile config "old-k8s-version-012258": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1121 14:29:23.337320 249617 machine.go:97] duration metric: took 3.89496072s to provisionDockerMachine
I1121 14:29:23.337326 249617 client.go:176] duration metric: took 11.527957207s to LocalClient.Create
I1121 14:29:23.337344 249617 start.go:167] duration metric: took 11.528071392s to libmachine.API.Create "old-k8s-version-012258"
I1121 14:29:23.337352 249617 start.go:293] postStartSetup for "old-k8s-version-012258" (driver="docker")
I1121 14:29:23.337365 249617 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1121 14:29:23.337422 249617 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1121 14:29:23.337471 249617 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-012258
I1121 14:29:23.359217 249617 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33060 SSHKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/machines/old-k8s-version-012258/id_rsa Username:docker}
I1121 14:29:23.466089 249617 ssh_runner.go:195] Run: cat /etc/os-release
I1121 14:29:23.470146 249617 main.go:143] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1121 14:29:23.470174 249617 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1121 14:29:23.470185 249617 filesync.go:126] Scanning /home/jenkins/minikube-integration/21847-11004/.minikube/addons for local assets ...
I1121 14:29:23.470249 249617 filesync.go:126] Scanning /home/jenkins/minikube-integration/21847-11004/.minikube/files for local assets ...
I1121 14:29:23.470349 249617 filesync.go:149] local asset: /home/jenkins/minikube-integration/21847-11004/.minikube/files/etc/ssl/certs/145232.pem -> 145232.pem in /etc/ssl/certs
I1121 14:29:23.470480 249617 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1121 14:29:23.479086 249617 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/files/etc/ssl/certs/145232.pem --> /etc/ssl/certs/145232.pem (1708 bytes)
I1121 14:29:23.506776 249617 start.go:296] duration metric: took 169.402964ms for postStartSetup
I1121 14:29:23.507166 249617 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-012258
I1121 14:29:23.527044 249617 profile.go:143] Saving config to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/config.json ...
I1121 14:29:23.527374 249617 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1121 14:29:23.527425 249617 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-012258
I1121 14:29:23.546669 249617 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33060 SSHKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/machines/old-k8s-version-012258/id_rsa Username:docker}
I1121 14:29:23.645314 249617 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1121 14:29:23.650498 249617 start.go:128] duration metric: took 11.844529266s to createHost
I1121 14:29:23.650523 249617 start.go:83] releasing machines lock for "old-k8s-version-012258", held for 11.844683904s
I1121 14:29:23.650592 249617 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-012258
I1121 14:29:23.671161 249617 ssh_runner.go:195] Run: cat /version.json
I1121 14:29:23.671227 249617 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-012258
I1121 14:29:23.671321 249617 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1121 14:29:23.671403 249617 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-012258
I1121 14:29:23.694189 249617 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33060 SSHKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/machines/old-k8s-version-012258/id_rsa Username:docker}
I1121 14:29:23.694196 249617 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33060 SSHKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/machines/old-k8s-version-012258/id_rsa Username:docker}
I1121 14:29:23.856609 249617 ssh_runner.go:195] Run: systemctl --version
I1121 14:29:23.863273 249617 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1121 14:29:23.867917 249617 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1121 14:29:23.867991 249617 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1121 14:29:23.895679 249617 cni.go:262] disabled [/etc/cni/net.d/10-crio-bridge.conflist.disabled, /etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
I1121 14:29:23.895707 249617 start.go:496] detecting cgroup driver to use...
I1121 14:29:23.895742 249617 detect.go:190] detected "systemd" cgroup driver on host os
I1121 14:29:23.895805 249617 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1121 14:29:23.911897 249617 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1121 14:29:23.925350 249617 docker.go:218] disabling cri-docker service (if available) ...
I1121 14:29:23.925400 249617 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1121 14:29:23.943424 249617 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1121 14:29:23.962675 249617 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1121 14:29:24.059689 249617 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1121 14:29:24.169263 249617 docker.go:234] disabling docker service ...
I1121 14:29:24.169325 249617 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1121 14:29:24.191949 249617 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1121 14:29:24.206181 249617 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1121 14:29:24.319402 249617 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1121 14:29:24.455060 249617 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1121 14:29:24.472888 249617 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1121 14:29:24.497138 249617 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.9"|' /etc/containerd/config.toml"
I1121 14:29:24.524424 249617 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1121 14:29:24.536491 249617 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I1121 14:29:24.536702 249617 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I1121 14:29:24.547193 249617 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1121 14:29:24.559919 249617 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1121 14:29:24.571627 249617 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1121 14:29:24.581977 249617 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1121 14:29:24.629839 249617 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1121 14:29:24.640310 249617 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1121 14:29:24.650595 249617 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1121 14:29:24.660801 249617 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1121 14:29:24.669493 249617 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1121 14:29:24.677810 249617 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1121 14:29:24.781513 249617 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1121 14:29:24.929576 249617 start.go:543] Will wait 60s for socket path /run/containerd/containerd.sock
I1121 14:29:24.929707 249617 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I1121 14:29:24.936782 249617 start.go:564] Will wait 60s for crictl version
I1121 14:29:24.936893 249617 ssh_runner.go:195] Run: which crictl
I1121 14:29:24.942453 249617 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1121 14:29:24.986447 249617 start.go:580] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: v2.1.5
RuntimeApiVersion: v1
I1121 14:29:24.986527 249617 ssh_runner.go:195] Run: containerd --version
I1121 14:29:25.018021 249617 ssh_runner.go:195] Run: containerd --version
I1121 14:29:25.051308 249617 out.go:179] * Preparing Kubernetes v1.28.0 on containerd 2.1.5 ...
I1121 14:29:25.029036 255774 start.go:309] selected driver: docker
I1121 14:29:25.029056 255774 start.go:930] validating driver "docker" against <nil>
I1121 14:29:25.029071 255774 start.go:941] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1121 14:29:25.029977 255774 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1121 14:29:25.123370 255774 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:3 ContainersRunning:3 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:80 OomKillDisable:false NGoroutines:92 SystemTime:2025-11-21 14:29:25.11156096 +0000 UTC LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-1044-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652072448 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-6 Labels:[] ExperimentalBuild:false ServerVersion:29.0.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:fcd43222d6b07379a4be9786bda52438f0dd16a1 Expected:} RuncCommit:{ID:v1.3.3-0-gd842d771 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.30.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.40.3] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v1.0.1] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1121 14:29:25.123696 255774 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I1121 14:29:25.124078 255774 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1121 14:29:25.125758 255774 out.go:179] * Using Docker driver with root privileges
I1121 14:29:25.127166 255774 cni.go:84] Creating CNI manager for ""
I1121 14:29:25.127249 255774 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1121 14:29:25.127262 255774 start_flags.go:336] Found "CNI" CNI - setting NetworkPlugin=cni
I1121 14:29:25.127353 255774 start.go:353] cluster config:
{Name:default-k8s-diff-port-376255 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8444 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:default-k8s-diff-port-376255 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8444 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1121 14:29:25.129454 255774 out.go:179] * Starting "default-k8s-diff-port-376255" primary control-plane node in "default-k8s-diff-port-376255" cluster
I1121 14:29:25.130961 255774 cache.go:134] Beginning downloading kic base image for docker with containerd
I1121 14:29:25.132637 255774 out.go:179] * Pulling base image v0.0.48-1763507788-21924 ...
I1121 14:29:25.134190 255774 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
I1121 14:29:25.134237 255774 preload.go:203] Found local preload: /home/jenkins/minikube-integration/21847-11004/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-containerd-overlay2-amd64.tar.lz4
I1121 14:29:25.134251 255774 cache.go:65] Caching tarball of preloaded images
I1121 14:29:25.134262 255774 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a in local docker daemon
I1121 14:29:25.134379 255774 preload.go:238] Found /home/jenkins/minikube-integration/21847-11004/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-containerd-overlay2-amd64.tar.lz4 in cache, skipping download
I1121 14:29:25.134391 255774 cache.go:68] Finished verifying existence of preloaded tar for v1.34.1 on containerd
I1121 14:29:25.134520 255774 profile.go:143] Saving config to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/config.json ...
I1121 14:29:25.134560 255774 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/config.json: {Name:mk1db0ba6952ac549a7eae06783e73916a7ad392 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:25.161339 255774 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a in local docker daemon, skipping pull
I1121 14:29:25.161363 255774 cache.go:158] gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a exists in daemon, skipping load
I1121 14:29:25.161384 255774 cache.go:243] Successfully downloaded all kic artifacts
I1121 14:29:25.161419 255774 start.go:360] acquireMachinesLock for default-k8s-diff-port-376255: {Name:mka18b3ecaec4bae205bc7951f90400738bef300 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1121 14:29:25.161518 255774 start.go:364] duration metric: took 79.824µs to acquireMachinesLock for "default-k8s-diff-port-376255"
I1121 14:29:25.161561 255774 start.go:93] Provisioning new machine with config: &{Name:default-k8s-diff-port-376255 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8444 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:default-k8s-diff-port-376255 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8444 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8444 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1121 14:29:25.161653 255774 start.go:125] createHost starting for "" (driver="docker")
I1121 14:29:25.055066 249617 cli_runner.go:164] Run: docker network inspect old-k8s-version-012258 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1121 14:29:25.085953 249617 ssh_runner.go:195] Run: grep 192.168.94.1 host.minikube.internal$ /etc/hosts
I1121 14:29:25.093859 249617 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.94.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1121 14:29:25.111432 249617 kubeadm.go:884] updating cluster {Name:old-k8s-version-012258 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-012258 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.94.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1121 14:29:25.111671 249617 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1121 14:29:25.111753 249617 ssh_runner.go:195] Run: sudo crictl images --output json
I1121 14:29:25.143860 249617 containerd.go:627] all images are preloaded for containerd runtime.
I1121 14:29:25.143888 249617 containerd.go:534] Images already preloaded, skipping extraction
I1121 14:29:25.143953 249617 ssh_runner.go:195] Run: sudo crictl images --output json
I1121 14:29:25.174770 249617 containerd.go:627] all images are preloaded for containerd runtime.
I1121 14:29:25.174789 249617 cache_images.go:86] Images are preloaded, skipping loading
I1121 14:29:25.174797 249617 kubeadm.go:935] updating node { 192.168.94.2 8443 v1.28.0 containerd true true} ...
I1121 14:29:25.174897 249617 kubeadm.go:947] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.28.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=old-k8s-version-012258 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.94.2
[Install]
config:
{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-012258 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
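(To read the kubelet unit and its drop-in as actually installed on the node, the same ssh pattern the audit table shows for containerd applies here; a sketch, assuming the profile is still running:

    out/minikube-linux-amd64 -p old-k8s-version-012258 ssh -- sudo systemctl cat kubelet --no-pager

The drop-in itself is copied to /etc/systemd/system/kubelet.service.d/10-kubeadm.conf a few lines further down.)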
I1121 14:29:25.174970 249617 ssh_runner.go:195] Run: sudo crictl info
I1121 14:29:25.211311 249617 cni.go:84] Creating CNI manager for ""
I1121 14:29:25.211341 249617 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1121 14:29:25.211371 249617 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1121 14:29:25.211401 249617 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.94.2 APIServerPort:8443 KubernetesVersion:v1.28.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:old-k8s-version-012258 NodeName:old-k8s-version-012258 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.94.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.94.2 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1121 14:29:25.211596 249617 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.94.2
  bindPort: 8443
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
nodeRegistration:
  criSocket: unix:///run/containerd/containerd.sock
  name: "old-k8s-version-012258"
  kubeletExtraArgs:
    node-ip: 192.168.94.2
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
  certSANs: ["127.0.0.1", "localhost", "192.168.94.2"]
  extraArgs:
    enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
  extraArgs:
    allocate-node-cidrs: "true"
    leader-elect: "false"
scheduler:
  extraArgs:
    leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd
    extraArgs:
      proxy-refresh-interval: "70000"
kubernetesVersion: v1.28.0
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  x509:
    clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
  maxPerCore: 0
  # Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
  tcpEstablishedTimeout: 0s
  # Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
  tcpCloseWaitTimeout: 0s
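(For a manual sanity check of this generated config, recent kubeadm releases ship a linter; a sketch, assuming "kubeadm config validate" is available in the staged v1.28 binary and that the file has already been copied to the path used at the scp step below:

    out/minikube-linux-amd64 -p old-k8s-version-012258 ssh -- sudo /var/lib/minikube/binaries/v1.28.0/kubeadm config validate --config /var/tmp/minikube/kubeadm.yaml.new)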
I1121 14:29:25.211673 249617 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.28.0
I1121 14:29:25.224124 249617 binaries.go:51] Found k8s binaries, skipping transfer
I1121 14:29:25.224202 249617 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1121 14:29:25.235430 249617 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (326 bytes)
I1121 14:29:25.254181 249617 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1121 14:29:25.283842 249617 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2175 bytes)
I1121 14:29:25.302971 249617 ssh_runner.go:195] Run: grep 192.168.94.2 control-plane.minikube.internal$ /etc/hosts
I1121 14:29:25.309092 249617 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.94.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1121 14:29:25.325170 249617 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1121 14:29:25.438037 249617 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1121 14:29:25.469767 249617 certs.go:69] Setting up /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258 for IP: 192.168.94.2
I1121 14:29:25.469790 249617 certs.go:195] generating shared ca certs ...
I1121 14:29:25.469811 249617 certs.go:227] acquiring lock for ca certs: {Name:mk4ac68319839cd6684afc66121341297238277f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:25.470023 249617 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21847-11004/.minikube/ca.key
I1121 14:29:25.470095 249617 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21847-11004/.minikube/proxy-client-ca.key
I1121 14:29:25.470105 249617 certs.go:257] generating profile certs ...
I1121 14:29:25.470177 249617 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/client.key
I1121 14:29:25.470199 249617 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/client.crt with IP's: []
I1121 14:29:25.634340 249617 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/client.crt ...
I1121 14:29:25.634374 249617 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/client.crt: {Name:mk5e1a3132436dad740351857d527e3c45fff4e4 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:25.648586 249617 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/client.key ...
I1121 14:29:25.648625 249617 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/client.key: {Name:mk757010d91a13b26eb1340def496546bee9bf26 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:25.648791 249617 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/apiserver.key.a13049cc
I1121 14:29:25.648816 249617 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/apiserver.crt.a13049cc with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.94.2]
I1121 14:29:25.817862 249617 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/apiserver.crt.a13049cc ...
I1121 14:29:25.817892 249617 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/apiserver.crt.a13049cc: {Name:mk8a482343e99af6e8bdd7e52a6e5b813685beb7 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:25.818099 249617 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/apiserver.key.a13049cc ...
I1121 14:29:25.818121 249617 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/apiserver.key.a13049cc: {Name:mk4cf761e884b2a77e105e39ad6b0495b59b5aee Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:25.818237 249617 certs.go:382] copying /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/apiserver.crt.a13049cc -> /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/apiserver.crt
I1121 14:29:25.818331 249617 certs.go:386] copying /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/apiserver.key.a13049cc -> /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/apiserver.key
I1121 14:29:25.818390 249617 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/proxy-client.key
I1121 14:29:25.818406 249617 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/proxy-client.crt with IP's: []
I1121 14:29:26.390351 249617 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/proxy-client.crt ...
I1121 14:29:26.390391 249617 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/proxy-client.crt: {Name:mk37207f300780275f6aa5331fc436d60739196c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:26.390599 249617 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/proxy-client.key ...
I1121 14:29:26.390617 249617 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/proxy-client.key: {Name:mkff5d416178c38a50235608b783c3957bee8456 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:26.390849 249617 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/14523.pem (1338 bytes)
W1121 14:29:26.390898 249617 certs.go:480] ignoring /home/jenkins/minikube-integration/21847-11004/.minikube/certs/14523_empty.pem, impossibly tiny 0 bytes
I1121 14:29:26.390913 249617 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/ca-key.pem (1675 bytes)
I1121 14:29:26.390946 249617 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/ca.pem (1078 bytes)
I1121 14:29:26.390988 249617 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/cert.pem (1123 bytes)
I1121 14:29:26.391029 249617 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/key.pem (1675 bytes)
I1121 14:29:26.391086 249617 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-11004/.minikube/files/etc/ssl/certs/145232.pem (1708 bytes)
I1121 14:29:26.391817 249617 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1121 14:29:26.418450 249617 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I1121 14:29:26.446063 249617 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1121 14:29:26.469197 249617 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I1121 14:29:26.493823 249617 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1432 bytes)
I1121 14:29:26.526847 249617 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I1121 14:29:26.555176 249617 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
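With the profile certs generated and copied into /var/lib/minikube/certs, the SANs on the apiserver cert should match the IP list logged during generation (10.96.0.1, 127.0.0.1, 10.0.0.1, 192.168.94.2). A quick check on the node, using the path from the scp lines above:

  # Dump the cert and show its Subject Alternative Name extension.
  sudo openssl x509 -noout -text -in /var/lib/minikube/certs/apiserver.crt \
    | grep -A1 'Subject Alternative Name'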
I1121 14:29:25.915600 213058 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1121 14:29:25.916118 213058 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
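The healthz probe failing here is a plain HTTPS GET against the apiserver port; "connection refused" means nothing is listening yet, not a TLS or auth failure. Reproduced by hand with the address from this run:

  # -k skips CA verification, which is fine for a liveness poke.
  curl -k https://192.168.76.2:8443/healthz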
I1121 14:29:25.916177 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1121 14:29:25.916228 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1121 14:29:25.948057 213058 cri.go:89] found id: "9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1"
I1121 14:29:25.948080 213058 cri.go:89] found id: "934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780"
I1121 14:29:25.948087 213058 cri.go:89] found id: ""
I1121 14:29:25.948096 213058 logs.go:282] 2 containers: [9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1 934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780]
I1121 14:29:25.948160 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:25.952634 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:25.956801 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1121 14:29:25.956870 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1121 14:29:25.990988 213058 cri.go:89] found id: "4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359"
I1121 14:29:25.991014 213058 cri.go:89] found id: ""
I1121 14:29:25.991024 213058 logs.go:282] 1 containers: [4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359]
I1121 14:29:25.991083 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:25.995665 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1121 14:29:25.995736 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1121 14:29:26.031577 213058 cri.go:89] found id: ""
I1121 14:29:26.031604 213058 logs.go:282] 0 containers: []
W1121 14:29:26.031612 213058 logs.go:284] No container was found matching "coredns"
I1121 14:29:26.031618 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1121 14:29:26.031665 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1121 14:29:26.064880 213058 cri.go:89] found id: "e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6"
I1121 14:29:26.064907 213058 cri.go:89] found id: "f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545"
I1121 14:29:26.064912 213058 cri.go:89] found id: ""
I1121 14:29:26.064922 213058 logs.go:282] 2 containers: [e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6 f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545]
I1121 14:29:26.064979 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:26.070274 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:26.075659 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1121 14:29:26.075731 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1121 14:29:26.108079 213058 cri.go:89] found id: ""
I1121 14:29:26.108108 213058 logs.go:282] 0 containers: []
W1121 14:29:26.108118 213058 logs.go:284] No container was found matching "kube-proxy"
I1121 14:29:26.108125 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1121 14:29:26.108181 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1121 14:29:26.138988 213058 cri.go:89] found id: "94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3"
I1121 14:29:26.139018 213058 cri.go:89] found id: "56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463"
I1121 14:29:26.139024 213058 cri.go:89] found id: ""
I1121 14:29:26.139034 213058 logs.go:282] 2 containers: [94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3 56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463]
I1121 14:29:26.139096 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:26.143487 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:26.147564 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1121 14:29:26.147631 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1121 14:29:26.185747 213058 cri.go:89] found id: ""
I1121 14:29:26.185774 213058 logs.go:282] 0 containers: []
W1121 14:29:26.185785 213058 logs.go:284] No container was found matching "kindnet"
I1121 14:29:26.185793 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1121 14:29:26.185848 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1121 14:29:26.220265 213058 cri.go:89] found id: ""
I1121 14:29:26.220296 213058 logs.go:282] 0 containers: []
W1121 14:29:26.220308 213058 logs.go:284] No container was found matching "storage-provisioner"
I1121 14:29:26.220321 213058 logs.go:123] Gathering logs for kube-controller-manager [56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463] ...
I1121 14:29:26.220335 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463"
I1121 14:29:26.265042 213058 logs.go:123] Gathering logs for kubelet ...
I1121 14:29:26.265072 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1121 14:29:26.402636 213058 logs.go:123] Gathering logs for describe nodes ...
I1121 14:29:26.402672 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1121 14:29:26.484531 213058 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1121 14:29:26.484565 213058 logs.go:123] Gathering logs for kube-apiserver [9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1] ...
I1121 14:29:26.484581 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1"
I1121 14:29:26.534239 213058 logs.go:123] Gathering logs for kube-apiserver [934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780] ...
I1121 14:29:26.534294 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780"
I1121 14:29:26.579971 213058 logs.go:123] Gathering logs for containerd ...
I1121 14:29:26.580016 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1121 14:29:26.643693 213058 logs.go:123] Gathering logs for container status ...
I1121 14:29:26.643727 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1121 14:29:26.683712 213058 logs.go:123] Gathering logs for dmesg ...
I1121 14:29:26.683748 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1121 14:29:26.702800 213058 logs.go:123] Gathering logs for etcd [4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359] ...
I1121 14:29:26.702836 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359"
I1121 14:29:26.741813 213058 logs.go:123] Gathering logs for kube-scheduler [e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6] ...
I1121 14:29:26.741845 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6"
I1121 14:29:26.812944 213058 logs.go:123] Gathering logs for kube-scheduler [f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545] ...
I1121 14:29:26.812997 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545"
I1121 14:29:26.855307 213058 logs.go:123] Gathering logs for kube-controller-manager [94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3] ...
I1121 14:29:26.855347 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3"
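Worth noting in the "container status" step above is the fallback chain: resolve an absolute crictl path if one exists, fall back to a bare PATH lookup, and if crictl fails entirely, try docker. The same idiom standalone:

  # Prefer crictl (absolute path if found, else PATH lookup), fall back to docker.
  sudo "$(which crictl || echo crictl)" ps -a || sudo docker ps -a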
I1121 14:29:24.308535 252125 containerd.go:267] Checking existence of image with name "registry.k8s.io/coredns/coredns:v1.12.1" and sha "52546a367cc9e0d924aa3b190596a9167fa6e53245023b5b5baf0f07e5443969"
I1121 14:29:24.308619 252125 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/coredns/coredns:v1.12.1
I1121 14:29:24.317176 252125 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-proxy:v1.34.1" and sha "fc25172553d79197ecd840ec8dba1fba68330079355e974b04c1a441e6a4a0b7"
I1121 14:29:24.317245 252125 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-proxy:v1.34.1
I1121 14:29:24.318774 252125 cache_images.go:118] "registry.k8s.io/etcd:3.6.4-0" needs transfer: "registry.k8s.io/etcd:3.6.4-0" does not exist at hash "5f1f5298c888daa46c4409ff4cefe5ca9d16e479419f94cdb5f5d5563dac0115" in container runtime
I1121 14:29:24.318825 252125 cri.go:218] Removing image: registry.k8s.io/etcd:3.6.4-0
I1121 14:29:24.318867 252125 ssh_runner.go:195] Run: which crictl
I1121 14:29:24.328208 252125 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-controller-manager:v1.34.1" and sha "c80c8dbafe7dd71fc21527912a6dd20ccd1b71f3e561a5c28337388d0619538f"
I1121 14:29:24.328249 252125 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-scheduler:v1.34.1" and sha "7dd6aaa1717ab7eaae4578503e4c4d9965fcf5a249e8155fe16379ee9b6cb813"
I1121 14:29:24.328291 252125 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-controller-manager:v1.34.1
I1121 14:29:24.328305 252125 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-scheduler:v1.34.1
I1121 14:29:24.328664 252125 containerd.go:267] Checking existence of image with name "registry.k8s.io/pause:3.10.1" and sha "cd073f4c5f6a8e9dc6f3125ba00cf60819cae95c1ec84a1f146ee4a9cf9e803f"
I1121 14:29:24.328708 252125 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/pause:3.10.1
I1121 14:29:24.335839 252125 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-apiserver:v1.34.1" and sha "c3994bc6961024917ec0aeee02e62828108c21a52d87648e30f3080d9cbadc97"
I1121 14:29:24.335900 252125 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-apiserver:v1.34.1
I1121 14:29:24.337631 252125 cache_images.go:118] "registry.k8s.io/coredns/coredns:v1.12.1" needs transfer: "registry.k8s.io/coredns/coredns:v1.12.1" does not exist at hash "52546a367cc9e0d924aa3b190596a9167fa6e53245023b5b5baf0f07e5443969" in container runtime
I1121 14:29:24.337672 252125 cri.go:218] Removing image: registry.k8s.io/coredns/coredns:v1.12.1
I1121 14:29:24.337713 252125 ssh_runner.go:195] Run: which crictl
I1121 14:29:24.346363 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.6.4-0
I1121 14:29:24.346443 252125 cache_images.go:118] "registry.k8s.io/kube-proxy:v1.34.1" needs transfer: "registry.k8s.io/kube-proxy:v1.34.1" does not exist at hash "fc25172553d79197ecd840ec8dba1fba68330079355e974b04c1a441e6a4a0b7" in container runtime
I1121 14:29:24.346484 252125 cri.go:218] Removing image: registry.k8s.io/kube-proxy:v1.34.1
I1121 14:29:24.346517 252125 ssh_runner.go:195] Run: which crictl
I1121 14:29:24.361284 252125 cache_images.go:118] "registry.k8s.io/kube-controller-manager:v1.34.1" needs transfer: "registry.k8s.io/kube-controller-manager:v1.34.1" does not exist at hash "c80c8dbafe7dd71fc21527912a6dd20ccd1b71f3e561a5c28337388d0619538f" in container runtime
I1121 14:29:24.361331 252125 cri.go:218] Removing image: registry.k8s.io/kube-controller-manager:v1.34.1
I1121 14:29:24.361375 252125 ssh_runner.go:195] Run: which crictl
I1121 14:29:24.361424 252125 cache_images.go:118] "registry.k8s.io/kube-scheduler:v1.34.1" needs transfer: "registry.k8s.io/kube-scheduler:v1.34.1" does not exist at hash "7dd6aaa1717ab7eaae4578503e4c4d9965fcf5a249e8155fe16379ee9b6cb813" in container runtime
I1121 14:29:24.361445 252125 cri.go:218] Removing image: registry.k8s.io/kube-scheduler:v1.34.1
I1121 14:29:24.361477 252125 ssh_runner.go:195] Run: which crictl
I1121 14:29:24.366787 252125 cache_images.go:118] "registry.k8s.io/pause:3.10.1" needs transfer: "registry.k8s.io/pause:3.10.1" does not exist at hash "cd073f4c5f6a8e9dc6f3125ba00cf60819cae95c1ec84a1f146ee4a9cf9e803f" in container runtime
I1121 14:29:24.366831 252125 cri.go:218] Removing image: registry.k8s.io/pause:3.10.1
I1121 14:29:24.366871 252125 ssh_runner.go:195] Run: which crictl
I1121 14:29:24.379457 252125 cache_images.go:118] "registry.k8s.io/kube-apiserver:v1.34.1" needs transfer: "registry.k8s.io/kube-apiserver:v1.34.1" does not exist at hash "c3994bc6961024917ec0aeee02e62828108c21a52d87648e30f3080d9cbadc97" in container runtime
I1121 14:29:24.379503 252125 cri.go:218] Removing image: registry.k8s.io/kube-apiserver:v1.34.1
I1121 14:29:24.379558 252125 ssh_runner.go:195] Run: which crictl
I1121 14:29:24.379677 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.12.1
I1121 14:29:24.388569 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.6.4-0
I1121 14:29:24.388608 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.34.1
I1121 14:29:24.388658 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.34.1
I1121 14:29:24.388681 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1121 14:29:24.388574 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.34.1
I1121 14:29:24.418705 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.12.1
I1121 14:29:24.418763 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.34.1
I1121 14:29:24.427350 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.6.4-0
I1121 14:29:24.434639 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.34.1
I1121 14:29:24.434777 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1121 14:29:24.437430 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.34.1
I1121 14:29:24.437452 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.34.1
I1121 14:29:24.477986 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.34.1
I1121 14:29:24.478027 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.12.1
I1121 14:29:24.478099 252125 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0
I1121 14:29:24.478334 252125 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/etcd_3.6.4-0
I1121 14:29:24.478136 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.34.1
I1121 14:29:24.485019 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1121 14:29:24.485026 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.34.1
I1121 14:29:24.489362 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.34.1
I1121 14:29:24.521124 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.34.1
I1121 14:29:24.521651 252125 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1
I1121 14:29:24.521767 252125 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/coredns_v1.12.1
I1121 14:29:24.553384 252125 ssh_runner.go:352] existence check for /var/lib/minikube/images/etcd_3.6.4-0: stat -c "%s %y" /var/lib/minikube/images/etcd_3.6.4-0: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/etcd_3.6.4-0': No such file or directory
I1121 14:29:24.553425 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0 --> /var/lib/minikube/images/etcd_3.6.4-0 (74320896 bytes)
I1121 14:29:24.553522 252125 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1
I1121 14:29:24.553632 252125 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-proxy_v1.34.1
I1121 14:29:24.553699 252125 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1
I1121 14:29:24.553755 252125 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/pause_3.10.1
I1121 14:29:24.553769 252125 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1
I1121 14:29:24.553803 252125 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1
I1121 14:29:24.553853 252125 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-scheduler_v1.34.1
I1121 14:29:24.553860 252125 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-apiserver_v1.34.1
I1121 14:29:24.553893 252125 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1
I1121 14:29:24.553920 252125 ssh_runner.go:352] existence check for /var/lib/minikube/images/coredns_v1.12.1: stat -c "%s %y" /var/lib/minikube/images/coredns_v1.12.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/coredns_v1.12.1': No such file or directory
I1121 14:29:24.553945 252125 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-controller-manager_v1.34.1
I1121 14:29:24.553945 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1 --> /var/lib/minikube/images/coredns_v1.12.1 (22394368 bytes)
I1121 14:29:24.565027 252125 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-proxy_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-proxy_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-proxy_v1.34.1': No such file or directory
I1121 14:29:24.565077 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1 --> /var/lib/minikube/images/kube-proxy_v1.34.1 (25966080 bytes)
I1121 14:29:24.565153 252125 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-apiserver_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-apiserver_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-apiserver_v1.34.1': No such file or directory
I1121 14:29:24.565169 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1 --> /var/lib/minikube/images/kube-apiserver_v1.34.1 (27073024 bytes)
I1121 14:29:24.574297 252125 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-controller-manager_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-controller-manager_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-controller-manager_v1.34.1': No such file or directory
I1121 14:29:24.574338 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1 --> /var/lib/minikube/images/kube-controller-manager_v1.34.1 (22831104 bytes)
I1121 14:29:24.574363 252125 ssh_runner.go:352] existence check for /var/lib/minikube/images/pause_3.10.1: stat -c "%s %y" /var/lib/minikube/images/pause_3.10.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/pause_3.10.1': No such file or directory
I1121 14:29:24.574390 252125 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-scheduler_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-scheduler_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-scheduler_v1.34.1': No such file or directory
I1121 14:29:24.574393 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 --> /var/lib/minikube/images/pause_3.10.1 (321024 bytes)
I1121 14:29:24.574407 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1 --> /var/lib/minikube/images/kube-scheduler_v1.34.1 (17396736 bytes)
I1121 14:29:24.784169 252125 containerd.go:285] Loading image: /var/lib/minikube/images/pause_3.10.1
I1121 14:29:24.784246 252125 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/pause_3.10.1
I1121 14:29:24.964305 252125 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 from cache
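The image-load path is two steps: scp the cached tarball into /var/lib/minikube/images (the stat existence checks above decide whether the transfer is needed at all), then import it into containerd's k8s.io namespace so the kubelet can see it. A sketch of the import half, using the pause image from this run:

  # Import a cached image tarball into the namespace the CRI uses,
  # then confirm it is visible to crictl.
  sudo ctr -n=k8s.io images import /var/lib/minikube/images/pause_3.10.1
  sudo crictl images | grep pause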
I1121 14:29:25.029557 252125 containerd.go:285] Loading image: /var/lib/minikube/images/kube-scheduler_v1.34.1
I1121 14:29:25.029626 252125 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-scheduler_v1.34.1
I1121 14:29:25.445459 252125 containerd.go:267] Checking existence of image with name "gcr.io/k8s-minikube/storage-provisioner:v5" and sha "6e38f40d628db3002f5617342c8872c935de530d867d0f709a2fbda1a302a562"
I1121 14:29:25.445578 252125 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==gcr.io/k8s-minikube/storage-provisioner:v5
I1121 14:29:26.691152 252125 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-scheduler_v1.34.1: (1.661495413s)
I1121 14:29:26.691188 252125 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1 from cache
I1121 14:29:26.691209 252125 containerd.go:285] Loading image: /var/lib/minikube/images/coredns_v1.12.1
I1121 14:29:26.691206 252125 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images ls name==gcr.io/k8s-minikube/storage-provisioner:v5: (1.245604103s)
I1121 14:29:26.691250 252125 cache_images.go:118] "gcr.io/k8s-minikube/storage-provisioner:v5" needs transfer: "gcr.io/k8s-minikube/storage-provisioner:v5" does not exist at hash "6e38f40d628db3002f5617342c8872c935de530d867d0f709a2fbda1a302a562" in container runtime
I1121 14:29:26.691264 252125 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/coredns_v1.12.1
I1121 14:29:26.691297 252125 cri.go:218] Removing image: gcr.io/k8s-minikube/storage-provisioner:v5
I1121 14:29:26.691347 252125 ssh_runner.go:195] Run: which crictl
I1121 14:29:26.696141 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1121 14:29:28.100615 252125 ssh_runner.go:235] Completed: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5: (1.404441617s)
I1121 14:29:28.100696 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1121 14:29:28.100615 252125 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/coredns_v1.12.1: (1.409327822s)
I1121 14:29:28.100767 252125 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1 from cache
I1121 14:29:28.100803 252125 containerd.go:285] Loading image: /var/lib/minikube/images/kube-controller-manager_v1.34.1
I1121 14:29:28.100853 252125 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-controller-manager_v1.34.1
I1121 14:29:28.132780 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1121 14:29:25.163849 255774 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I1121 14:29:25.164318 255774 start.go:159] libmachine.API.Create for "default-k8s-diff-port-376255" (driver="docker")
I1121 14:29:25.164395 255774 client.go:173] LocalClient.Create starting
I1121 14:29:25.164513 255774 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21847-11004/.minikube/certs/ca.pem
I1121 14:29:25.164575 255774 main.go:143] libmachine: Decoding PEM data...
I1121 14:29:25.164605 255774 main.go:143] libmachine: Parsing certificate...
I1121 14:29:25.164704 255774 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21847-11004/.minikube/certs/cert.pem
I1121 14:29:25.164760 255774 main.go:143] libmachine: Decoding PEM data...
I1121 14:29:25.164776 255774 main.go:143] libmachine: Parsing certificate...
I1121 14:29:25.165330 255774 cli_runner.go:164] Run: docker network inspect default-k8s-diff-port-376255 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1121 14:29:25.188513 255774 cli_runner.go:211] docker network inspect default-k8s-diff-port-376255 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1121 14:29:25.188614 255774 network_create.go:284] running [docker network inspect default-k8s-diff-port-376255] to gather additional debugging logs...
I1121 14:29:25.188640 255774 cli_runner.go:164] Run: docker network inspect default-k8s-diff-port-376255
W1121 14:29:25.213297 255774 cli_runner.go:211] docker network inspect default-k8s-diff-port-376255 returned with exit code 1
I1121 14:29:25.213338 255774 network_create.go:287] error running [docker network inspect default-k8s-diff-port-376255]: docker network inspect default-k8s-diff-port-376255: exit status 1
stdout:
[]
stderr:
Error response from daemon: network default-k8s-diff-port-376255 not found
I1121 14:29:25.213435 255774 network_create.go:289] output of [docker network inspect default-k8s-diff-port-376255]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network default-k8s-diff-port-376255 not found
** /stderr **
I1121 14:29:25.213589 255774 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1121 14:29:25.240844 255774 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-66cfc06dc768 IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:3a:44:28:22:82:94} reservation:<nil>}
I1121 14:29:25.241874 255774 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-39921db0d513 IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:76:e4:85:98:a5:e3} reservation:<nil>}
I1121 14:29:25.242975 255774 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-36a8741c90a2 IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:4a:21:99:72:63:4a} reservation:<nil>}
I1121 14:29:25.244042 255774 network.go:211] skipping subnet 192.168.76.0/24 that is taken: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName:br-63d543fc8bbd IfaceIPv4:192.168.76.1 IfaceMTU:1500 IfaceMAC:c2:58:40:d2:33:c4} reservation:<nil>}
I1121 14:29:25.245269 255774 network.go:206] using free private subnet 192.168.85.0/24: &{IP:192.168.85.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.85.0/24 Gateway:192.168.85.1 ClientMin:192.168.85.2 ClientMax:192.168.85.254 Broadcast:192.168.85.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001eb46e0}
I1121 14:29:25.245303 255774 network_create.go:124] attempt to create docker network default-k8s-diff-port-376255 192.168.85.0/24 with gateway 192.168.85.1 and MTU of 1500 ...
I1121 14:29:25.245384 255774 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.85.0/24 --gateway=192.168.85.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=default-k8s-diff-port-376255 default-k8s-diff-port-376255
I1121 14:29:25.322210 255774 network_create.go:108] docker network default-k8s-diff-port-376255 192.168.85.0/24 created
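Network creation above first walks the candidate /24s (192.168.49.0, .58.0, .67.0, .76.0) and skips each one already claimed by another profile's bridge, then creates the network with an explicit subnet and gateway. A simplified version of the create-and-verify step (the network name is illustrative, and the masquerade/icc options from the log are omitted):

  docker network create --driver=bridge \
    --subnet=192.168.85.0/24 --gateway=192.168.85.1 \
    -o com.docker.network.driver.mtu=1500 demo-net
  # Read back the subnet Docker actually assigned.
  docker network inspect demo-net --format '{{range .IPAM.Config}}{{.Subnet}}{{end}}'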
I1121 14:29:25.322244 255774 kic.go:121] calculated static IP "192.168.85.2" for the "default-k8s-diff-port-376255" container
I1121 14:29:25.322309 255774 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1121 14:29:25.346732 255774 cli_runner.go:164] Run: docker volume create default-k8s-diff-port-376255 --label name.minikube.sigs.k8s.io=default-k8s-diff-port-376255 --label created_by.minikube.sigs.k8s.io=true
I1121 14:29:25.374919 255774 oci.go:103] Successfully created a docker volume default-k8s-diff-port-376255
I1121 14:29:25.374994 255774 cli_runner.go:164] Run: docker run --rm --name default-k8s-diff-port-376255-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=default-k8s-diff-port-376255 --entrypoint /usr/bin/test -v default-k8s-diff-port-376255:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a -d /var/lib
I1121 14:29:26.343288 255774 oci.go:107] Successfully prepared a docker volume default-k8s-diff-port-376255
I1121 14:29:26.343370 255774 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
I1121 14:29:26.343387 255774 kic.go:194] Starting extracting preloaded images to volume ...
I1121 14:29:26.343457 255774 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21847-11004/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v default-k8s-diff-port-376255:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a -I lz4 -xf /preloaded.tar -C /extractDir
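Preloaded images arrive as a single lz4-compressed tarball that a throwaway container untars straight into the profile's /var volume, so the node container starts with a warm containerd image store. The same extraction pattern in isolation (tarball and volume names are placeholders; the image must ship tar and lz4, as the kicbase image used above does):

  IMAGE="gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924"
  # tar -I hands decompression to lz4; /extractDir is the mounted volume.
  docker run --rm \
    -v "$PWD/preloaded-images.tar.lz4:/preloaded.tar:ro" \
    -v demo-volume:/extractDir \
    --entrypoint /usr/bin/tar \
    "$IMAGE" -I lz4 -xf /preloaded.tar -C /extractDir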
I1121 14:29:26.582319 249617 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I1121 14:29:26.606403 249617 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/certs/14523.pem --> /usr/share/ca-certificates/14523.pem (1338 bytes)
I1121 14:29:26.635408 249617 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/files/etc/ssl/certs/145232.pem --> /usr/share/ca-certificates/145232.pem (1708 bytes)
I1121 14:29:26.661287 249617 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1121 14:29:26.686582 249617 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1121 14:29:26.703157 249617 ssh_runner.go:195] Run: openssl version
I1121 14:29:26.712353 249617 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/14523.pem && ln -fs /usr/share/ca-certificates/14523.pem /etc/ssl/certs/14523.pem"
I1121 14:29:26.725593 249617 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/14523.pem
I1121 14:29:26.732381 249617 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 21 14:01 /usr/share/ca-certificates/14523.pem
I1121 14:29:26.732523 249617 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/14523.pem
I1121 14:29:26.774823 249617 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/14523.pem /etc/ssl/certs/51391683.0"
I1121 14:29:26.785127 249617 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/145232.pem && ln -fs /usr/share/ca-certificates/145232.pem /etc/ssl/certs/145232.pem"
I1121 14:29:26.796035 249617 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/145232.pem
I1121 14:29:26.800685 249617 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 21 14:01 /usr/share/ca-certificates/145232.pem
I1121 14:29:26.800751 249617 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/145232.pem
I1121 14:29:26.842185 249617 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/145232.pem /etc/ssl/certs/3ec20f2e.0"
I1121 14:29:26.852632 249617 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1121 14:29:26.863838 249617 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1121 14:29:26.869571 249617 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 21 13:56 /usr/share/ca-certificates/minikubeCA.pem
I1121 14:29:26.869642 249617 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1121 14:29:26.922017 249617 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
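The CA installation loop above follows OpenSSL's hashed-directory convention: each PEM is linked into /etc/ssl/certs under its own name, and again under <subject-hash>.0 (b5213941.0 for minikubeCA here), which is the name OpenSSL actually resolves when verifying chains. Condensed:

  # Link the CA in by name, then by subject hash so OpenSSL can find it.
  sudo ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem
  HASH=$(openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem)
  sudo ln -fs /etc/ssl/certs/minikubeCA.pem "/etc/ssl/certs/${HASH}.0"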
I1121 14:29:26.934065 249617 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1121 14:29:26.939457 249617 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1121 14:29:26.939526 249617 kubeadm.go:401] StartCluster: {Name:old-k8s-version-012258 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-012258 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.94.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1121 14:29:26.939648 249617 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I1121 14:29:26.939710 249617 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I1121 14:29:26.978114 249617 cri.go:89] found id: ""
I1121 14:29:26.978192 249617 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1121 14:29:26.989363 249617 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1121 14:29:27.000529 249617 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I1121 14:29:27.000603 249617 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1121 14:29:27.012158 249617 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1121 14:29:27.012179 249617 kubeadm.go:158] found existing configuration files:
I1121 14:29:27.012231 249617 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1121 14:29:27.022084 249617 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1121 14:29:27.022141 249617 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1121 14:29:27.034139 249617 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1121 14:29:27.044897 249617 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1121 14:29:27.045038 249617 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1121 14:29:27.056593 249617 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1121 14:29:27.066532 249617 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1121 14:29:27.066615 249617 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1121 14:29:27.077925 249617 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1121 14:29:27.088254 249617 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1121 14:29:27.088320 249617 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1121 14:29:27.098442 249617 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.28.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1121 14:29:27.205509 249617 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1044-gcp\n", err: exit status 1
I1121 14:29:27.290009 249617 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
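Bootstrap itself is a single kubeadm init against the rendered config, with the minikube binaries directory prepended to PATH and every preflight check that cannot pass inside a docker-driver container explicitly ignored (the two WARNING lines above are those checks downgraded, not failures). A trimmed sketch of the same invocation:

  # Shortened --ignore-preflight-errors list; the real run above skips more checks.
  sudo env PATH="/var/lib/minikube/binaries/v1.28.0:$PATH" \
    kubeadm init --config /var/tmp/minikube/kubeadm.yaml \
    --ignore-preflight-errors=Swap,NumCPU,Mem,SystemVerification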
I1121 14:29:29.388121 213058 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1121 14:29:29.388594 213058 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1121 14:29:29.388645 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1121 14:29:29.388690 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1121 14:29:29.416964 213058 cri.go:89] found id: "9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1"
I1121 14:29:29.416991 213058 cri.go:89] found id: "934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780"
I1121 14:29:29.416996 213058 cri.go:89] found id: ""
I1121 14:29:29.417006 213058 logs.go:282] 2 containers: [9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1 934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780]
I1121 14:29:29.417074 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:29.421476 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:29.425483 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1121 14:29:29.425557 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1121 14:29:29.453687 213058 cri.go:89] found id: "4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359"
I1121 14:29:29.453708 213058 cri.go:89] found id: ""
I1121 14:29:29.453718 213058 logs.go:282] 1 containers: [4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359]
I1121 14:29:29.453783 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:29.458267 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1121 14:29:29.458353 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1121 14:29:29.485804 213058 cri.go:89] found id: ""
I1121 14:29:29.485865 213058 logs.go:282] 0 containers: []
W1121 14:29:29.485876 213058 logs.go:284] No container was found matching "coredns"
I1121 14:29:29.485883 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1121 14:29:29.485940 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1121 14:29:29.514265 213058 cri.go:89] found id: "e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6"
I1121 14:29:29.514290 213058 cri.go:89] found id: "f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545"
I1121 14:29:29.514294 213058 cri.go:89] found id: ""
I1121 14:29:29.514302 213058 logs.go:282] 2 containers: [e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6 f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545]
I1121 14:29:29.514349 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:29.518626 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:29.522446 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1121 14:29:29.522501 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1121 14:29:29.549770 213058 cri.go:89] found id: ""
I1121 14:29:29.549799 213058 logs.go:282] 0 containers: []
W1121 14:29:29.549811 213058 logs.go:284] No container was found matching "kube-proxy"
I1121 14:29:29.549819 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1121 14:29:29.549868 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1121 14:29:29.577193 213058 cri.go:89] found id: "94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3"
I1121 14:29:29.577217 213058 cri.go:89] found id: "56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463"
I1121 14:29:29.577222 213058 cri.go:89] found id: ""
I1121 14:29:29.577230 213058 logs.go:282] 2 containers: [94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3 56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463]
I1121 14:29:29.577288 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:29.581256 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:29.585291 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1121 14:29:29.585347 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1121 14:29:29.614632 213058 cri.go:89] found id: ""
I1121 14:29:29.614664 213058 logs.go:282] 0 containers: []
W1121 14:29:29.614674 213058 logs.go:284] No container was found matching "kindnet"
I1121 14:29:29.614682 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1121 14:29:29.614740 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1121 14:29:29.645697 213058 cri.go:89] found id: ""
I1121 14:29:29.645721 213058 logs.go:282] 0 containers: []
W1121 14:29:29.645730 213058 logs.go:284] No container was found matching "storage-provisioner"
I1121 14:29:29.645741 213058 logs.go:123] Gathering logs for kube-controller-manager [94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3] ...
I1121 14:29:29.645756 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3"
I1121 14:29:29.675578 213058 logs.go:123] Gathering logs for containerd ...
I1121 14:29:29.675607 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1121 14:29:29.718952 213058 logs.go:123] Gathering logs for container status ...
I1121 14:29:29.718990 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1121 14:29:29.750089 213058 logs.go:123] Gathering logs for kubelet ...
I1121 14:29:29.750117 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1121 14:29:29.858708 213058 logs.go:123] Gathering logs for kube-apiserver [934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780] ...
I1121 14:29:29.858738 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780"
I1121 14:29:29.902976 213058 logs.go:123] Gathering logs for kube-scheduler [f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545] ...
I1121 14:29:29.903013 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545"
I1121 14:29:29.938083 213058 logs.go:123] Gathering logs for kube-controller-manager [56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463] ...
I1121 14:29:29.938118 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463"
I1121 14:29:29.976329 213058 logs.go:123] Gathering logs for dmesg ...
I1121 14:29:29.976366 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1121 14:29:29.991448 213058 logs.go:123] Gathering logs for describe nodes ...
I1121 14:29:29.991485 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1121 14:29:30.053990 213058 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1121 14:29:30.054015 213058 logs.go:123] Gathering logs for kube-apiserver [9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1] ...
I1121 14:29:30.054032 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1"
I1121 14:29:30.089042 213058 logs.go:123] Gathering logs for etcd [4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359] ...
I1121 14:29:30.089076 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359"
I1121 14:29:30.124498 213058 logs.go:123] Gathering logs for kube-scheduler [e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6] ...
I1121 14:29:30.124528 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6"
I1121 14:29:32.685601 213058 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1121 14:29:32.686035 213058 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
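
The api_server.go lines above poll https://192.168.76.2:8443/healthz and log "stopped" when the dial is refused. A hedged sketch of such a probe follows; the TLS verification skip and 2-second timeout are assumptions for brevity (the apiserver's certificate is not trusted by the probing host):

    package main

    import (
    	"crypto/tls"
    	"fmt"
    	"net/http"
    	"time"
    )

    // checkHealthz performs one GET against the apiserver healthz endpoint.
    // A dial error (e.g. connection refused) means the apiserver is not up yet.
    func checkHealthz(url string) error {
    	client := &http.Client{
    		Timeout: 2 * time.Second,
    		Transport: &http.Transport{
    			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
    		},
    	}
    	resp, err := client.Get(url)
    	if err != nil {
    		return err // e.g. dial tcp ...: connect: connection refused
    	}
    	defer resp.Body.Close()
    	if resp.StatusCode != http.StatusOK {
    		return fmt.Errorf("healthz returned %s", resp.Status)
    	}
    	return nil
    }

    func main() {
    	if err := checkHealthz("https://192.168.76.2:8443/healthz"); err != nil {
    		fmt.Println("stopped:", err)
    		return
    	}
    	fmt.Println("apiserver is healthy")
    }
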
I1121 14:29:32.686089 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1121 14:29:32.686144 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1121 14:29:32.744948 213058 cri.go:89] found id: "9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1"
I1121 14:29:32.745095 213058 cri.go:89] found id: "934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780"
I1121 14:29:32.745132 213058 cri.go:89] found id: ""
I1121 14:29:32.745169 213058 logs.go:282] 2 containers: [9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1 934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780]
I1121 14:29:32.745355 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:32.752020 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:32.760837 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1121 14:29:32.761106 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1121 14:29:32.807418 213058 cri.go:89] found id: "4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359"
I1121 14:29:32.807451 213058 cri.go:89] found id: ""
I1121 14:29:32.807462 213058 logs.go:282] 1 containers: [4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359]
I1121 14:29:32.807521 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:32.813216 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1121 14:29:32.813289 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1121 14:29:32.852598 213058 cri.go:89] found id: ""
I1121 14:29:32.852633 213058 logs.go:282] 0 containers: []
W1121 14:29:32.852645 213058 logs.go:284] No container was found matching "coredns"
I1121 14:29:32.852653 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1121 14:29:32.852711 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1121 14:29:32.889120 213058 cri.go:89] found id: "e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6"
I1121 14:29:32.889144 213058 cri.go:89] found id: "f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545"
I1121 14:29:32.889148 213058 cri.go:89] found id: ""
I1121 14:29:32.889157 213058 logs.go:282] 2 containers: [e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6 f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545]
I1121 14:29:32.889211 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:32.894834 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:32.900572 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1121 14:29:32.900646 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1121 14:29:32.937810 213058 cri.go:89] found id: ""
I1121 14:29:32.937836 213058 logs.go:282] 0 containers: []
W1121 14:29:32.937846 213058 logs.go:284] No container was found matching "kube-proxy"
I1121 14:29:32.937853 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1121 14:29:32.937914 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1121 14:29:32.975713 213058 cri.go:89] found id: "94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3"
I1121 14:29:32.975735 213058 cri.go:89] found id: "56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463"
I1121 14:29:32.975741 213058 cri.go:89] found id: ""
I1121 14:29:32.975751 213058 logs.go:282] 2 containers: [94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3 56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463]
I1121 14:29:32.975815 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:32.981574 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:32.985965 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1121 14:29:32.986030 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1121 14:29:33.019894 213058 cri.go:89] found id: ""
I1121 14:29:33.019923 213058 logs.go:282] 0 containers: []
W1121 14:29:33.019935 213058 logs.go:284] No container was found matching "kindnet"
I1121 14:29:33.019949 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1121 14:29:33.020009 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1121 14:29:33.051872 213058 cri.go:89] found id: ""
I1121 14:29:33.051901 213058 logs.go:282] 0 containers: []
W1121 14:29:33.051911 213058 logs.go:284] No container was found matching "storage-provisioner"
I1121 14:29:33.051923 213058 logs.go:123] Gathering logs for kube-scheduler [f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545] ...
I1121 14:29:33.051937 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545"
I1121 14:29:33.103114 213058 logs.go:123] Gathering logs for kube-controller-manager [94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3] ...
I1121 14:29:33.103153 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3"
I1121 14:29:33.142816 213058 logs.go:123] Gathering logs for containerd ...
I1121 14:29:33.142846 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1121 14:29:33.209677 213058 logs.go:123] Gathering logs for container status ...
I1121 14:29:33.209736 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1121 14:29:33.255185 213058 logs.go:123] Gathering logs for dmesg ...
I1121 14:29:33.255220 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1121 14:29:33.272562 213058 logs.go:123] Gathering logs for kube-apiserver [9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1] ...
I1121 14:29:33.272600 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1"
I1121 14:29:33.319098 213058 logs.go:123] Gathering logs for kube-apiserver [934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780] ...
I1121 14:29:33.319132 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780"
I1121 14:29:33.366245 213058 logs.go:123] Gathering logs for etcd [4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359] ...
I1121 14:29:33.366286 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359"
I1121 14:29:33.410624 213058 logs.go:123] Gathering logs for kube-controller-manager [56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463] ...
I1121 14:29:33.410660 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463"
I1121 14:29:33.458217 213058 logs.go:123] Gathering logs for kubelet ...
I1121 14:29:33.458253 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1121 14:29:33.586879 213058 logs.go:123] Gathering logs for describe nodes ...
I1121 14:29:33.586919 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
I1121 14:29:29.835800 252125 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-controller-manager_v1.34.1: (1.734910291s)
I1121 14:29:29.835838 252125 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1 from cache
I1121 14:29:29.835860 252125 containerd.go:285] Loading image: /var/lib/minikube/images/kube-proxy_v1.34.1
I1121 14:29:29.835902 252125 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-proxy_v1.34.1
I1121 14:29:29.835802 252125 ssh_runner.go:235] Completed: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5: (1.702989246s)
I1121 14:29:29.835965 252125 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5
I1121 14:29:29.836056 252125 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v5
I1121 14:29:29.840842 252125 ssh_runner.go:352] existence check for /var/lib/minikube/images/storage-provisioner_v5: stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v5: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/storage-provisioner_v5': No such file or directory
I1121 14:29:29.840873 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 --> /var/lib/minikube/images/storage-provisioner_v5 (9060352 bytes)
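
The stat-then-scp lines above show the cache-transfer idiom: `stat -c "%s %y" <remote>` is run first, and a non-zero exit ("No such file or directory") triggers copying the cached image to the node. A sketch of that pattern under the assumption that a plain ssh/scp client stands in for minikube's ssh_runner; the helper name is hypothetical:

    package main

    import (
    	"fmt"
    	"os/exec"
    )

    // ensureRemoteFile copies src to host:dst only when the remote stat check
    // fails, mirroring the existence-check-then-scp sequence in the log.
    func ensureRemoteFile(host, src, dst string) error {
    	// `stat -c '%s %y' dst` exits non-zero when the file is missing.
    	if err := exec.Command("ssh", host, "stat -c '%s %y' "+dst).Run(); err == nil {
    		return nil // already present, skip the transfer
    	}
    	fmt.Printf("scp %s --> %s:%s\n", src, host, dst)
    	return exec.Command("scp", src, host+":"+dst).Run()
    }

    func main() {
    	err := ensureRemoteFile("docker@127.0.0.1",
    		"/tmp/storage-provisioner_v5",
    		"/var/lib/minikube/images/storage-provisioner_v5")
    	if err != nil {
    		fmt.Println("transfer failed:", err)
    	}
    }
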
I1121 14:29:32.866902 252125 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-proxy_v1.34.1: (3.030968163s)
I1121 14:29:32.866941 252125 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1 from cache
I1121 14:29:32.866961 252125 containerd.go:285] Loading image: /var/lib/minikube/images/kube-apiserver_v1.34.1
I1121 14:29:32.867002 252125 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-apiserver_v1.34.1
I1121 14:29:31.901829 255774 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21847-11004/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v default-k8s-diff-port-376255:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a -I lz4 -xf /preloaded.tar -C /extractDir: (5.558304176s)
I1121 14:29:31.901864 255774 kic.go:203] duration metric: took 5.558473353s to extract preloaded images to volume ...
W1121 14:29:31.901941 255774 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
W1121 14:29:31.901969 255774 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.
I1121 14:29:31.902010 255774 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1121 14:29:31.985847 255774 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname default-k8s-diff-port-376255 --name default-k8s-diff-port-376255 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=default-k8s-diff-port-376255 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=default-k8s-diff-port-376255 --network default-k8s-diff-port-376255 --ip 192.168.85.2 --volume default-k8s-diff-port-376255:/var --security-opt apparmor=unconfined --memory=3072mb -e container=docker --expose 8444 --publish=127.0.0.1::8444 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a
I1121 14:29:32.403824 255774 cli_runner.go:164] Run: docker container inspect default-k8s-diff-port-376255 --format={{.State.Running}}
I1121 14:29:32.427802 255774 cli_runner.go:164] Run: docker container inspect default-k8s-diff-port-376255 --format={{.State.Status}}
I1121 14:29:32.456228 255774 cli_runner.go:164] Run: docker exec default-k8s-diff-port-376255 stat /var/lib/dpkg/alternatives/iptables
I1121 14:29:32.514766 255774 oci.go:144] the created container "default-k8s-diff-port-376255" has a running status.
I1121 14:29:32.514799 255774 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21847-11004/.minikube/machines/default-k8s-diff-port-376255/id_rsa...
I1121 14:29:32.829505 255774 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21847-11004/.minikube/machines/default-k8s-diff-port-376255/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1121 14:29:32.861911 255774 cli_runner.go:164] Run: docker container inspect default-k8s-diff-port-376255 --format={{.State.Status}}
I1121 14:29:32.888316 255774 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1121 14:29:32.888342 255774 kic_runner.go:114] Args: [docker exec --privileged default-k8s-diff-port-376255 chown docker:docker /home/docker/.ssh/authorized_keys]
I1121 14:29:32.948121 255774 cli_runner.go:164] Run: docker container inspect default-k8s-diff-port-376255 --format={{.State.Status}}
I1121 14:29:32.975355 255774 machine.go:94] provisionDockerMachine start ...
I1121 14:29:32.975799 255774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" default-k8s-diff-port-376255
I1121 14:29:33.002463 255774 main.go:143] libmachine: Using SSH client type: native
I1121 14:29:33.002813 255774 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x8416e0] 0x8443c0 <nil> [] 0s} 127.0.0.1 33070 <nil> <nil>}
I1121 14:29:33.002834 255774 main.go:143] libmachine: About to run SSH command:
hostname
I1121 14:29:33.003677 255774 main.go:143] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:37682->127.0.0.1:33070: read: connection reset by peer
I1121 14:29:37.228254 249617 kubeadm.go:319] [init] Using Kubernetes version: v1.28.0
I1121 14:29:37.228434 249617 kubeadm.go:319] [preflight] Running pre-flight checks
I1121 14:29:37.228644 249617 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I1121 14:29:37.228822 249617 kubeadm.go:319] KERNEL_VERSION: 6.8.0-1044-gcp
I1121 14:29:37.228907 249617 kubeadm.go:319] OS: Linux
I1121 14:29:37.228971 249617 kubeadm.go:319] CGROUPS_CPU: enabled
I1121 14:29:37.229029 249617 kubeadm.go:319] CGROUPS_CPUSET: enabled
I1121 14:29:37.229111 249617 kubeadm.go:319] CGROUPS_DEVICES: enabled
I1121 14:29:37.229198 249617 kubeadm.go:319] CGROUPS_FREEZER: enabled
I1121 14:29:37.229264 249617 kubeadm.go:319] CGROUPS_MEMORY: enabled
I1121 14:29:37.229333 249617 kubeadm.go:319] CGROUPS_PIDS: enabled
I1121 14:29:37.229403 249617 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I1121 14:29:37.229468 249617 kubeadm.go:319] CGROUPS_IO: enabled
I1121 14:29:37.229624 249617 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1121 14:29:37.229762 249617 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1121 14:29:37.229892 249617 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I1121 14:29:37.230051 249617 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1121 14:29:37.235113 249617 out.go:252] - Generating certificates and keys ...
I1121 14:29:37.235306 249617 kubeadm.go:319] [certs] Using existing ca certificate authority
I1121 14:29:37.235508 249617 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1121 14:29:37.235691 249617 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I1121 14:29:37.235858 249617 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I1121 14:29:37.236102 249617 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1121 14:29:37.236205 249617 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1121 14:29:37.236303 249617 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1121 14:29:37.236516 249617 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [localhost old-k8s-version-012258] and IPs [192.168.94.2 127.0.0.1 ::1]
I1121 14:29:37.236607 249617 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1121 14:29:37.236765 249617 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [localhost old-k8s-version-012258] and IPs [192.168.94.2 127.0.0.1 ::1]
I1121 14:29:37.236861 249617 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1121 14:29:37.236954 249617 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1121 14:29:37.237021 249617 kubeadm.go:319] [certs] Generating "sa" key and public key
I1121 14:29:37.237104 249617 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1121 14:29:37.237178 249617 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1121 14:29:37.237257 249617 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1121 14:29:37.237352 249617 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1121 14:29:37.237438 249617 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1121 14:29:37.237554 249617 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1121 14:29:37.237649 249617 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1121 14:29:37.239227 249617 out.go:252] - Booting up control plane ...
I1121 14:29:37.239369 249617 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1121 14:29:37.239534 249617 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1121 14:29:37.239682 249617 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1121 14:29:37.239829 249617 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1121 14:29:37.239965 249617 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1121 14:29:37.240022 249617 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1121 14:29:37.240260 249617 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
I1121 14:29:37.240373 249617 kubeadm.go:319] [apiclient] All control plane components are healthy after 5.503152 seconds
I1121 14:29:37.240759 249617 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1121 14:29:37.240933 249617 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1121 14:29:37.241035 249617 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1121 14:29:37.241286 249617 kubeadm.go:319] [mark-control-plane] Marking the node old-k8s-version-012258 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1121 14:29:37.241409 249617 kubeadm.go:319] [bootstrap-token] Using token: yix385.n0xejrlt7sdx1ngs
I1121 14:29:37.243198 249617 out.go:252] - Configuring RBAC rules ...
I1121 14:29:37.243379 249617 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1121 14:29:37.243497 249617 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1121 14:29:37.243755 249617 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1121 14:29:37.243946 249617 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1121 14:29:37.244147 249617 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1121 14:29:37.244287 249617 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1121 14:29:37.244477 249617 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1121 14:29:37.244564 249617 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1121 14:29:37.244632 249617 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1121 14:29:37.244642 249617 kubeadm.go:319]
I1121 14:29:37.244725 249617 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1121 14:29:37.244736 249617 kubeadm.go:319]
I1121 14:29:37.244834 249617 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1121 14:29:37.244845 249617 kubeadm.go:319]
I1121 14:29:37.244877 249617 kubeadm.go:319] mkdir -p $HOME/.kube
I1121 14:29:37.244966 249617 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1121 14:29:37.245033 249617 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1121 14:29:37.245045 249617 kubeadm.go:319]
I1121 14:29:37.245111 249617 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1121 14:29:37.245120 249617 kubeadm.go:319]
I1121 14:29:37.245178 249617 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1121 14:29:37.245192 249617 kubeadm.go:319]
I1121 14:29:37.245274 249617 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1121 14:29:37.245371 249617 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1121 14:29:37.245468 249617 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1121 14:29:37.245476 249617 kubeadm.go:319]
I1121 14:29:37.245604 249617 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1121 14:29:37.245734 249617 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1121 14:29:37.245755 249617 kubeadm.go:319]
I1121 14:29:37.245866 249617 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token yix385.n0xejrlt7sdx1ngs \
I1121 14:29:37.246024 249617 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:2aad198f79b3258593291a08f0028a72548d0fc82d6b54639b7d7d17a52adfdb \
I1121 14:29:37.246062 249617 kubeadm.go:319] --control-plane
I1121 14:29:37.246072 249617 kubeadm.go:319]
I1121 14:29:37.246178 249617 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1121 14:29:37.246189 249617 kubeadm.go:319]
I1121 14:29:37.246294 249617 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token yix385.n0xejrlt7sdx1ngs \
I1121 14:29:37.246443 249617 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:2aad198f79b3258593291a08f0028a72548d0fc82d6b54639b7d7d17a52adfdb
I1121 14:29:37.246454 249617 cni.go:84] Creating CNI manager for ""
I1121 14:29:37.246462 249617 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1121 14:29:37.248274 249617 out.go:179] * Configuring CNI (Container Networking Interface) ...
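
The kubeadm join lines above carry `--discovery-token-ca-cert-hash sha256:...`. That value is standard kubeadm behavior (not minikube-specific): the SHA-256 of the cluster CA certificate's DER-encoded Subject Public Key Info. A sketch of recomputing it from the CA file on the control plane, assuming the conventional path /etc/kubernetes/pki/ca.crt:

    package main

    import (
    	"crypto/sha256"
    	"crypto/x509"
    	"encoding/pem"
    	"fmt"
    	"os"
    )

    // caCertHash reproduces kubeadm's discovery-token-ca-cert-hash: the
    // sha256 of the CA certificate's DER-encoded SubjectPublicKeyInfo.
    func caCertHash(pemPath string) (string, error) {
    	data, err := os.ReadFile(pemPath)
    	if err != nil {
    		return "", err
    	}
    	block, _ := pem.Decode(data)
    	if block == nil {
    		return "", fmt.Errorf("no PEM block in %s", pemPath)
    	}
    	cert, err := x509.ParseCertificate(block.Bytes)
    	if err != nil {
    		return "", err
    	}
    	sum := sha256.Sum256(cert.RawSubjectPublicKeyInfo)
    	return fmt.Sprintf("sha256:%x", sum[:]), nil
    }

    func main() {
    	h, err := caCertHash("/etc/kubernetes/pki/ca.crt")
    	if err != nil {
    		fmt.Println("error:", err)
    		return
    	}
    	fmt.Println(h)
    }
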
I1121 14:29:36.147516 255774 main.go:143] libmachine: SSH cmd err, output: <nil>: default-k8s-diff-port-376255
I1121 14:29:36.147569 255774 ubuntu.go:182] provisioning hostname "default-k8s-diff-port-376255"
I1121 14:29:36.147633 255774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" default-k8s-diff-port-376255
I1121 14:29:36.169609 255774 main.go:143] libmachine: Using SSH client type: native
I1121 14:29:36.169898 255774 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x8416e0] 0x8443c0 <nil> [] 0s} 127.0.0.1 33070 <nil> <nil>}
I1121 14:29:36.169928 255774 main.go:143] libmachine: About to run SSH command:
sudo hostname default-k8s-diff-port-376255 && echo "default-k8s-diff-port-376255" | sudo tee /etc/hostname
I1121 14:29:36.328958 255774 main.go:143] libmachine: SSH cmd err, output: <nil>: default-k8s-diff-port-376255
I1121 14:29:36.329040 255774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" default-k8s-diff-port-376255
I1121 14:29:36.353105 255774 main.go:143] libmachine: Using SSH client type: native
I1121 14:29:36.353414 255774 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x8416e0] 0x8443c0 <nil> [] 0s} 127.0.0.1 33070 <nil> <nil>}
I1121 14:29:36.353448 255774 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sdefault-k8s-diff-port-376255' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 default-k8s-diff-port-376255/g' /etc/hosts;
else
echo '127.0.1.1 default-k8s-diff-port-376255' | sudo tee -a /etc/hosts;
fi
fi
I1121 14:29:36.504067 255774 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1121 14:29:36.504097 255774 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21847-11004/.minikube CaCertPath:/home/jenkins/minikube-integration/21847-11004/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21847-11004/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21847-11004/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21847-11004/.minikube}
I1121 14:29:36.504119 255774 ubuntu.go:190] setting up certificates
I1121 14:29:36.504133 255774 provision.go:84] configureAuth start
I1121 14:29:36.504206 255774 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" default-k8s-diff-port-376255
I1121 14:29:36.528674 255774 provision.go:143] copyHostCerts
I1121 14:29:36.528752 255774 exec_runner.go:144] found /home/jenkins/minikube-integration/21847-11004/.minikube/ca.pem, removing ...
I1121 14:29:36.528762 255774 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21847-11004/.minikube/ca.pem
I1121 14:29:36.528840 255774 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21847-11004/.minikube/ca.pem (1078 bytes)
I1121 14:29:36.528968 255774 exec_runner.go:144] found /home/jenkins/minikube-integration/21847-11004/.minikube/cert.pem, removing ...
I1121 14:29:36.528997 255774 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21847-11004/.minikube/cert.pem
I1121 14:29:36.529043 255774 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21847-11004/.minikube/cert.pem (1123 bytes)
I1121 14:29:36.529141 255774 exec_runner.go:144] found /home/jenkins/minikube-integration/21847-11004/.minikube/key.pem, removing ...
I1121 14:29:36.529152 255774 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21847-11004/.minikube/key.pem
I1121 14:29:36.529188 255774 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21847-11004/.minikube/key.pem (1675 bytes)
I1121 14:29:36.529281 255774 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21847-11004/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21847-11004/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21847-11004/.minikube/certs/ca-key.pem org=jenkins.default-k8s-diff-port-376255 san=[127.0.0.1 192.168.85.2 default-k8s-diff-port-376255 localhost minikube]
I1121 14:29:36.617208 255774 provision.go:177] copyRemoteCerts
I1121 14:29:36.617283 255774 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1121 14:29:36.617345 255774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" default-k8s-diff-port-376255
I1121 14:29:36.639948 255774 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33070 SSHKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/machines/default-k8s-diff-port-376255/id_rsa Username:docker}
I1121 14:29:36.749486 255774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I1121 14:29:36.777360 255774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/machines/server.pem --> /etc/docker/server.pem (1249 bytes)
I1121 14:29:36.804875 255774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1121 14:29:36.830920 255774 provision.go:87] duration metric: took 326.762892ms to configureAuth
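
The configureAuth sequence above generates a server certificate whose SAN list covers the node's IPs and hostnames (127.0.0.1, 192.168.85.2, the profile name, localhost, minikube) and copies it to /etc/docker on the node. The sketch below illustrates issuing a certificate with such a SAN list using Go's crypto/x509; as a simplification it self-signs, whereas the log's provisioner signs with the ca.pem/ca-key.pem pair it just copied:

    package main

    import (
    	"crypto/rand"
    	"crypto/rsa"
    	"crypto/x509"
    	"crypto/x509/pkix"
    	"encoding/pem"
    	"fmt"
    	"math/big"
    	"net"
    	"os"
    	"time"
    )

    // generateServerCert writes a PEM server certificate carrying the SAN
    // entries seen in the log. Self-signed here; the real code CA-signs it.
    func generateServerCert(path string) error {
    	key, err := rsa.GenerateKey(rand.Reader, 2048)
    	if err != nil {
    		return err
    	}
    	tmpl := &x509.Certificate{
    		SerialNumber: big.NewInt(1),
    		Subject:      pkix.Name{Organization: []string{"jenkins.default-k8s-diff-port-376255"}},
    		NotBefore:    time.Now(),
    		NotAfter:     time.Now().Add(3 * 365 * 24 * time.Hour),
    		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
    		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
    		IPAddresses:  []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("192.168.85.2")},
    		DNSNames:     []string{"default-k8s-diff-port-376255", "localhost", "minikube"},
    	}
    	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
    	if err != nil {
    		return err
    	}
    	block := &pem.Block{Type: "CERTIFICATE", Bytes: der}
    	return os.WriteFile(path, pem.EncodeToMemory(block), 0o644)
    }

    func main() {
    	if err := generateServerCert("server.pem"); err != nil {
    		fmt.Println(err)
    	}
    }
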
I1121 14:29:36.830953 255774 ubuntu.go:206] setting minikube options for container-runtime
I1121 14:29:36.831165 255774 config.go:182] Loaded profile config "default-k8s-diff-port-376255": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1121 14:29:36.831181 255774 machine.go:97] duration metric: took 3.855604158s to provisionDockerMachine
I1121 14:29:36.831191 255774 client.go:176] duration metric: took 11.666782197s to LocalClient.Create
I1121 14:29:36.831216 255774 start.go:167] duration metric: took 11.666902979s to libmachine.API.Create "default-k8s-diff-port-376255"
I1121 14:29:36.831234 255774 start.go:293] postStartSetup for "default-k8s-diff-port-376255" (driver="docker")
I1121 14:29:36.831254 255774 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1121 14:29:36.831311 255774 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1121 14:29:36.831360 255774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" default-k8s-diff-port-376255
I1121 14:29:36.855811 255774 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33070 SSHKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/machines/default-k8s-diff-port-376255/id_rsa Username:docker}
I1121 14:29:36.969760 255774 ssh_runner.go:195] Run: cat /etc/os-release
I1121 14:29:36.974452 255774 main.go:143] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1121 14:29:36.974529 255774 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1121 14:29:36.974577 255774 filesync.go:126] Scanning /home/jenkins/minikube-integration/21847-11004/.minikube/addons for local assets ...
I1121 14:29:36.974658 255774 filesync.go:126] Scanning /home/jenkins/minikube-integration/21847-11004/.minikube/files for local assets ...
I1121 14:29:36.974771 255774 filesync.go:149] local asset: /home/jenkins/minikube-integration/21847-11004/.minikube/files/etc/ssl/certs/145232.pem -> 145232.pem in /etc/ssl/certs
I1121 14:29:36.974903 255774 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1121 14:29:36.984975 255774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/files/etc/ssl/certs/145232.pem --> /etc/ssl/certs/145232.pem (1708 bytes)
I1121 14:29:37.017462 255774 start.go:296] duration metric: took 186.210262ms for postStartSetup
I1121 14:29:37.017947 255774 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" default-k8s-diff-port-376255
I1121 14:29:37.041309 255774 profile.go:143] Saving config to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/config.json ...
I1121 14:29:37.041659 255774 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1121 14:29:37.041731 255774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" default-k8s-diff-port-376255
I1121 14:29:37.070697 255774 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33070 SSHKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/machines/default-k8s-diff-port-376255/id_rsa Username:docker}
I1121 14:29:37.177189 255774 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1121 14:29:37.185711 255774 start.go:128] duration metric: took 12.024042461s to createHost
I1121 14:29:37.185741 255774 start.go:83] releasing machines lock for "default-k8s-diff-port-376255", held for 12.024206528s
I1121 14:29:37.185820 255774 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" default-k8s-diff-port-376255
I1121 14:29:37.211853 255774 ssh_runner.go:195] Run: cat /version.json
I1121 14:29:37.211903 255774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" default-k8s-diff-port-376255
I1121 14:29:37.211965 255774 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1121 14:29:37.212033 255774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" default-k8s-diff-port-376255
I1121 14:29:37.238575 255774 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33070 SSHKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/machines/default-k8s-diff-port-376255/id_rsa Username:docker}
I1121 14:29:37.242252 255774 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33070 SSHKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/machines/default-k8s-diff-port-376255/id_rsa Username:docker}
I1121 14:29:37.421321 255774 ssh_runner.go:195] Run: systemctl --version
I1121 14:29:37.431728 255774 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1121 14:29:37.437939 255774 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1121 14:29:37.438053 255774 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1121 14:29:37.469409 255774 cni.go:262] disabled [/etc/cni/net.d/10-crio-bridge.conflist.disabled, /etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
I1121 14:29:37.469437 255774 start.go:496] detecting cgroup driver to use...
I1121 14:29:37.469471 255774 detect.go:190] detected "systemd" cgroup driver on host os
I1121 14:29:37.469521 255774 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1121 14:29:37.490669 255774 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1121 14:29:37.507754 255774 docker.go:218] disabling cri-docker service (if available) ...
I1121 14:29:37.507821 255774 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1121 14:29:37.525644 255774 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1121 14:29:37.545289 255774 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1121 14:29:37.674060 255774 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1121 14:29:37.795128 255774 docker.go:234] disabling docker service ...
I1121 14:29:37.795198 255774 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1121 14:29:37.819043 255774 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1121 14:29:37.834819 255774 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1121 14:29:37.960408 255774 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1121 14:29:38.072269 255774 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1121 14:29:38.089314 255774 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1121 14:29:38.105248 255774 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I1121 14:29:38.117445 255774 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1121 14:29:38.128509 255774 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I1121 14:29:38.128607 255774 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I1121 14:29:38.139526 255774 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1121 14:29:38.150896 255774 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1121 14:29:38.161459 255774 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1121 14:29:38.173179 255774 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1121 14:29:38.183645 255774 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1121 14:29:38.194923 255774 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1121 14:29:38.207896 255774 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1121 14:29:38.220346 255774 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1121 14:29:38.230823 255774 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1121 14:29:38.241807 255774 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1121 14:29:38.339708 255774 ssh_runner.go:195] Run: sudo systemctl restart containerd
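
The sed sequence above rewrites /etc/containerd/config.toml in place (sandbox image, SystemdCgroup = true, runc runtime type, conf_dir, unprivileged ports) before restarting containerd. The sketch below performs one of those edits in Go instead of sed, preserving indentation the same way the `sed -r 's|...|g'` does; it assumes the config file already contains a SystemdCgroup key:

    package main

    import (
    	"fmt"
    	"os"
    	"regexp"
    )

    // setSystemdCgroup flips every `SystemdCgroup = ...` line to true while
    // keeping the original indentation, like the sed command in the log.
    func setSystemdCgroup(path string) error {
    	data, err := os.ReadFile(path)
    	if err != nil {
    		return err
    	}
    	re := regexp.MustCompile(`(?m)^(\s*)SystemdCgroup = .*$`)
    	out := re.ReplaceAll(data, []byte("${1}SystemdCgroup = true"))
    	return os.WriteFile(path, out, 0o644)
    }

    func main() {
    	if err := setSystemdCgroup("/etc/containerd/config.toml"); err != nil {
    		fmt.Println("error:", err)
    	}
    }
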
I1121 14:29:38.460319 255774 start.go:543] Will wait 60s for socket path /run/containerd/containerd.sock
I1121 14:29:38.460387 255774 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I1121 14:29:38.465812 255774 start.go:564] Will wait 60s for crictl version
I1121 14:29:38.465875 255774 ssh_runner.go:195] Run: which crictl
I1121 14:29:38.470166 255774 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1121 14:29:38.507773 255774 start.go:580] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: v2.1.5
RuntimeApiVersion: v1
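
The start.go lines above wait up to 60s for /run/containerd/containerd.sock to appear, then for `crictl version` to answer. A minimal polling sketch of the first wait; the poll interval is an assumption, and the real implementation runs the stat over SSH rather than locally:

    package main

    import (
    	"fmt"
    	"os"
    	"time"
    )

    // waitForSocket polls for a socket path until it exists or the deadline
    // passes, mirroring the "Will wait 60s for socket path" step.
    func waitForSocket(path string, timeout time.Duration) error {
    	deadline := time.Now().Add(timeout)
    	for time.Now().Before(deadline) {
    		if _, err := os.Stat(path); err == nil {
    			return nil
    		}
    		time.Sleep(500 * time.Millisecond)
    	}
    	return fmt.Errorf("timed out after %s waiting for %s", timeout, path)
    }

    func main() {
    	if err := waitForSocket("/run/containerd/containerd.sock", 60*time.Second); err != nil {
    		fmt.Println(err)
    		return
    	}
    	fmt.Println("containerd socket is ready")
    }
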
I1121 14:29:38.507860 255774 ssh_runner.go:195] Run: containerd --version
I1121 14:29:38.532247 255774 ssh_runner.go:195] Run: containerd --version
I1121 14:29:38.559098 255774 out.go:179] * Preparing Kubernetes v1.34.1 on containerd 2.1.5 ...
W1121 14:29:33.655577 213058 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1121 14:29:33.655599 213058 logs.go:123] Gathering logs for kube-scheduler [e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6] ...
I1121 14:29:33.655612 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6"
I1121 14:29:36.225853 213058 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1121 14:29:36.226247 213058 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1121 14:29:36.226304 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1121 14:29:36.226364 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1121 14:29:36.259583 213058 cri.go:89] found id: "9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1"
I1121 14:29:36.259613 213058 cri.go:89] found id: "934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780"
I1121 14:29:36.259619 213058 cri.go:89] found id: ""
I1121 14:29:36.259628 213058 logs.go:282] 2 containers: [9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1 934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780]
I1121 14:29:36.259690 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:36.264798 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:36.269597 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1121 14:29:36.269663 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1121 14:29:36.304312 213058 cri.go:89] found id: "4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359"
I1121 14:29:36.304335 213058 cri.go:89] found id: ""
I1121 14:29:36.304346 213058 logs.go:282] 1 containers: [4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359]
I1121 14:29:36.304403 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:36.309760 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1121 14:29:36.309833 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1121 14:29:36.342617 213058 cri.go:89] found id: ""
I1121 14:29:36.342643 213058 logs.go:282] 0 containers: []
W1121 14:29:36.342653 213058 logs.go:284] No container was found matching "coredns"
I1121 14:29:36.342660 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1121 14:29:36.342722 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1121 14:29:36.378880 213058 cri.go:89] found id: "e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6"
I1121 14:29:36.378909 213058 cri.go:89] found id: "f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545"
I1121 14:29:36.378914 213058 cri.go:89] found id: ""
I1121 14:29:36.378924 213058 logs.go:282] 2 containers: [e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6 f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545]
I1121 14:29:36.378996 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:36.384032 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:36.388866 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1121 14:29:36.388932 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1121 14:29:36.427253 213058 cri.go:89] found id: ""
I1121 14:29:36.427282 213058 logs.go:282] 0 containers: []
W1121 14:29:36.427293 213058 logs.go:284] No container was found matching "kube-proxy"
I1121 14:29:36.427300 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1121 14:29:36.427355 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1121 14:29:36.461581 213058 cri.go:89] found id: "94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3"
I1121 14:29:36.461604 213058 cri.go:89] found id: "56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463"
I1121 14:29:36.461609 213058 cri.go:89] found id: ""
I1121 14:29:36.461618 213058 logs.go:282] 2 containers: [94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3 56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463]
I1121 14:29:36.461677 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:36.466623 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:36.471422 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1121 14:29:36.471490 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1121 14:29:36.503502 213058 cri.go:89] found id: ""
I1121 14:29:36.503533 213058 logs.go:282] 0 containers: []
W1121 14:29:36.503566 213058 logs.go:284] No container was found matching "kindnet"
I1121 14:29:36.503575 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1121 14:29:36.503633 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1121 14:29:36.538350 213058 cri.go:89] found id: ""
I1121 14:29:36.538379 213058 logs.go:282] 0 containers: []
W1121 14:29:36.538390 213058 logs.go:284] No container was found matching "storage-provisioner"
I1121 14:29:36.538404 213058 logs.go:123] Gathering logs for kubelet ...
I1121 14:29:36.538419 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1121 14:29:36.666987 213058 logs.go:123] Gathering logs for dmesg ...
I1121 14:29:36.667025 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1121 14:29:36.685628 213058 logs.go:123] Gathering logs for describe nodes ...
I1121 14:29:36.685659 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1121 14:29:36.763464 213058 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1121 14:29:36.763491 213058 logs.go:123] Gathering logs for kube-apiserver [934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780] ...
I1121 14:29:36.763508 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780"
I1121 14:29:36.808789 213058 logs.go:123] Gathering logs for kube-scheduler [e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6] ...
I1121 14:29:36.808832 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6"
I1121 14:29:36.887558 213058 logs.go:123] Gathering logs for containerd ...
I1121 14:29:36.887596 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1121 14:29:36.952391 213058 logs.go:123] Gathering logs for container status ...
I1121 14:29:36.952434 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1121 14:29:36.993139 213058 logs.go:123] Gathering logs for kube-apiserver [9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1] ...
I1121 14:29:36.993167 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1"
I1121 14:29:37.037499 213058 logs.go:123] Gathering logs for etcd [4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359] ...
I1121 14:29:37.037552 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359"
I1121 14:29:37.084237 213058 logs.go:123] Gathering logs for kube-scheduler [f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545] ...
I1121 14:29:37.084270 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545"
I1121 14:29:37.132236 213058 logs.go:123] Gathering logs for kube-controller-manager [94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3] ...
I1121 14:29:37.132272 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3"
I1121 14:29:37.172720 213058 logs.go:123] Gathering logs for kube-controller-manager [56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463] ...
I1121 14:29:37.172753 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463"
I1121 14:29:34.341753 252125 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-apiserver_v1.34.1: (1.474720913s)
I1121 14:29:34.341781 252125 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1 from cache
I1121 14:29:34.341812 252125 containerd.go:285] Loading image: /var/lib/minikube/images/etcd_3.6.4-0
I1121 14:29:34.341855 252125 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/etcd_3.6.4-0
I1121 14:29:37.308520 252125 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/etcd_3.6.4-0: (2.966633628s)
I1121 14:29:37.308585 252125 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0 from cache
I1121 14:29:37.308616 252125 containerd.go:285] Loading image: /var/lib/minikube/images/storage-provisioner_v5
I1121 14:29:37.308666 252125 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/storage-provisioner_v5
I1121 14:29:37.772300 252125 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 from cache
I1121 14:29:37.772349 252125 cache_images.go:125] Successfully loaded all cached images
I1121 14:29:37.772358 252125 cache_images.go:94] duration metric: took 13.627858156s to LoadCachedImages
I1121 14:29:37.772375 252125 kubeadm.go:935] updating node { 192.168.103.2 8443 v1.34.1 containerd true true} ...
I1121 14:29:37.772522 252125 kubeadm.go:947] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=no-preload-921956 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.103.2
[Install]
config:
{KubernetesVersion:v1.34.1 ClusterName:no-preload-921956 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1121 14:29:37.772622 252125 ssh_runner.go:195] Run: sudo crictl info
I1121 14:29:37.802988 252125 cni.go:84] Creating CNI manager for ""
I1121 14:29:37.803017 252125 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1121 14:29:37.803041 252125 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1121 14:29:37.803067 252125 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.103.2 APIServerPort:8443 KubernetesVersion:v1.34.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:no-preload-921956 NodeName:no-preload-921956 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.103.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.103.2 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1121 14:29:37.803212 252125 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.103.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///run/containerd/containerd.sock
name: "no-preload-921956"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.103.2"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.103.2"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.1
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
I1121 14:29:37.803298 252125 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.1
I1121 14:29:37.814189 252125 binaries.go:54] Didn't find k8s binaries: sudo ls /var/lib/minikube/binaries/v1.34.1: Process exited with status 2
stdout:
stderr:
ls: cannot access '/var/lib/minikube/binaries/v1.34.1': No such file or directory
I1121 14:29:37.814255 252125 ssh_runner.go:195] Run: sudo mkdir -p /var/lib/minikube/binaries/v1.34.1
I1121 14:29:37.824124 252125 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubectl?checksum=file:https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubectl.sha256
I1121 14:29:37.824214 252125 download.go:108] Downloading: https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubeadm?checksum=file:https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubeadm.sha256 -> /home/jenkins/minikube-integration/21847-11004/.minikube/cache/linux/amd64/v1.34.1/kubeadm
I1121 14:29:37.824231 252125 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubectl
I1121 14:29:37.824217 252125 download.go:108] Downloading: https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubelet?checksum=file:https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubelet.sha256 -> /home/jenkins/minikube-integration/21847-11004/.minikube/cache/linux/amd64/v1.34.1/kubelet
I1121 14:29:37.829417 252125 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.34.1/kubectl: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubectl: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.34.1/kubectl': No such file or directory
I1121 14:29:37.829466 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/cache/linux/amd64/v1.34.1/kubectl --> /var/lib/minikube/binaries/v1.34.1/kubectl (60559544 bytes)
I1121 14:29:38.860713 252125 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1121 14:29:38.875498 252125 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubelet
I1121 14:29:38.880447 252125 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.34.1/kubelet: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubelet: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.34.1/kubelet': No such file or directory
I1121 14:29:38.880477 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/cache/linux/amd64/v1.34.1/kubelet --> /var/lib/minikube/binaries/v1.34.1/kubelet (59195684 bytes)
I1121 14:29:39.014274 252125 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubeadm
I1121 14:29:39.021151 252125 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.34.1/kubeadm: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubeadm: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.34.1/kubeadm': No such file or directory
I1121 14:29:39.021187 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/cache/linux/amd64/v1.34.1/kubeadm --> /var/lib/minikube/binaries/v1.34.1/kubeadm (74027192 bytes)
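Taken together, the lines above implement install-on-miss: stat each target under /var/lib/minikube/binaries, and only when that fails download the binary (verified against the published .sha256, per the checksum=file: URLs) and copy it over. A compressed local sketch of the verify-then-install idea; the URL is real but the destination is a placeholder:

package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"net/http"
	"os"
	"strings"
)

// fetchVerified downloads url to dst and checks the result against the hex
// digest published at url+".sha256", the checksum=file: convention in the log.
func fetchVerified(url, dst string) error {
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	f, err := os.Create(dst)
	if err != nil {
		return err
	}
	defer f.Close()
	h := sha256.New()
	if _, err := io.Copy(io.MultiWriter(f, h), resp.Body); err != nil {
		return err
	}
	sumResp, err := http.Get(url + ".sha256")
	if err != nil {
		return err
	}
	defer sumResp.Body.Close()
	want, err := io.ReadAll(sumResp.Body)
	if err != nil {
		return err
	}
	// The .sha256 file begins with the hex digest (optionally followed by a name).
	if !strings.HasPrefix(strings.TrimSpace(string(want)), fmt.Sprintf("%x", h.Sum(nil))) {
		return fmt.Errorf("checksum mismatch for %s", url)
	}
	return nil
}

func main() {
	dst := "/tmp/kubectl"
	if _, err := os.Stat(dst); err == nil {
		return // already present: the "Found k8s binaries, skipping transfer" path
	}
	if err := fetchVerified("https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubectl", dst); err != nil {
		fmt.Println(err)
	}
}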
I1121 14:29:39.234010 252125 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1121 14:29:39.244382 252125 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (322 bytes)
I1121 14:29:39.259897 252125 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1121 14:29:39.279143 252125 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2232 bytes)
I1121 14:29:38.560688 255774 cli_runner.go:164] Run: docker network inspect default-k8s-diff-port-376255 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1121 14:29:38.580956 255774 ssh_runner.go:195] Run: grep 192.168.85.1 host.minikube.internal$ /etc/hosts
I1121 14:29:38.585728 255774 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.85.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
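The bash pipeline above is an idempotent hosts-file upsert: strip any line already ending in the name, append the fresh mapping, and copy the result back. The same rewrite in Go, against a placeholder hosts path:

package main

import (
	"fmt"
	"os"
	"strings"
)

// upsertHost removes any line ending in name and appends "ip\tname",
// mirroring the grep -v / echo / cp pipeline in the log.
func upsertHost(path, ip, name string) error {
	data, err := os.ReadFile(path)
	if err != nil {
		return err
	}
	var kept []string
	for _, line := range strings.Split(strings.TrimRight(string(data), "\n"), "\n") {
		if strings.HasSuffix(line, "\t"+name) || strings.HasSuffix(line, " "+name) {
			continue // drop the stale mapping
		}
		kept = append(kept, line)
	}
	kept = append(kept, fmt.Sprintf("%s\t%s", ip, name))
	return os.WriteFile(path, []byte(strings.Join(kept, "\n")+"\n"), 0644)
}

func main() {
	if err := upsertHost("/tmp/hosts", "192.168.85.1", "host.minikube.internal"); err != nil {
		fmt.Println(err)
	}
}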
I1121 14:29:38.599140 255774 kubeadm.go:884] updating cluster {Name:default-k8s-diff-port-376255 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8444 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:default-k8s-diff-port-376255 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8444 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1121 14:29:38.599295 255774 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
I1121 14:29:38.599391 255774 ssh_runner.go:195] Run: sudo crictl images --output json
I1121 14:29:38.631637 255774 containerd.go:627] all images are preloaded for containerd runtime.
I1121 14:29:38.631660 255774 containerd.go:534] Images already preloaded, skipping extraction
I1121 14:29:38.631720 255774 ssh_runner.go:195] Run: sudo crictl images --output json
I1121 14:29:38.665498 255774 containerd.go:627] all images are preloaded for containerd runtime.
I1121 14:29:38.665522 255774 cache_images.go:86] Images are preloaded, skipping loading
I1121 14:29:38.665530 255774 kubeadm.go:935] updating node { 192.168.85.2 8444 v1.34.1 containerd true true} ...
I1121 14:29:38.665659 255774 kubeadm.go:947] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=default-k8s-diff-port-376255 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.85.2
[Install]
config:
{KubernetesVersion:v1.34.1 ClusterName:default-k8s-diff-port-376255 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1121 14:29:38.665752 255774 ssh_runner.go:195] Run: sudo crictl info
I1121 14:29:38.694106 255774 cni.go:84] Creating CNI manager for ""
I1121 14:29:38.694138 255774 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1121 14:29:38.694156 255774 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1121 14:29:38.694182 255774 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.85.2 APIServerPort:8444 KubernetesVersion:v1.34.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:default-k8s-diff-port-376255 NodeName:default-k8s-diff-port-376255 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.85.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.85.2 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1121 14:29:38.694318 255774 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.85.2
bindPort: 8444
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///run/containerd/containerd.sock
name: "default-k8s-diff-port-376255"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.85.2"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.85.2"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8444
etcd:
local:
dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.1
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
I1121 14:29:38.694377 255774 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.1
I1121 14:29:38.704016 255774 binaries.go:51] Found k8s binaries, skipping transfer
I1121 14:29:38.704074 255774 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1121 14:29:38.712471 255774 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (332 bytes)
I1121 14:29:38.726311 255774 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1121 14:29:38.743589 255774 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2240 bytes)
I1121 14:29:38.759275 255774 ssh_runner.go:195] Run: grep 192.168.85.2 control-plane.minikube.internal$ /etc/hosts
I1121 14:29:38.763723 255774 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.85.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1121 14:29:38.775814 255774 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1121 14:29:38.870850 255774 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1121 14:29:38.898876 255774 certs.go:69] Setting up /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255 for IP: 192.168.85.2
I1121 14:29:38.898898 255774 certs.go:195] generating shared ca certs ...
I1121 14:29:38.898917 255774 certs.go:227] acquiring lock for ca certs: {Name:mk4ac68319839cd6684afc66121341297238277f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:38.899068 255774 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21847-11004/.minikube/ca.key
I1121 14:29:38.899116 255774 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21847-11004/.minikube/proxy-client-ca.key
I1121 14:29:38.899130 255774 certs.go:257] generating profile certs ...
I1121 14:29:38.899196 255774 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/client.key
I1121 14:29:38.899223 255774 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/client.crt with IP's: []
I1121 14:29:39.101636 255774 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/client.crt ...
I1121 14:29:39.101669 255774 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/client.crt: {Name:mk48f410a390b01d5b10a9357a2648374ae8306b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:39.101873 255774 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/client.key ...
I1121 14:29:39.101885 255774 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/client.key: {Name:mkb89c45215e08640f5b5fa9a6de6863ea0983e1 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:39.102008 255774 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/apiserver.key.3377c066
I1121 14:29:39.102024 255774 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/apiserver.crt.3377c066 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.85.2]
I1121 14:29:39.438352 255774 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/apiserver.crt.3377c066 ...
I1121 14:29:39.438387 255774 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/apiserver.crt.3377c066: {Name:mkc5f7dc938a9541dec0c2accd850515b39a25d9 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:39.438574 255774 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/apiserver.key.3377c066 ...
I1121 14:29:39.438586 255774 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/apiserver.key.3377c066: {Name:mka67f2d91e35acd02a0ed4174188db6877ef796 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:39.438666 255774 certs.go:382] copying /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/apiserver.crt.3377c066 -> /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/apiserver.crt
I1121 14:29:39.438744 255774 certs.go:386] copying /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/apiserver.key.3377c066 -> /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/apiserver.key
I1121 14:29:39.438811 255774 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/proxy-client.key
I1121 14:29:39.438826 255774 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/proxy-client.crt with IP's: []
I1121 14:29:39.523793 255774 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/proxy-client.crt ...
I1121 14:29:39.523827 255774 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/proxy-client.crt: {Name:mk2418751bb08ae4f2cae2628ba430b2e731f823 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:39.524011 255774 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/proxy-client.key ...
I1121 14:29:39.524031 255774 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/proxy-client.key: {Name:mk12031f310020bd38886fd870544563c6ab1faf Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
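Each profile cert above is signed by the shared minikubeCA, with the apiserver cert carrying the service VIP, localhost, and node IP as SANs. A self-contained crypto/x509 sketch of that signing; it generates a throwaway CA in place of reading minikube's ca.key:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"fmt"
	"math/big"
	"net"
	"time"
)

func main() {
	// Throwaway CA, standing in for minikubeCA.
	caKey, _ := rsa.GenerateKey(rand.Reader, 2048)
	caTmpl := &x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{CommonName: "minikubeCA"},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().Add(26280 * time.Hour), // matches CertExpiration in the log
		IsCA:                  true,
		KeyUsage:              x509.KeyUsageCertSign,
		BasicConstraintsValid: true,
	}
	caDER, _ := x509.CreateCertificate(rand.Reader, caTmpl, caTmpl, &caKey.PublicKey, caKey)
	caCert, _ := x509.ParseCertificate(caDER)

	// Leaf cert with the same IP SAN set the apiserver cert gets above.
	leafKey, _ := rsa.GenerateKey(rand.Reader, 2048)
	leafTmpl := &x509.Certificate{
		SerialNumber: big.NewInt(2),
		Subject:      pkix.Name{CommonName: "minikube"},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(24 * time.Hour),
		IPAddresses: []net.IP{
			net.ParseIP("10.96.0.1"), net.ParseIP("127.0.0.1"),
			net.ParseIP("10.0.0.1"), net.ParseIP("192.168.85.2"),
		},
		KeyUsage:    x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
	}
	der, err := x509.CreateCertificate(rand.Reader, leafTmpl, caCert, &leafKey.PublicKey, caKey)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("signed cert: %d bytes DER\n", len(der))
}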
I1121 14:29:39.524255 255774 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/14523.pem (1338 bytes)
W1121 14:29:39.524307 255774 certs.go:480] ignoring /home/jenkins/minikube-integration/21847-11004/.minikube/certs/14523_empty.pem, impossibly tiny 0 bytes
I1121 14:29:39.524323 255774 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/ca-key.pem (1675 bytes)
I1121 14:29:39.524353 255774 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/ca.pem (1078 bytes)
I1121 14:29:39.524383 255774 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/cert.pem (1123 bytes)
I1121 14:29:39.524407 255774 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/key.pem (1675 bytes)
I1121 14:29:39.524445 255774 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-11004/.minikube/files/etc/ssl/certs/145232.pem (1708 bytes)
I1121 14:29:39.525071 255774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1121 14:29:39.546065 255774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I1121 14:29:39.565880 255774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1121 14:29:39.585450 255774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I1121 14:29:39.604394 255774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1440 bytes)
I1121 14:29:39.623736 255774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I1121 14:29:39.642460 255774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1121 14:29:39.661463 255774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I1121 14:29:39.681314 255774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/certs/14523.pem --> /usr/share/ca-certificates/14523.pem (1338 bytes)
I1121 14:29:39.879137 255774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/files/etc/ssl/certs/145232.pem --> /usr/share/ca-certificates/145232.pem (1708 bytes)
I1121 14:29:39.899730 255774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1121 14:29:39.918630 255774 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1121 14:29:39.935942 255774 ssh_runner.go:195] Run: openssl version
I1121 14:29:39.943062 255774 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/145232.pem && ln -fs /usr/share/ca-certificates/145232.pem /etc/ssl/certs/145232.pem"
I1121 14:29:40.020861 255774 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/145232.pem
I1121 14:29:40.026152 255774 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 21 14:01 /usr/share/ca-certificates/145232.pem
I1121 14:29:40.026209 255774 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/145232.pem
I1121 14:29:40.067681 255774 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/145232.pem /etc/ssl/certs/3ec20f2e.0"
I1121 14:29:40.077051 255774 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1121 14:29:40.087944 255774 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1121 14:29:40.092369 255774 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 21 13:56 /usr/share/ca-certificates/minikubeCA.pem
I1121 14:29:40.092434 255774 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1121 14:29:40.132125 255774 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1121 14:29:40.142255 255774 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/14523.pem && ln -fs /usr/share/ca-certificates/14523.pem /etc/ssl/certs/14523.pem"
I1121 14:29:40.152828 255774 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/14523.pem
I1121 14:29:40.157171 255774 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 21 14:01 /usr/share/ca-certificates/14523.pem
I1121 14:29:40.157265 255774 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/14523.pem
I1121 14:29:40.199881 255774 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/14523.pem /etc/ssl/certs/51391683.0"
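The openssl x509 -hash / ln -fs pairs above reproduce what c_rehash does: OpenSSL locates trust anchors by subject-hash filenames such as b5213941.0, so each PEM gets a hash-named symlink under /etc/ssl/certs. A sketch, with a placeholder certs directory:

package main

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
)

// linkByHash symlinks certsDir/<subject-hash>.0 -> pem, the layout OpenSSL
// uses for lookup-by-hash (the ln -fs calls in the log).
func linkByHash(pem, certsDir string) error {
	out, err := exec.Command("openssl", "x509", "-hash", "-noout", "-in", pem).Output()
	if err != nil {
		return err
	}
	hash := strings.TrimSpace(string(out)) // e.g. "b5213941"
	link := filepath.Join(certsDir, hash+".0")
	os.Remove(link) // emulate ln's -f (force) flag
	return os.Symlink(pem, link)
}

func main() {
	if err := linkByHash("/usr/share/ca-certificates/minikubeCA.pem", "/tmp/certs"); err != nil {
		fmt.Println(err)
	}
}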
I1121 14:29:40.210053 255774 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1121 14:29:40.214456 255774 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1121 14:29:40.214524 255774 kubeadm.go:401] StartCluster: {Name:default-k8s-diff-port-376255 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8444 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:default-k8s-diff-port-376255 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8444 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1121 14:29:40.214625 255774 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I1121 14:29:40.214692 255774 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I1121 14:29:40.249359 255774 cri.go:89] found id: ""
I1121 14:29:40.249429 255774 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1121 14:29:40.259121 255774 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1121 14:29:40.270847 255774 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I1121 14:29:40.270910 255774 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1121 14:29:40.283266 255774 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1121 14:29:40.283287 255774 kubeadm.go:158] found existing configuration files:
I1121 14:29:40.283341 255774 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8444 /etc/kubernetes/admin.conf
I1121 14:29:40.293676 255774 kubeadm.go:164] "https://control-plane.minikube.internal:8444" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8444 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1121 14:29:40.293725 255774 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1121 14:29:40.303277 255774 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8444 /etc/kubernetes/kubelet.conf
I1121 14:29:40.313015 255774 kubeadm.go:164] "https://control-plane.minikube.internal:8444" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8444 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1121 14:29:40.313073 255774 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1121 14:29:40.322086 255774 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8444 /etc/kubernetes/controller-manager.conf
I1121 14:29:40.330920 255774 kubeadm.go:164] "https://control-plane.minikube.internal:8444" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8444 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1121 14:29:40.331015 255774 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1121 14:29:40.339376 255774 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8444 /etc/kubernetes/scheduler.conf
I1121 14:29:40.347984 255774 kubeadm.go:164] "https://control-plane.minikube.internal:8444" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8444 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1121 14:29:40.348046 255774 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
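The cleanup pass above greps each kubeconfig for the expected control-plane endpoint and removes files that lack it; on this first start all four files are absent, so the rm calls are no-ops. A sketch of that check-and-remove logic, with placeholder paths:

package main

import (
	"fmt"
	"os"
	"strings"
)

// removeIfStale deletes path unless it already mentions endpoint, mirroring
// the grep-then-rm sequence in the log. A missing file falls through to the
// removal, which is then a no-op.
func removeIfStale(path, endpoint string) error {
	data, err := os.ReadFile(path)
	if err == nil && strings.Contains(string(data), endpoint) {
		return nil // config already points at the right endpoint; keep it
	}
	if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
		return err
	}
	return nil
}

func main() {
	endpoint := "https://control-plane.minikube.internal:8444"
	for _, f := range []string{"admin.conf", "kubelet.conf", "controller-manager.conf", "scheduler.conf"} {
		if err := removeIfStale("/tmp/kubernetes/"+f, endpoint); err != nil {
			fmt.Println(err)
		}
	}
}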
I1121 14:29:40.356683 255774 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1121 14:29:40.404354 255774 kubeadm.go:319] [init] Using Kubernetes version: v1.34.1
I1121 14:29:40.404455 255774 kubeadm.go:319] [preflight] Running pre-flight checks
I1121 14:29:40.435448 255774 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I1121 14:29:40.435583 255774 kubeadm.go:319] KERNEL_VERSION: 6.8.0-1044-gcp
I1121 14:29:40.435628 255774 kubeadm.go:319] OS: Linux
I1121 14:29:40.435689 255774 kubeadm.go:319] CGROUPS_CPU: enabled
I1121 14:29:40.435827 255774 kubeadm.go:319] CGROUPS_CPUSET: enabled
I1121 14:29:40.435905 255774 kubeadm.go:319] CGROUPS_DEVICES: enabled
I1121 14:29:40.436039 255774 kubeadm.go:319] CGROUPS_FREEZER: enabled
I1121 14:29:40.436108 255774 kubeadm.go:319] CGROUPS_MEMORY: enabled
I1121 14:29:40.436176 255774 kubeadm.go:319] CGROUPS_PIDS: enabled
I1121 14:29:40.436276 255774 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I1121 14:29:40.436351 255774 kubeadm.go:319] CGROUPS_IO: enabled
I1121 14:29:40.508224 255774 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1121 14:29:40.508370 255774 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1121 14:29:40.508531 255774 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I1121 14:29:40.513996 255774 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
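kubeadm init then runs with the rendered config and a long ignore-preflight-errors list, needed because a docker-driver container cannot pass SystemVerification, swap, or the bridge-nf-call sysctl checks. A sketch of assembling that invocation (the ignore list is abbreviated here):

package main

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
)

func main() {
	// Abbreviated; the log passes several more DirAvailable/FileAvailable entries.
	ignored := []string{
		"DirAvailable--etc-kubernetes-manifests",
		"Port-10250", "Swap", "NumCPU", "Mem", "SystemVerification",
		"FileContent--proc-sys-net-bridge-bridge-nf-call-iptables",
	}
	cmd := exec.Command("/var/lib/minikube/binaries/v1.34.1/kubeadm",
		"init",
		"--config", "/var/tmp/minikube/kubeadm.yaml",
		"--ignore-preflight-errors="+strings.Join(ignored, ","))
	// Prefix PATH with the versioned binaries dir, as the log's
	// env PATH="...:$PATH" wrapper does, so kubeadm finds the matching kubelet.
	cmd.Env = []string{"PATH=/var/lib/minikube/binaries/v1.34.1:" + os.Getenv("PATH")}
	out, err := cmd.CombinedOutput()
	fmt.Printf("%s\nerr: %v\n", out, err)
}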
I1121 14:29:39.295828 252125 ssh_runner.go:195] Run: grep 192.168.103.2 control-plane.minikube.internal$ /etc/hosts
I1121 14:29:39.301164 252125 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.103.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1121 14:29:39.312709 252125 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1121 14:29:39.400897 252125 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1121 14:29:39.429294 252125 certs.go:69] Setting up /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956 for IP: 192.168.103.2
I1121 14:29:39.429315 252125 certs.go:195] generating shared ca certs ...
I1121 14:29:39.429332 252125 certs.go:227] acquiring lock for ca certs: {Name:mk4ac68319839cd6684afc66121341297238277f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:39.429485 252125 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21847-11004/.minikube/ca.key
I1121 14:29:39.429583 252125 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21847-11004/.minikube/proxy-client-ca.key
I1121 14:29:39.429600 252125 certs.go:257] generating profile certs ...
I1121 14:29:39.429678 252125 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/client.key
I1121 14:29:39.429693 252125 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/client.crt with IP's: []
I1121 14:29:39.556088 252125 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/client.crt ...
I1121 14:29:39.556115 252125 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/client.crt: {Name:mkc697edce2d4ccb5a4a2ccbe74255aef4a205c9 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:39.556297 252125 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/client.key ...
I1121 14:29:39.556312 252125 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/client.key: {Name:mkad7b167b883af61314c3f8b6c71358edc782dd Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:39.556419 252125 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/apiserver.key.a2c9a71d
I1121 14:29:39.556435 252125 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/apiserver.crt.a2c9a71d with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.103.2]
I1121 14:29:39.871499 252125 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/apiserver.crt.a2c9a71d ...
I1121 14:29:39.871529 252125 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/apiserver.crt.a2c9a71d: {Name:mkc839b1c936af809ed1159ef4599336fd260d32 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:39.871726 252125 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/apiserver.key.a2c9a71d ...
I1121 14:29:39.871748 252125 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/apiserver.key.a2c9a71d: {Name:mkc2f0abcac84f6547f3e0edb165e90b14fdd7c2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:39.871882 252125 certs.go:382] copying /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/apiserver.crt.a2c9a71d -> /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/apiserver.crt
I1121 14:29:39.871997 252125 certs.go:386] copying /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/apiserver.key.a2c9a71d -> /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/apiserver.key
I1121 14:29:39.872096 252125 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/proxy-client.key
I1121 14:29:39.872120 252125 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/proxy-client.crt with IP's: []
I1121 14:29:40.083173 252125 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/proxy-client.crt ...
I1121 14:29:40.083201 252125 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/proxy-client.crt: {Name:mkba7efd029f616230e0b3cf14c4f32abac0549e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:40.083385 252125 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/proxy-client.key ...
I1121 14:29:40.083414 252125 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/proxy-client.key: {Name:mk24f6fbb57f5dfce4a401be193e0a832a6ccf6a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:40.083661 252125 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/14523.pem (1338 bytes)
W1121 14:29:40.083700 252125 certs.go:480] ignoring /home/jenkins/minikube-integration/21847-11004/.minikube/certs/14523_empty.pem, impossibly tiny 0 bytes
I1121 14:29:40.083711 252125 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/ca-key.pem (1675 bytes)
I1121 14:29:40.083749 252125 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/ca.pem (1078 bytes)
I1121 14:29:40.083780 252125 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/cert.pem (1123 bytes)
I1121 14:29:40.083827 252125 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/key.pem (1675 bytes)
I1121 14:29:40.083887 252125 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-11004/.minikube/files/etc/ssl/certs/145232.pem (1708 bytes)
I1121 14:29:40.084653 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1121 14:29:40.106430 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I1121 14:29:40.126520 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1121 14:29:40.148412 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I1121 14:29:40.169973 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
I1121 14:29:40.191493 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I1121 14:29:40.214458 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1121 14:29:40.234692 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I1121 14:29:40.261986 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/files/etc/ssl/certs/145232.pem --> /usr/share/ca-certificates/145232.pem (1708 bytes)
I1121 14:29:40.352437 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1121 14:29:40.372804 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/certs/14523.pem --> /usr/share/ca-certificates/14523.pem (1338 bytes)
I1121 14:29:40.394700 252125 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1121 14:29:40.411183 252125 ssh_runner.go:195] Run: openssl version
I1121 14:29:40.419607 252125 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1121 14:29:40.431060 252125 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1121 14:29:40.436371 252125 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 21 13:56 /usr/share/ca-certificates/minikubeCA.pem
I1121 14:29:40.436429 252125 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1121 14:29:40.481320 252125 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1121 14:29:40.492797 252125 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/14523.pem && ln -fs /usr/share/ca-certificates/14523.pem /etc/ssl/certs/14523.pem"
I1121 14:29:40.502878 252125 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/14523.pem
I1121 14:29:40.507432 252125 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 21 14:01 /usr/share/ca-certificates/14523.pem
I1121 14:29:40.507499 252125 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/14523.pem
I1121 14:29:40.567779 252125 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/14523.pem /etc/ssl/certs/51391683.0"
I1121 14:29:40.577673 252125 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/145232.pem && ln -fs /usr/share/ca-certificates/145232.pem /etc/ssl/certs/145232.pem"
I1121 14:29:40.587826 252125 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/145232.pem
I1121 14:29:40.592472 252125 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 21 14:01 /usr/share/ca-certificates/145232.pem
I1121 14:29:40.592528 252125 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/145232.pem
I1121 14:29:40.627626 252125 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/145232.pem /etc/ssl/certs/3ec20f2e.0"
I1121 14:29:40.637464 252125 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1121 14:29:40.641884 252125 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1121 14:29:40.641943 252125 kubeadm.go:401] StartCluster: {Name:no-preload-921956 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-921956 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.103.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1121 14:29:40.642030 252125 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I1121 14:29:40.642085 252125 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I1121 14:29:40.673351 252125 cri.go:89] found id: ""
I1121 14:29:40.673423 252125 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1121 14:29:40.682715 252125 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1121 14:29:40.691493 252125 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I1121 14:29:40.691581 252125 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1121 14:29:40.700143 252125 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1121 14:29:40.700160 252125 kubeadm.go:158] found existing configuration files:
I1121 14:29:40.700205 252125 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1121 14:29:40.708734 252125 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1121 14:29:40.708799 252125 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1121 14:29:40.717135 252125 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1121 14:29:40.726191 252125 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1121 14:29:40.726262 252125 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1121 14:29:40.734074 252125 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1121 14:29:40.742647 252125 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1121 14:29:40.742709 252125 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1121 14:29:40.751091 252125 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1121 14:29:40.759770 252125 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1121 14:29:40.759841 252125 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1121 14:29:40.768253 252125 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1121 14:29:40.810825 252125 kubeadm.go:319] [init] Using Kubernetes version: v1.34.1
I1121 14:29:40.810892 252125 kubeadm.go:319] [preflight] Running pre-flight checks
I1121 14:29:40.831836 252125 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I1121 14:29:40.831940 252125 kubeadm.go:319] KERNEL_VERSION: 6.8.0-1044-gcp
I1121 14:29:40.832026 252125 kubeadm.go:319] OS: Linux
I1121 14:29:40.832115 252125 kubeadm.go:319] CGROUPS_CPU: enabled
I1121 14:29:40.832212 252125 kubeadm.go:319] CGROUPS_CPUSET: enabled
I1121 14:29:40.832286 252125 kubeadm.go:319] CGROUPS_DEVICES: enabled
I1121 14:29:40.832358 252125 kubeadm.go:319] CGROUPS_FREEZER: enabled
I1121 14:29:40.832432 252125 kubeadm.go:319] CGROUPS_MEMORY: enabled
I1121 14:29:40.832504 252125 kubeadm.go:319] CGROUPS_PIDS: enabled
I1121 14:29:40.832668 252125 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I1121 14:29:40.832735 252125 kubeadm.go:319] CGROUPS_IO: enabled
I1121 14:29:40.895341 252125 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1121 14:29:40.895491 252125 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1121 14:29:40.895637 252125 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I1121 14:29:40.901358 252125 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1121 14:29:37.249631 249617 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I1121 14:29:37.262987 249617 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.28.0/kubectl ...
I1121 14:29:37.263020 249617 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I1121 14:29:37.283444 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I1121 14:29:38.138719 249617 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1121 14:29:38.138808 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:38.138810 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes old-k8s-version-012258 minikube.k8s.io/updated_at=2025_11_21T14_29_38_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=29e0798733fefbdc471fd2bbb38f6a7ae2a26162 minikube.k8s.io/name=old-k8s-version-012258 minikube.k8s.io/primary=true
I1121 14:29:38.150782 249617 ops.go:34] apiserver oom_adj: -16
I1121 14:29:38.225220 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:38.726231 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:39.225533 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:39.725591 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:40.225601 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:40.725734 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:41.226112 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
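The repeated get sa default runs above (roughly every 500ms) are a readiness poll: the apiserver answers before the controller-manager's service-account controller has created the default ServiceAccount, so bootstrapping retries until it appears. A generic sketch of such a poll; the kubeconfig path is a placeholder:

package main

import (
	"fmt"
	"os/exec"
	"time"
)

func main() {
	deadline := time.Now().Add(2 * time.Minute)
	for time.Now().Before(deadline) {
		// Succeeds only once the serviceaccount controller has created "default".
		err := exec.Command("kubectl", "--kubeconfig", "/var/lib/minikube/kubeconfig",
			"get", "sa", "default").Run()
		if err == nil {
			fmt.Println("default service account ready")
			return
		}
		time.Sleep(500 * time.Millisecond) // matches the ~0.5s spacing in the log
	}
	fmt.Println("timed out waiting for default service account")
}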
I1121 14:29:40.521190 255774 out.go:252] - Generating certificates and keys ...
I1121 14:29:40.521325 255774 kubeadm.go:319] [certs] Using existing ca certificate authority
I1121 14:29:40.521431 255774 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1121 14:29:41.003970 255774 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I1121 14:29:41.240665 255774 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I1121 14:29:41.425685 255774 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1121 14:29:41.689428 255774 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1121 14:29:41.923373 255774 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1121 14:29:41.923563 255774 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [default-k8s-diff-port-376255 localhost] and IPs [192.168.85.2 127.0.0.1 ::1]
I1121 14:29:42.051973 255774 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1121 14:29:42.052979 255774 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [default-k8s-diff-port-376255 localhost] and IPs [192.168.85.2 127.0.0.1 ::1]
I1121 14:29:42.277531 255774 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1121 14:29:42.491572 255774 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1121 14:29:42.605458 255774 kubeadm.go:319] [certs] Generating "sa" key and public key
I1121 14:29:42.605535 255774 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1121 14:29:42.870659 255774 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1121 14:29:43.039072 255774 kubeadm.go:319] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I1121 14:29:43.228611 255774 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1121 14:29:43.489903 255774 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1121 14:29:43.563271 255774 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1121 14:29:43.563948 255774 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1121 14:29:43.568453 255774 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1121 14:29:39.727688 213058 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1121 14:29:39.728083 213058 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
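The "Checking apiserver healthz" / "stopped" pair above is an HTTP GET against the apiserver's /healthz endpoint that fails with connection refused while the apiserver container is down. A minimal probe sketch, assuming TLS verification is skipped for this local diagnostic (the serving certificate is not necessarily valid for the raw IP being dialed):

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{
		Timeout: 2 * time.Second,
		Transport: &http.Transport{
			// Local probe only: skip verification of the apiserver
			// serving cert rather than loading the cluster CA.
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Get("https://192.168.76.2:8443/healthz")
	if err != nil {
		fmt.Println("stopped:", err) // e.g. connect: connection refused
		return
	}
	defer resp.Body.Close()
	fmt.Println("healthz status:", resp.Status)
}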
I1121 14:29:39.728134 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1121 14:29:39.728197 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1121 14:29:39.758413 213058 cri.go:89] found id: "9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1"
I1121 14:29:39.758436 213058 cri.go:89] found id: "934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780"
I1121 14:29:39.758441 213058 cri.go:89] found id: ""
I1121 14:29:39.758452 213058 logs.go:282] 2 containers: [9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1 934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780]
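The `found id: ""` entries throughout this section are an artifact of crictl's newline-terminated output: splitting the `--quiet` ID list on newlines yields a trailing empty string, which the count on the following line ("2 containers: [...]") has already discarded. A sketch of the same list-and-filter step, assuming crictl is installed and sudo is non-interactive:

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// Runs `sudo crictl ps -a --quiet --name=<name>` and returns the
// non-empty container IDs, dropping the trailing blank produced by
// the final newline.
func listContainers(name string) ([]string, error) {
	out, err := exec.Command("sudo", "crictl", "ps", "-a", "--quiet", "--name="+name).Output()
	if err != nil {
		return nil, err
	}
	var ids []string
	for _, line := range strings.Split(string(out), "\n") {
		if line = strings.TrimSpace(line); line != "" {
			ids = append(ids, line)
		}
	}
	return ids, nil
}

func main() {
	ids, err := listContainers("kube-apiserver")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("%d containers: %v\n", len(ids), ids)
}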
I1121 14:29:39.758508 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:39.763439 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:39.767912 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1121 14:29:39.767980 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1121 14:29:39.802923 213058 cri.go:89] found id: "4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359"
I1121 14:29:39.802948 213058 cri.go:89] found id: ""
I1121 14:29:39.802957 213058 logs.go:282] 1 containers: [4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359]
I1121 14:29:39.803013 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:39.807778 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1121 14:29:39.807853 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1121 14:29:39.835286 213058 cri.go:89] found id: ""
I1121 14:29:39.835314 213058 logs.go:282] 0 containers: []
W1121 14:29:39.835335 213058 logs.go:284] No container was found matching "coredns"
I1121 14:29:39.835343 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1121 14:29:39.835408 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1121 14:29:39.864986 213058 cri.go:89] found id: "e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6"
I1121 14:29:39.865034 213058 cri.go:89] found id: "f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545"
I1121 14:29:39.865040 213058 cri.go:89] found id: ""
I1121 14:29:39.865050 213058 logs.go:282] 2 containers: [e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6 f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545]
I1121 14:29:39.865105 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:39.869441 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:39.873676 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1121 14:29:39.873739 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1121 14:29:39.902671 213058 cri.go:89] found id: ""
I1121 14:29:39.902698 213058 logs.go:282] 0 containers: []
W1121 14:29:39.902707 213058 logs.go:284] No container was found matching "kube-proxy"
I1121 14:29:39.902715 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1121 14:29:39.902762 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1121 14:29:39.933452 213058 cri.go:89] found id: "94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3"
I1121 14:29:39.933477 213058 cri.go:89] found id: "56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463"
I1121 14:29:39.933483 213058 cri.go:89] found id: ""
I1121 14:29:39.933492 213058 logs.go:282] 2 containers: [94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3 56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463]
I1121 14:29:39.933557 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:39.938051 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:39.942029 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1121 14:29:39.942094 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1121 14:29:39.969991 213058 cri.go:89] found id: ""
I1121 14:29:39.970018 213058 logs.go:282] 0 containers: []
W1121 14:29:39.970028 213058 logs.go:284] No container was found matching "kindnet"
I1121 14:29:39.970036 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1121 14:29:39.970086 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1121 14:29:39.997381 213058 cri.go:89] found id: ""
I1121 14:29:39.997406 213058 logs.go:282] 0 containers: []
W1121 14:29:39.997417 213058 logs.go:284] No container was found matching "storage-provisioner"
I1121 14:29:39.997429 213058 logs.go:123] Gathering logs for kube-controller-manager [94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3] ...
I1121 14:29:39.997443 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3"
I1121 14:29:40.027188 213058 logs.go:123] Gathering logs for kube-controller-manager [56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463] ...
I1121 14:29:40.027213 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463"
I1121 14:29:40.067878 213058 logs.go:123] Gathering logs for container status ...
I1121 14:29:40.067906 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1121 14:29:40.101358 213058 logs.go:123] Gathering logs for dmesg ...
I1121 14:29:40.101388 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1121 14:29:40.115674 213058 logs.go:123] Gathering logs for kube-apiserver [934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780] ...
I1121 14:29:40.115704 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780"
I1121 14:29:40.153845 213058 logs.go:123] Gathering logs for etcd [4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359] ...
I1121 14:29:40.153871 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359"
I1121 14:29:40.188913 213058 logs.go:123] Gathering logs for containerd ...
I1121 14:29:40.188944 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1121 14:29:40.244995 213058 logs.go:123] Gathering logs for kubelet ...
I1121 14:29:40.245033 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1121 14:29:40.351506 213058 logs.go:123] Gathering logs for describe nodes ...
I1121 14:29:40.351558 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1121 14:29:40.417221 213058 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1121 14:29:40.417244 213058 logs.go:123] Gathering logs for kube-apiserver [9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1] ...
I1121 14:29:40.417263 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1"
I1121 14:29:40.457789 213058 logs.go:123] Gathering logs for kube-scheduler [e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6] ...
I1121 14:29:40.457836 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6"
I1121 14:29:40.520712 213058 logs.go:123] Gathering logs for kube-scheduler [f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545] ...
I1121 14:29:40.520748 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545"
I1121 14:29:43.056648 213058 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1121 14:29:43.057094 213058 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1121 14:29:43.057150 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1121 14:29:43.057204 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1121 14:29:43.085236 213058 cri.go:89] found id: "9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1"
I1121 14:29:43.085260 213058 cri.go:89] found id: "934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780"
I1121 14:29:43.085265 213058 cri.go:89] found id: ""
I1121 14:29:43.085275 213058 logs.go:282] 2 containers: [9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1 934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780]
I1121 14:29:43.085333 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:43.089868 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:43.094074 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1121 14:29:43.094134 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1121 14:29:43.122420 213058 cri.go:89] found id: "4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359"
I1121 14:29:43.122447 213058 cri.go:89] found id: ""
I1121 14:29:43.122457 213058 logs.go:282] 1 containers: [4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359]
I1121 14:29:43.122512 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:43.126830 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1121 14:29:43.126892 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1121 14:29:43.156518 213058 cri.go:89] found id: ""
I1121 14:29:43.156566 213058 logs.go:282] 0 containers: []
W1121 14:29:43.156577 213058 logs.go:284] No container was found matching "coredns"
I1121 14:29:43.156584 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1121 14:29:43.156646 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1121 14:29:43.185212 213058 cri.go:89] found id: "e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6"
I1121 14:29:43.185233 213058 cri.go:89] found id: "f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545"
I1121 14:29:43.185238 213058 cri.go:89] found id: ""
I1121 14:29:43.185277 213058 logs.go:282] 2 containers: [e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6 f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545]
I1121 14:29:43.185338 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:43.190000 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:43.194074 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1121 14:29:43.194131 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1121 14:29:43.224175 213058 cri.go:89] found id: ""
I1121 14:29:43.224201 213058 logs.go:282] 0 containers: []
W1121 14:29:43.224211 213058 logs.go:284] No container was found matching "kube-proxy"
I1121 14:29:43.224218 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1121 14:29:43.224277 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1121 14:29:43.258260 213058 cri.go:89] found id: "94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3"
I1121 14:29:43.258292 213058 cri.go:89] found id: "56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463"
I1121 14:29:43.258299 213058 cri.go:89] found id: ""
I1121 14:29:43.258310 213058 logs.go:282] 2 containers: [94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3 56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463]
I1121 14:29:43.258378 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:43.263276 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:43.268195 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1121 14:29:43.268264 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1121 14:29:43.303269 213058 cri.go:89] found id: ""
I1121 14:29:43.303300 213058 logs.go:282] 0 containers: []
W1121 14:29:43.303311 213058 logs.go:284] No container was found matching "kindnet"
I1121 14:29:43.303319 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1121 14:29:43.303379 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1121 14:29:43.333956 213058 cri.go:89] found id: ""
I1121 14:29:43.333985 213058 logs.go:282] 0 containers: []
W1121 14:29:43.333995 213058 logs.go:284] No container was found matching "storage-provisioner"
I1121 14:29:43.334007 213058 logs.go:123] Gathering logs for container status ...
I1121 14:29:43.334021 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1121 14:29:43.366338 213058 logs.go:123] Gathering logs for kubelet ...
I1121 14:29:43.366369 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1121 14:29:43.458987 213058 logs.go:123] Gathering logs for kube-apiserver [9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1] ...
I1121 14:29:43.459027 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1"
I1121 14:29:43.497960 213058 logs.go:123] Gathering logs for kube-apiserver [934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780] ...
I1121 14:29:43.497995 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780"
I1121 14:29:43.539997 213058 logs.go:123] Gathering logs for etcd [4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359] ...
I1121 14:29:43.540035 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359"
I1121 14:29:43.575882 213058 logs.go:123] Gathering logs for kube-scheduler [e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6] ...
I1121 14:29:43.575911 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6"
I1121 14:29:40.903405 252125 out.go:252] - Generating certificates and keys ...
I1121 14:29:40.903502 252125 kubeadm.go:319] [certs] Using existing ca certificate authority
I1121 14:29:40.903630 252125 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1121 14:29:41.180390 252125 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I1121 14:29:41.211121 252125 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I1121 14:29:41.523007 252125 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1121 14:29:42.461521 252125 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1121 14:29:42.641495 252125 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1121 14:29:42.641701 252125 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [localhost no-preload-921956] and IPs [192.168.103.2 127.0.0.1 ::1]
I1121 14:29:42.773640 252125 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1121 14:29:42.773843 252125 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [localhost no-preload-921956] and IPs [192.168.103.2 127.0.0.1 ::1]
I1121 14:29:42.921369 252125 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1121 14:29:43.256203 252125 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1121 14:29:43.834470 252125 kubeadm.go:319] [certs] Generating "sa" key and public key
I1121 14:29:43.834645 252125 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1121 14:29:43.949422 252125 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1121 14:29:44.093777 252125 kubeadm.go:319] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I1121 14:29:44.227287 252125 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1121 14:29:44.509482 252125 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1121 14:29:44.696294 252125 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1121 14:29:44.696767 252125 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1121 14:29:44.705846 252125 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1121 14:29:43.573374 255774 out.go:252] - Booting up control plane ...
I1121 14:29:43.573510 255774 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1121 14:29:43.573669 255774 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1121 14:29:43.573781 255774 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1121 14:29:43.590344 255774 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1121 14:29:43.590494 255774 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I1121 14:29:43.599838 255774 kubeadm.go:319] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I1121 14:29:43.600184 255774 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1121 14:29:43.600247 255774 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1121 14:29:43.720721 255774 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I1121 14:29:43.720878 255774 kubeadm.go:319] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I1121 14:29:44.721899 255774 kubeadm.go:319] [kubelet-check] The kubelet is healthy after 1.001196965s
I1121 14:29:44.724830 255774 kubeadm.go:319] [control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
I1121 14:29:44.724972 255774 kubeadm.go:319] [control-plane-check] Checking kube-apiserver at https://192.168.85.2:8444/livez
I1121 14:29:44.725131 255774 kubeadm.go:319] [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
I1121 14:29:44.725253 255774 kubeadm.go:319] [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
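The [control-plane-check] probes above run concurrently, which is why the "healthy after" durations reported later (roughly 1.8s, 2.7s, 4.5s) overlap instead of summing. A sketch of that fan-out, assuming the same endpoints as this log and skipping TLS verification for the local probe:

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"sync"
	"time"
)

// Repeatedly probes one endpoint until it answers 200 or the timeout
// (kubeadm allows up to 4m0s) expires.
func probe(client *http.Client, name, url string, timeout time.Duration) {
	start := time.Now()
	for time.Since(start) < timeout {
		resp, err := client.Get(url)
		if err == nil {
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				fmt.Printf("%s is healthy after %s\n", name, time.Since(start))
				return
			}
		}
		time.Sleep(250 * time.Millisecond)
	}
	fmt.Printf("%s: not healthy within %s\n", name, timeout)
}

func main() {
	endpoints := map[string]string{
		"kube-apiserver":          "https://192.168.85.2:8444/livez",
		"kube-controller-manager": "https://127.0.0.1:10257/healthz",
		"kube-scheduler":          "https://127.0.0.1:10259/livez",
	}
	client := &http.Client{
		Timeout:   2 * time.Second,
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}
	var wg sync.WaitGroup
	for name, url := range endpoints {
		wg.Add(1)
		go func(name, url string) {
			defer wg.Done()
			probe(client, name, url, 4*time.Minute)
		}(name, url)
	}
	wg.Wait()
}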
I1121 14:29:41.726266 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:42.225460 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:42.725727 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:43.225740 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:43.725669 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:44.225350 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:44.725651 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:45.226025 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:45.725289 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:46.226316 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:43.632243 213058 logs.go:123] Gathering logs for kube-scheduler [f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545] ...
I1121 14:29:43.632278 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545"
I1121 14:29:43.681909 213058 logs.go:123] Gathering logs for kube-controller-manager [56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463] ...
I1121 14:29:43.681959 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463"
I1121 14:29:43.723402 213058 logs.go:123] Gathering logs for containerd ...
I1121 14:29:43.723454 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1121 14:29:43.776606 213058 logs.go:123] Gathering logs for dmesg ...
I1121 14:29:43.776641 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1121 14:29:43.793171 213058 logs.go:123] Gathering logs for describe nodes ...
I1121 14:29:43.793200 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1121 14:29:43.854264 213058 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1121 14:29:43.854293 213058 logs.go:123] Gathering logs for kube-controller-manager [94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3] ...
I1121 14:29:43.854308 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3"
I1121 14:29:46.383659 213058 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1121 14:29:46.384075 213058 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1121 14:29:46.384128 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1121 14:29:46.384191 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1121 14:29:46.441629 213058 cri.go:89] found id: "9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1"
I1121 14:29:46.441734 213058 cri.go:89] found id: "934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780"
I1121 14:29:46.441754 213058 cri.go:89] found id: ""
I1121 14:29:46.441776 213058 logs.go:282] 2 containers: [9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1 934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780]
I1121 14:29:46.441873 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:46.447714 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:46.453337 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1121 14:29:46.453422 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1121 14:29:46.497451 213058 cri.go:89] found id: "4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359"
I1121 14:29:46.497475 213058 cri.go:89] found id: ""
I1121 14:29:46.497485 213058 logs.go:282] 1 containers: [4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359]
I1121 14:29:46.497585 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:46.504731 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1121 14:29:46.504801 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1121 14:29:46.562972 213058 cri.go:89] found id: ""
I1121 14:29:46.563014 213058 logs.go:282] 0 containers: []
W1121 14:29:46.563027 213058 logs.go:284] No container was found matching "coredns"
I1121 14:29:46.563036 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1121 14:29:46.563287 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1121 14:29:46.611186 213058 cri.go:89] found id: "e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6"
I1121 14:29:46.611216 213058 cri.go:89] found id: "f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545"
I1121 14:29:46.611221 213058 cri.go:89] found id: ""
I1121 14:29:46.611231 213058 logs.go:282] 2 containers: [e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6 f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545]
I1121 14:29:46.611289 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:46.620404 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:46.626388 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1121 14:29:46.626559 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1121 14:29:46.674192 213058 cri.go:89] found id: ""
I1121 14:29:46.674247 213058 logs.go:282] 0 containers: []
W1121 14:29:46.674259 213058 logs.go:284] No container was found matching "kube-proxy"
I1121 14:29:46.674267 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1121 14:29:46.674448 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1121 14:29:46.749738 213058 cri.go:89] found id: "94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3"
I1121 14:29:46.749765 213058 cri.go:89] found id: "56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463"
I1121 14:29:46.749771 213058 cri.go:89] found id: ""
I1121 14:29:46.749780 213058 logs.go:282] 2 containers: [94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3 56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463]
I1121 14:29:46.749835 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:46.756273 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:46.763986 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1121 14:29:46.764120 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1121 14:29:46.811858 213058 cri.go:89] found id: ""
I1121 14:29:46.811883 213058 logs.go:282] 0 containers: []
W1121 14:29:46.811901 213058 logs.go:284] No container was found matching "kindnet"
I1121 14:29:46.811909 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1121 14:29:46.811963 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1121 14:29:46.849599 213058 cri.go:89] found id: ""
I1121 14:29:46.849645 213058 logs.go:282] 0 containers: []
W1121 14:29:46.849655 213058 logs.go:284] No container was found matching "storage-provisioner"
I1121 14:29:46.849666 213058 logs.go:123] Gathering logs for kube-apiserver [9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1] ...
I1121 14:29:46.849683 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1"
I1121 14:29:46.913988 213058 logs.go:123] Gathering logs for etcd [4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359] ...
I1121 14:29:46.914024 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359"
I1121 14:29:46.953189 213058 logs.go:123] Gathering logs for kube-scheduler [f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545] ...
I1121 14:29:46.953227 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545"
I1121 14:29:47.001663 213058 logs.go:123] Gathering logs for kube-controller-manager [94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3] ...
I1121 14:29:47.001705 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3"
I1121 14:29:47.041106 213058 logs.go:123] Gathering logs for containerd ...
I1121 14:29:47.041137 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1121 14:29:47.107673 213058 logs.go:123] Gathering logs for kubelet ...
I1121 14:29:47.107712 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1121 14:29:47.240432 213058 logs.go:123] Gathering logs for dmesg ...
I1121 14:29:47.240473 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1121 14:29:47.288852 213058 logs.go:123] Gathering logs for describe nodes ...
I1121 14:29:47.288894 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
I1121 14:29:46.531314 255774 kubeadm.go:319] [control-plane-check] kube-controller-manager is healthy after 1.80645272s
I1121 14:29:47.509316 255774 kubeadm.go:319] [control-plane-check] kube-scheduler is healthy after 2.784421033s
I1121 14:29:49.226647 255774 kubeadm.go:319] [control-plane-check] kube-apiserver is healthy after 4.501794549s
I1121 14:29:49.239409 255774 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1121 14:29:49.252719 255774 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1121 14:29:49.264076 255774 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1121 14:29:49.264371 255774 kubeadm.go:319] [mark-control-plane] Marking the node default-k8s-diff-port-376255 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1121 14:29:49.274799 255774 kubeadm.go:319] [bootstrap-token] Using token: 8nwcfl.9utqukqcvuro6a4p
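Bootstrap tokens like 8nwcfl.9utqukqcvuro6a4p follow a fixed "<6-char id>.<16-char secret>" format of lowercase alphanumerics; the id part is public (it names the bootstrap-token-<id> Secret in kube-system) while the secret part is not. A short validation sketch:

package main

import (
	"fmt"
	"regexp"
)

// kubeadm bootstrap token format: [a-z0-9]{6}.[a-z0-9]{16}
var bootstrapToken = regexp.MustCompile(`^[a-z0-9]{6}\.[a-z0-9]{16}$`)

func main() {
	for _, tok := range []string{"8nwcfl.9utqukqcvuro6a4p", "not-a-token"} {
		fmt.Printf("%q valid: %v\n", tok, bootstrapToken.MatchString(tok))
	}
}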
I1121 14:29:44.769338 252125 out.go:252] - Booting up control plane ...
I1121 14:29:44.769476 252125 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1121 14:29:44.769652 252125 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1121 14:29:44.769771 252125 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1121 14:29:44.769940 252125 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1121 14:29:44.770087 252125 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I1121 14:29:44.778391 252125 kubeadm.go:319] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I1121 14:29:44.779655 252125 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1121 14:29:44.779729 252125 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1121 14:29:44.894196 252125 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I1121 14:29:44.894364 252125 kubeadm.go:319] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I1121 14:29:45.895053 252125 kubeadm.go:319] [kubelet-check] The kubelet is healthy after 1.000974959s
I1121 14:29:45.898754 252125 kubeadm.go:319] [control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
I1121 14:29:45.898875 252125 kubeadm.go:319] [control-plane-check] Checking kube-apiserver at https://192.168.103.2:8443/livez
I1121 14:29:45.899003 252125 kubeadm.go:319] [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
I1121 14:29:45.899149 252125 kubeadm.go:319] [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
I1121 14:29:48.621169 252125 kubeadm.go:319] [control-plane-check] kube-scheduler is healthy after 2.722350043s
I1121 14:29:49.059709 252125 kubeadm.go:319] [control-plane-check] kube-controller-manager is healthy after 3.160801257s
I1121 14:29:49.276414 255774 out.go:252] - Configuring RBAC rules ...
I1121 14:29:49.276590 255774 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1121 14:29:49.280532 255774 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1121 14:29:49.287374 255774 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1121 14:29:49.290401 255774 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1121 14:29:49.293308 255774 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1121 14:29:49.297552 255774 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1121 14:29:49.632747 255774 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1121 14:29:46.726037 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:47.228665 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:47.725338 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:48.226199 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:48.725959 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:49.225812 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:49.725337 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:50.225293 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:50.310282 249617 kubeadm.go:1114] duration metric: took 12.17154172s to wait for elevateKubeSystemPrivileges
I1121 14:29:50.310322 249617 kubeadm.go:403] duration metric: took 23.370802852s to StartCluster
I1121 14:29:50.310347 249617 settings.go:142] acquiring lock: {Name:mkfe3f8167491ec1abfca3e17282002404072955 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:50.310438 249617 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21847-11004/kubeconfig
I1121 14:29:50.311864 249617 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/kubeconfig: {Name:mk5d3e3ed379bd47c91313113a93ad7e3f44dbb9 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:50.312167 249617 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1121 14:29:50.312169 249617 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.94.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1121 14:29:50.312267 249617 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
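The toEnable map above is almost entirely false for this profile; only storage-provisioner and default-storageclass are on, matching the "Enabled addons" line later. A trivial sketch of reducing such a map to the enabled names (the map literal here is abbreviated from the log):

package main

import (
	"fmt"
	"sort"
)

func main() {
	toEnable := map[string]bool{
		"storage-provisioner":  true,
		"default-storageclass": true,
		"dashboard":            false,
		"metrics-server":       false,
	}
	var enabled []string
	for name, on := range toEnable {
		if on {
			enabled = append(enabled, name)
		}
	}
	sort.Strings(enabled)
	fmt.Println("enabled addons:", enabled)
}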
I1121 14:29:50.312352 249617 addons.go:70] Setting storage-provisioner=true in profile "old-k8s-version-012258"
I1121 14:29:50.312372 249617 addons.go:239] Setting addon storage-provisioner=true in "old-k8s-version-012258"
I1121 14:29:50.312403 249617 host.go:66] Checking if "old-k8s-version-012258" exists ...
I1121 14:29:50.312458 249617 config.go:182] Loaded profile config "old-k8s-version-012258": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1121 14:29:50.312516 249617 addons.go:70] Setting default-storageclass=true in profile "old-k8s-version-012258"
I1121 14:29:50.312530 249617 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "old-k8s-version-012258"
I1121 14:29:50.312827 249617 cli_runner.go:164] Run: docker container inspect old-k8s-version-012258 --format={{.State.Status}}
I1121 14:29:50.312965 249617 cli_runner.go:164] Run: docker container inspect old-k8s-version-012258 --format={{.State.Status}}
I1121 14:29:50.314603 249617 out.go:179] * Verifying Kubernetes components...
I1121 14:29:50.316238 249617 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1121 14:29:50.339724 249617 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1121 14:29:50.056893 255774 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1121 14:29:50.634602 255774 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1121 14:29:50.635720 255774 kubeadm.go:319]
I1121 14:29:50.635840 255774 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1121 14:29:50.635916 255774 kubeadm.go:319]
I1121 14:29:50.636085 255774 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1121 14:29:50.636139 255774 kubeadm.go:319]
I1121 14:29:50.636189 255774 kubeadm.go:319] mkdir -p $HOME/.kube
I1121 14:29:50.636300 255774 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1121 14:29:50.636386 255774 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1121 14:29:50.636448 255774 kubeadm.go:319]
I1121 14:29:50.636574 255774 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1121 14:29:50.636584 255774 kubeadm.go:319]
I1121 14:29:50.636647 255774 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1121 14:29:50.636652 255774 kubeadm.go:319]
I1121 14:29:50.636709 255774 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1121 14:29:50.636796 255774 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1121 14:29:50.636878 255774 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1121 14:29:50.636886 255774 kubeadm.go:319]
I1121 14:29:50.636981 255774 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1121 14:29:50.637083 255774 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1121 14:29:50.637090 255774 kubeadm.go:319]
I1121 14:29:50.637247 255774 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8444 --token 8nwcfl.9utqukqcvuro6a4p \
I1121 14:29:50.637414 255774 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:2aad198f79b3258593291a08f0028a72548d0fc82d6b54639b7d7d17a52adfdb \
I1121 14:29:50.637449 255774 kubeadm.go:319] --control-plane
I1121 14:29:50.637460 255774 kubeadm.go:319]
I1121 14:29:50.637571 255774 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1121 14:29:50.637580 255774 kubeadm.go:319]
I1121 14:29:50.637672 255774 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8444 --token 8nwcfl.9utqukqcvuro6a4p \
I1121 14:29:50.637785 255774 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:2aad198f79b3258593291a08f0028a72548d0fc82d6b54639b7d7d17a52adfdb
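The --discovery-token-ca-cert-hash in the join command is "sha256:" plus the SHA-256 of the cluster CA certificate's DER-encoded Subject Public Key Info, which joining nodes use to pin the CA before trusting it. A sketch that recomputes it, assuming the CA lives at the certificateDir shown earlier (/var/lib/minikube/certs/ca.crt):

package main

import (
	"crypto/sha256"
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
)

// Recomputes kubeadm's discovery-token-ca-cert-hash from the CA cert.
func main() {
	pemBytes, err := os.ReadFile("/var/lib/minikube/certs/ca.crt")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		fmt.Fprintln(os.Stderr, "no PEM block in ca.crt")
		os.Exit(1)
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	sum := sha256.Sum256(cert.RawSubjectPublicKeyInfo)
	fmt.Printf("sha256:%x\n", sum)
}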
I1121 14:29:50.642202 255774 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1044-gcp\n", err: exit status 1
I1121 14:29:50.642513 255774 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1121 14:29:50.642647 255774 cni.go:84] Creating CNI manager for ""
I1121 14:29:50.642693 255774 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1121 14:29:50.645524 255774 out.go:179] * Configuring CNI (Container Networking Interface) ...
I1121 14:29:50.339929 249617 addons.go:239] Setting addon default-storageclass=true in "old-k8s-version-012258"
I1121 14:29:50.339977 249617 host.go:66] Checking if "old-k8s-version-012258" exists ...
I1121 14:29:50.340433 249617 cli_runner.go:164] Run: docker container inspect old-k8s-version-012258 --format={{.State.Status}}
I1121 14:29:50.341133 249617 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1121 14:29:50.341154 249617 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1121 14:29:50.341208 249617 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-012258
I1121 14:29:50.377822 249617 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1121 14:29:50.377846 249617 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1121 14:29:50.377844 249617 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33060 SSHKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/machines/old-k8s-version-012258/id_rsa Username:docker}
I1121 14:29:50.377907 249617 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-012258
I1121 14:29:50.410483 249617 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33060 SSHKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/machines/old-k8s-version-012258/id_rsa Username:docker}
I1121 14:29:50.415901 249617 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.94.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
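The long sed pipeline above fetches the coredns ConfigMap, inserts a hosts block mapping host.minikube.internal to the host gateway (192.168.94.1) just before the `forward . /etc/resolv.conf` directive, and replaces the ConfigMap; the "host record injected" line below confirms it. A sketch of the same string edit on a Corefile already in hand (the `log` plugin insertion the sed also performs is left out):

package main

import (
	"fmt"
	"strings"
)

// Inserts a hosts{} stanza ahead of the forward directive, the same
// edit the sed pipeline performs on the coredns Corefile.
func injectHostRecord(corefile, ip string) string {
	hosts := fmt.Sprintf("    hosts {\n       %s host.minikube.internal\n       fallthrough\n    }\n", ip)
	var b strings.Builder
	for _, line := range strings.SplitAfter(corefile, "\n") {
		if strings.Contains(line, "forward . /etc/resolv.conf") {
			b.WriteString(hosts)
		}
		b.WriteString(line)
	}
	return b.String()
}

func main() {
	corefile := ".:53 {\n    errors\n    forward . /etc/resolv.conf\n    cache 30\n}\n"
	fmt.Print(injectHostRecord(corefile, "192.168.94.1"))
}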
I1121 14:29:50.468678 249617 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1121 14:29:50.503643 249617 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1121 14:29:50.536480 249617 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1121 14:29:50.667362 249617 start.go:977] {"host.minikube.internal": 192.168.94.1} host record injected into CoreDNS's ConfigMap
I1121 14:29:50.668484 249617 node_ready.go:35] waiting up to 6m0s for node "old-k8s-version-012258" to be "Ready" ...
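The node_ready.go wait above polls the node's Ready condition for up to 6m. A sketch of an equivalent check, assuming kubectl on PATH with the right context rather than the in-process client minikube actually uses:

package main

import (
	"fmt"
	"os/exec"
	"strings"
	"time"
)

// Polls the node's Ready condition via kubectl jsonpath until it
// reports "True" or the timeout expires.
func waitNodeReady(name string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	jsonpath := `{.status.conditions[?(@.type=="Ready")].status}`
	for time.Now().Before(deadline) {
		out, err := exec.Command("kubectl", "get", "node", name,
			"-o", "jsonpath="+jsonpath).Output()
		if err == nil && strings.TrimSpace(string(out)) == "True" {
			return nil
		}
		time.Sleep(2 * time.Second)
	}
	return fmt.Errorf("node %s not Ready within %s", name, timeout)
}

func main() {
	if err := waitNodeReady("old-k8s-version-012258", 6*time.Minute); err != nil {
		fmt.Println(err)
	}
}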
I1121 14:29:50.954598 249617 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
I1121 14:29:50.401999 252125 kubeadm.go:319] [control-plane-check] kube-apiserver is healthy after 4.502477764s
I1121 14:29:50.419850 252125 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1121 14:29:50.933016 252125 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1121 14:29:50.948821 252125 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1121 14:29:50.949093 252125 kubeadm.go:319] [mark-control-plane] Marking the node no-preload-921956 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1121 14:29:50.961417 252125 kubeadm.go:319] [bootstrap-token] Using token: uhuim0.7wh8hbt7v76eo7qs
I1121 14:29:50.955828 249617 addons.go:530] duration metric: took 643.55365ms for enable addons: enabled=[storage-provisioner default-storageclass]
I1121 14:29:51.174831 249617 kapi.go:214] "coredns" deployment in "kube-system" namespace and "old-k8s-version-012258" context rescaled to 1 replicas
I1121 14:29:50.963415 252125 out.go:252] - Configuring RBAC rules ...
I1121 14:29:50.963588 252125 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1121 14:29:50.971176 252125 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1121 14:29:50.980644 252125 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1121 14:29:50.985255 252125 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1121 14:29:50.989946 252125 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1121 14:29:50.994015 252125 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1121 14:29:51.128309 252125 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1121 14:29:51.550178 252125 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1121 14:29:52.128624 252125 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1121 14:29:52.129402 252125 kubeadm.go:319]
I1121 14:29:52.129496 252125 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1121 14:29:52.129528 252125 kubeadm.go:319]
I1121 14:29:52.129657 252125 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1121 14:29:52.129669 252125 kubeadm.go:319]
I1121 14:29:52.129705 252125 kubeadm.go:319] mkdir -p $HOME/.kube
I1121 14:29:52.129798 252125 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1121 14:29:52.129906 252125 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1121 14:29:52.129923 252125 kubeadm.go:319]
I1121 14:29:52.129995 252125 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1121 14:29:52.130004 252125 kubeadm.go:319]
I1121 14:29:52.130078 252125 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1121 14:29:52.130087 252125 kubeadm.go:319]
I1121 14:29:52.130170 252125 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1121 14:29:52.130304 252125 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1121 14:29:52.130418 252125 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1121 14:29:52.130446 252125 kubeadm.go:319]
I1121 14:29:52.130574 252125 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1121 14:29:52.130677 252125 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1121 14:29:52.130685 252125 kubeadm.go:319]
I1121 14:29:52.130797 252125 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token uhuim0.7wh8hbt7v76eo7qs \
I1121 14:29:52.130966 252125 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:2aad198f79b3258593291a08f0028a72548d0fc82d6b54639b7d7d17a52adfdb \
I1121 14:29:52.131000 252125 kubeadm.go:319] --control-plane
I1121 14:29:52.131035 252125 kubeadm.go:319]
I1121 14:29:52.131212 252125 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1121 14:29:52.131230 252125 kubeadm.go:319]
I1121 14:29:52.131343 252125 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token uhuim0.7wh8hbt7v76eo7qs \
I1121 14:29:52.131485 252125 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:2aad198f79b3258593291a08f0028a72548d0fc82d6b54639b7d7d17a52adfdb
I1121 14:29:52.132830 252125 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1044-gcp\n", err: exit status 1
I1121 14:29:52.132967 252125 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1121 14:29:52.133003 252125 cni.go:84] Creating CNI manager for ""
I1121 14:29:52.133014 252125 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1121 14:29:52.134968 252125 out.go:179] * Configuring CNI (Container Networking Interface) ...
I1121 14:29:52.136241 252125 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I1121 14:29:52.141107 252125 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.34.1/kubectl ...
I1121 14:29:52.141131 252125 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I1121 14:29:52.155585 252125 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I1121 14:29:52.395340 252125 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1121 14:29:52.395422 252125 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:52.395526 252125 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes no-preload-921956 minikube.k8s.io/updated_at=2025_11_21T14_29_52_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=29e0798733fefbdc471fd2bbb38f6a7ae2a26162 minikube.k8s.io/name=no-preload-921956 minikube.k8s.io/primary=true
I1121 14:29:52.481012 252125 ops.go:34] apiserver oom_adj: -16
I1121 14:29:52.481125 252125 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:52.982198 252125 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:53.481748 252125 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:53.981282 252125 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:50.646815 255774 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I1121 14:29:50.654615 255774 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.34.1/kubectl ...
I1121 14:29:50.654642 255774 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I1121 14:29:50.673887 255774 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I1121 14:29:50.944978 255774 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1121 14:29:50.945143 255774 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:50.945309 255774 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes default-k8s-diff-port-376255 minikube.k8s.io/updated_at=2025_11_21T14_29_50_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=29e0798733fefbdc471fd2bbb38f6a7ae2a26162 minikube.k8s.io/name=default-k8s-diff-port-376255 minikube.k8s.io/primary=true
I1121 14:29:50.960009 255774 ops.go:34] apiserver oom_adj: -16
I1121 14:29:51.036596 255774 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:51.537134 255774 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:52.037345 255774 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:52.536941 255774 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:53.037592 255774 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:53.536966 255774 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:54.036678 255774 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:54.536697 255774 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:55.037499 255774 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:55.536808 255774 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:55.610391 255774 kubeadm.go:1114] duration metric: took 4.665295307s to wait for elevateKubeSystemPrivileges
I1121 14:29:55.610426 255774 kubeadm.go:403] duration metric: took 15.395907943s to StartCluster
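[annotation] The burst of `kubectl get sa default` runs above is a fixed-interval poll: kubeadm creates the `default` service account asynchronously, so minikube retries roughly every 500ms until it appears (4.67s here). A sketch of that pattern, with the command line taken verbatim from the log and an illustrative helper name:

```go
package main

import (
	"fmt"
	"os/exec"
	"time"
)

// waitForDefaultSA is a hypothetical helper; the retry scaffolding is
// illustrative, not minikube's implementation.
func waitForDefaultSA(kubectl, kubeconfig string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		cmd := exec.Command("sudo", kubectl, "get", "sa", "default",
			"--kubeconfig="+kubeconfig)
		if cmd.Run() == nil {
			return nil // the default service account now exists
		}
		time.Sleep(500 * time.Millisecond) // matches the ~0.5s cadence in the log
	}
	return fmt.Errorf("default service account not created within %s", timeout)
}

func main() {
	err := waitForDefaultSA("/var/lib/minikube/binaries/v1.34.1/kubectl",
		"/var/lib/minikube/kubeconfig", 2*time.Minute)
	fmt.Println(err)
}
```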
I1121 14:29:55.610448 255774 settings.go:142] acquiring lock: {Name:mkfe3f8167491ec1abfca3e17282002404072955 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:55.610511 255774 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21847-11004/kubeconfig
I1121 14:29:55.612071 255774 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/kubeconfig: {Name:mk5d3e3ed379bd47c91313113a93ad7e3f44dbb9 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:55.612346 255774 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.85.2 Port:8444 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1121 14:29:55.612498 255774 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1121 14:29:55.612612 255774 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1121 14:29:55.612696 255774 addons.go:70] Setting storage-provisioner=true in profile "default-k8s-diff-port-376255"
I1121 14:29:55.612713 255774 addons.go:239] Setting addon storage-provisioner=true in "default-k8s-diff-port-376255"
I1121 14:29:55.612745 255774 host.go:66] Checking if "default-k8s-diff-port-376255" exists ...
I1121 14:29:55.612775 255774 config.go:182] Loaded profile config "default-k8s-diff-port-376255": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1121 14:29:55.612835 255774 addons.go:70] Setting default-storageclass=true in profile "default-k8s-diff-port-376255"
I1121 14:29:55.612852 255774 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "default-k8s-diff-port-376255"
I1121 14:29:55.613218 255774 cli_runner.go:164] Run: docker container inspect default-k8s-diff-port-376255 --format={{.State.Status}}
I1121 14:29:55.613392 255774 cli_runner.go:164] Run: docker container inspect default-k8s-diff-port-376255 --format={{.State.Status}}
I1121 14:29:55.613476 255774 out.go:179] * Verifying Kubernetes components...
I1121 14:29:55.615420 255774 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1121 14:29:55.641842 255774 addons.go:239] Setting addon default-storageclass=true in "default-k8s-diff-port-376255"
I1121 14:29:55.641893 255774 host.go:66] Checking if "default-k8s-diff-port-376255" exists ...
I1121 14:29:55.642317 255774 cli_runner.go:164] Run: docker container inspect default-k8s-diff-port-376255 --format={{.State.Status}}
I1121 14:29:55.647007 255774 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1121 14:29:55.648771 255774 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1121 14:29:55.648807 255774 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1121 14:29:55.648882 255774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" default-k8s-diff-port-376255
I1121 14:29:55.679690 255774 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1121 14:29:55.679713 255774 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1121 14:29:55.679780 255774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" default-k8s-diff-port-376255
I1121 14:29:55.680868 255774 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33070 SSHKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/machines/default-k8s-diff-port-376255/id_rsa Username:docker}
I1121 14:29:55.703091 255774 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33070 SSHKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/machines/default-k8s-diff-port-376255/id_rsa Username:docker}
I1121 14:29:55.713751 255774 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.85.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I1121 14:29:55.781953 255774 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1121 14:29:55.795189 255774 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1121 14:29:55.811872 255774 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1121 14:29:55.895061 255774 start.go:977] {"host.minikube.internal": 192.168.85.1} host record injected into CoreDNS's ConfigMap
I1121 14:29:55.896386 255774 node_ready.go:35] waiting up to 6m0s for node "default-k8s-diff-port-376255" to be "Ready" ...
I1121 14:29:56.162438 255774 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
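[annotation] The sed pipeline at 14:29:55.713751 rewrites the CoreDNS ConfigMap before piping it back through `kubectl replace -f -`: it inserts a `hosts` block ahead of the `forward` directive (mapping `host.minikube.internal` to the network gateway, 192.168.85.1 here) and a `log` directive ahead of `errors`. Reconstructed from those sed expressions (exact indentation in a real Corefile may differ), the patched fragment looks roughly like this, embedded in Go to keep one example language:

```go
package main

import "fmt"

// patchedCorefileFragment is reconstructed from the sed expressions in the
// log; it is an approximation, not a dump of the live ConfigMap.
const patchedCorefileFragment = `
    log
    errors
    hosts {
       192.168.85.1 host.minikube.internal
       fallthrough
    }
    forward . /etc/resolv.conf
`

func main() { fmt.Print(patchedCorefileFragment) }
```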
W1121 14:29:52.672645 249617 node_ready.go:57] node "old-k8s-version-012258" has "Ready":"False" status (will retry)
W1121 14:29:55.172665 249617 node_ready.go:57] node "old-k8s-version-012258" has "Ready":"False" status (will retry)
I1121 14:29:54.481750 252125 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:54.981303 252125 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:55.481778 252125 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:55.981846 252125 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:56.481336 252125 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:56.981822 252125 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:57.056720 252125 kubeadm.go:1114] duration metric: took 4.66135199s to wait for elevateKubeSystemPrivileges
I1121 14:29:57.056760 252125 kubeadm.go:403] duration metric: took 16.414821557s to StartCluster
I1121 14:29:57.056783 252125 settings.go:142] acquiring lock: {Name:mkfe3f8167491ec1abfca3e17282002404072955 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:57.056866 252125 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21847-11004/kubeconfig
I1121 14:29:57.059279 252125 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/kubeconfig: {Name:mk5d3e3ed379bd47c91313113a93ad7e3f44dbb9 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:57.059591 252125 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1121 14:29:57.059595 252125 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.103.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1121 14:29:57.059668 252125 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1121 14:29:57.059755 252125 addons.go:70] Setting storage-provisioner=true in profile "no-preload-921956"
I1121 14:29:57.059780 252125 addons.go:239] Setting addon storage-provisioner=true in "no-preload-921956"
I1121 14:29:57.059783 252125 addons.go:70] Setting default-storageclass=true in profile "no-preload-921956"
I1121 14:29:57.059810 252125 host.go:66] Checking if "no-preload-921956" exists ...
I1121 14:29:57.059818 252125 config.go:182] Loaded profile config "no-preload-921956": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1121 14:29:57.059810 252125 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "no-preload-921956"
I1121 14:29:57.060267 252125 cli_runner.go:164] Run: docker container inspect no-preload-921956 --format={{.State.Status}}
I1121 14:29:57.060366 252125 cli_runner.go:164] Run: docker container inspect no-preload-921956 --format={{.State.Status}}
I1121 14:29:57.061615 252125 out.go:179] * Verifying Kubernetes components...
I1121 14:29:57.063049 252125 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1121 14:29:57.087511 252125 addons.go:239] Setting addon default-storageclass=true in "no-preload-921956"
I1121 14:29:57.087574 252125 host.go:66] Checking if "no-preload-921956" exists ...
I1121 14:29:57.088046 252125 cli_runner.go:164] Run: docker container inspect no-preload-921956 --format={{.State.Status}}
I1121 14:29:57.088842 252125 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1121 14:29:57.090553 252125 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1121 14:29:57.090577 252125 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1121 14:29:57.090634 252125 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-921956
I1121 14:29:57.113518 252125 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1121 14:29:57.113567 252125 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1121 14:29:57.113644 252125 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-921956
I1121 14:29:57.116604 252125 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33065 SSHKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/machines/no-preload-921956/id_rsa Username:docker}
I1121 14:29:57.140626 252125 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33065 SSHKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/machines/no-preload-921956/id_rsa Username:docker}
I1121 14:29:57.162241 252125 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.103.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I1121 14:29:57.221336 252125 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1121 14:29:57.237060 252125 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1121 14:29:57.259845 252125 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1121 14:29:57.393470 252125 start.go:977] {"host.minikube.internal": 192.168.103.1} host record injected into CoreDNS's ConfigMap
I1121 14:29:57.394577 252125 node_ready.go:35] waiting up to 6m0s for node "no-preload-921956" to be "Ready" ...
I1121 14:29:57.623024 252125 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
I1121 14:29:57.414885 213058 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": (10.125971322s)
W1121 14:29:57.414929 213058 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
Unable to connect to the server: net/http: TLS handshake timeout
output:
** stderr **
Unable to connect to the server: net/http: TLS handshake timeout
** /stderr **
I1121 14:29:57.414939 213058 logs.go:123] Gathering logs for kube-apiserver [934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780] ...
I1121 14:29:57.414952 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780"
I1121 14:29:57.462838 213058 logs.go:123] Gathering logs for kube-scheduler [e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6] ...
I1121 14:29:57.462881 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6"
I1121 14:29:57.526637 213058 logs.go:123] Gathering logs for kube-controller-manager [56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463] ...
I1121 14:29:57.526671 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463"
I1121 14:29:57.574224 213058 logs.go:123] Gathering logs for container status ...
I1121 14:29:57.574259 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1121 14:29:57.624430 252125 addons.go:530] duration metric: took 564.759261ms for enable addons: enabled=[storage-provisioner default-storageclass]
I1121 14:29:57.898009 252125 kapi.go:214] "coredns" deployment in "kube-system" namespace and "no-preload-921956" context rescaled to 1 replicas
I1121 14:29:56.163632 255774 addons.go:530] duration metric: took 551.031985ms for enable addons: enabled=[storage-provisioner default-storageclass]
I1121 14:29:56.399602 255774 kapi.go:214] "coredns" deployment in "kube-system" namespace and "default-k8s-diff-port-376255" context rescaled to 1 replicas
W1121 14:29:57.899680 255774 node_ready.go:57] node "default-k8s-diff-port-376255" has "Ready":"False" status (will retry)
W1121 14:29:57.174208 249617 node_ready.go:57] node "old-k8s-version-012258" has "Ready":"False" status (will retry)
W1121 14:29:59.672116 249617 node_ready.go:57] node "old-k8s-version-012258" has "Ready":"False" status (will retry)
I1121 14:30:00.114035 213058 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
W1121 14:29:59.398191 252125 node_ready.go:57] node "no-preload-921956" has "Ready":"False" status (will retry)
W1121 14:30:01.898360 252125 node_ready.go:57] node "no-preload-921956" has "Ready":"False" status (will retry)
W1121 14:29:59.900344 255774 node_ready.go:57] node "default-k8s-diff-port-376255" has "Ready":"False" status (will retry)
W1121 14:30:01.900816 255774 node_ready.go:57] node "default-k8s-diff-port-376255" has "Ready":"False" status (will retry)
W1121 14:30:04.400331 255774 node_ready.go:57] node "default-k8s-diff-port-376255" has "Ready":"False" status (will retry)
W1121 14:30:01.672252 249617 node_ready.go:57] node "old-k8s-version-012258" has "Ready":"False" status (will retry)
W1121 14:30:04.171805 249617 node_ready.go:57] node "old-k8s-version-012258" has "Ready":"False" status (will retry)
I1121 14:30:05.672011 249617 node_ready.go:49] node "old-k8s-version-012258" is "Ready"
I1121 14:30:05.672046 249617 node_ready.go:38] duration metric: took 15.003519412s for node "old-k8s-version-012258" to be "Ready" ...
I1121 14:30:05.672064 249617 api_server.go:52] waiting for apiserver process to appear ...
I1121 14:30:05.672125 249617 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1121 14:30:05.689799 249617 api_server.go:72] duration metric: took 15.377593574s to wait for apiserver process to appear ...
I1121 14:30:05.689974 249617 api_server.go:88] waiting for apiserver healthz status ...
I1121 14:30:05.690001 249617 api_server.go:253] Checking apiserver healthz at https://192.168.94.2:8443/healthz ...
I1121 14:30:05.696217 249617 api_server.go:279] https://192.168.94.2:8443/healthz returned 200:
ok
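[annotation] The healthz probe above is a plain HTTPS GET that counts the cluster as healthy on a 200 response with body `ok`. A self-contained sketch; note the real client authenticates with the cluster CA and client certificates, and `InsecureSkipVerify` below exists only to keep the illustration standalone:

```go
package main

import (
	"crypto/tls"
	"fmt"
	"io"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{
		Timeout: 5 * time.Second,
		Transport: &http.Transport{
			// Illustration only: minikube verifies the cluster CA instead.
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Get("https://192.168.94.2:8443/healthz")
	if err != nil {
		panic(err) // connection refused / timeout shows up as "stopped" in the log
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Printf("%d: %s\n", resp.StatusCode, body) // a healthy cluster prints "200: ok"
}
```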
I1121 14:30:05.697950 249617 api_server.go:141] control plane version: v1.28.0
I1121 14:30:05.697978 249617 api_server.go:131] duration metric: took 7.994891ms to wait for apiserver health ...
I1121 14:30:05.697990 249617 system_pods.go:43] waiting for kube-system pods to appear ...
I1121 14:30:05.702726 249617 system_pods.go:59] 8 kube-system pods found
I1121 14:30:05.702769 249617 system_pods.go:61] "coredns-5dd5756b68-vst4c" [3ca4df79-d875-498c-91b8-059d4f975bd0] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1121 14:30:05.702778 249617 system_pods.go:61] "etcd-old-k8s-version-012258" [2316d2c5-5731-4804-b900-b3ed4289f3d5] Running
I1121 14:30:05.702785 249617 system_pods.go:61] "kindnet-f6t7s" [bd28a6b5-0214-42be-8883-1adf1217761c] Running
I1121 14:30:05.702796 249617 system_pods.go:61] "kube-apiserver-old-k8s-version-012258" [fb018e50-0892-4250-9f7d-16731a31f2e5] Running
I1121 14:30:05.702808 249617 system_pods.go:61] "kube-controller-manager-old-k8s-version-012258" [7e21a806-9ed1-4e34-a635-f92287ab6545] Running
I1121 14:30:05.702818 249617 system_pods.go:61] "kube-proxy-wsp2w" [bc079c02-40ff-4f10-947b-76f1e9784572] Running
I1121 14:30:05.702822 249617 system_pods.go:61] "kube-scheduler-old-k8s-version-012258" [925c4663-2ad7-41a1-9606-3fbfe8e0904d] Running
I1121 14:30:05.702829 249617 system_pods.go:61] "storage-provisioner" [4195d236-52f6-4bfd-b47a-9cd7cd89bedd] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1121 14:30:05.702837 249617 system_pods.go:74] duration metric: took 4.84094ms to wait for pod list to return data ...
I1121 14:30:05.702852 249617 default_sa.go:34] waiting for default service account to be created ...
I1121 14:30:05.705127 249617 default_sa.go:45] found service account: "default"
I1121 14:30:05.705151 249617 default_sa.go:55] duration metric: took 2.290103ms for default service account to be created ...
I1121 14:30:05.705161 249617 system_pods.go:116] waiting for k8s-apps to be running ...
I1121 14:30:05.710235 249617 system_pods.go:86] 8 kube-system pods found
I1121 14:30:05.710318 249617 system_pods.go:89] "coredns-5dd5756b68-vst4c" [3ca4df79-d875-498c-91b8-059d4f975bd0] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1121 14:30:05.710330 249617 system_pods.go:89] "etcd-old-k8s-version-012258" [2316d2c5-5731-4804-b900-b3ed4289f3d5] Running
I1121 14:30:05.710337 249617 system_pods.go:89] "kindnet-f6t7s" [bd28a6b5-0214-42be-8883-1adf1217761c] Running
I1121 14:30:05.710367 249617 system_pods.go:89] "kube-apiserver-old-k8s-version-012258" [fb018e50-0892-4250-9f7d-16731a31f2e5] Running
I1121 14:30:05.710374 249617 system_pods.go:89] "kube-controller-manager-old-k8s-version-012258" [7e21a806-9ed1-4e34-a635-f92287ab6545] Running
I1121 14:30:05.710380 249617 system_pods.go:89] "kube-proxy-wsp2w" [bc079c02-40ff-4f10-947b-76f1e9784572] Running
I1121 14:30:05.710385 249617 system_pods.go:89] "kube-scheduler-old-k8s-version-012258" [925c4663-2ad7-41a1-9606-3fbfe8e0904d] Running
I1121 14:30:05.710404 249617 system_pods.go:89] "storage-provisioner" [4195d236-52f6-4bfd-b47a-9cd7cd89bedd] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1121 14:30:05.710597 249617 retry.go:31] will retry after 257.065607ms: missing components: kube-dns
I1121 14:30:05.972608 249617 system_pods.go:86] 8 kube-system pods found
I1121 14:30:05.972648 249617 system_pods.go:89] "coredns-5dd5756b68-vst4c" [3ca4df79-d875-498c-91b8-059d4f975bd0] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1121 14:30:05.972657 249617 system_pods.go:89] "etcd-old-k8s-version-012258" [2316d2c5-5731-4804-b900-b3ed4289f3d5] Running
I1121 14:30:05.972665 249617 system_pods.go:89] "kindnet-f6t7s" [bd28a6b5-0214-42be-8883-1adf1217761c] Running
I1121 14:30:05.972676 249617 system_pods.go:89] "kube-apiserver-old-k8s-version-012258" [fb018e50-0892-4250-9f7d-16731a31f2e5] Running
I1121 14:30:05.972682 249617 system_pods.go:89] "kube-controller-manager-old-k8s-version-012258" [7e21a806-9ed1-4e34-a635-f92287ab6545] Running
I1121 14:30:05.972687 249617 system_pods.go:89] "kube-proxy-wsp2w" [bc079c02-40ff-4f10-947b-76f1e9784572] Running
I1121 14:30:05.972692 249617 system_pods.go:89] "kube-scheduler-old-k8s-version-012258" [925c4663-2ad7-41a1-9606-3fbfe8e0904d] Running
I1121 14:30:05.972707 249617 system_pods.go:89] "storage-provisioner" [4195d236-52f6-4bfd-b47a-9cd7cd89bedd] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1121 14:30:05.972726 249617 retry.go:31] will retry after 339.692313ms: missing components: kube-dns
I1121 14:30:06.317124 249617 system_pods.go:86] 8 kube-system pods found
I1121 14:30:06.317155 249617 system_pods.go:89] "coredns-5dd5756b68-vst4c" [3ca4df79-d875-498c-91b8-059d4f975bd0] Running
I1121 14:30:06.317160 249617 system_pods.go:89] "etcd-old-k8s-version-012258" [2316d2c5-5731-4804-b900-b3ed4289f3d5] Running
I1121 14:30:06.317163 249617 system_pods.go:89] "kindnet-f6t7s" [bd28a6b5-0214-42be-8883-1adf1217761c] Running
I1121 14:30:06.317167 249617 system_pods.go:89] "kube-apiserver-old-k8s-version-012258" [fb018e50-0892-4250-9f7d-16731a31f2e5] Running
I1121 14:30:06.317171 249617 system_pods.go:89] "kube-controller-manager-old-k8s-version-012258" [7e21a806-9ed1-4e34-a635-f92287ab6545] Running
I1121 14:30:06.317175 249617 system_pods.go:89] "kube-proxy-wsp2w" [bc079c02-40ff-4f10-947b-76f1e9784572] Running
I1121 14:30:06.317178 249617 system_pods.go:89] "kube-scheduler-old-k8s-version-012258" [925c4663-2ad7-41a1-9606-3fbfe8e0904d] Running
I1121 14:30:06.317181 249617 system_pods.go:89] "storage-provisioner" [4195d236-52f6-4bfd-b47a-9cd7cd89bedd] Running
I1121 14:30:06.317188 249617 system_pods.go:126] duration metric: took 612.020803ms to wait for k8s-apps to be running ...
I1121 14:30:06.317194 249617 system_svc.go:44] waiting for kubelet service to be running ....
I1121 14:30:06.317250 249617 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1121 14:30:06.332295 249617 system_svc.go:56] duration metric: took 15.088564ms WaitForService to wait for kubelet
I1121 14:30:06.332331 249617 kubeadm.go:587] duration metric: took 16.020134285s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1121 14:30:06.332357 249617 node_conditions.go:102] verifying NodePressure condition ...
I1121 14:30:06.338044 249617 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I1121 14:30:06.338071 249617 node_conditions.go:123] node cpu capacity is 8
I1121 14:30:06.338084 249617 node_conditions.go:105] duration metric: took 5.72136ms to run NodePressure ...
I1121 14:30:06.338096 249617 start.go:242] waiting for startup goroutines ...
I1121 14:30:06.338102 249617 start.go:247] waiting for cluster config update ...
I1121 14:30:06.338113 249617 start.go:256] writing updated cluster config ...
I1121 14:30:06.338382 249617 ssh_runner.go:195] Run: rm -f paused
I1121 14:30:06.342534 249617 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1121 14:30:06.347323 249617 pod_ready.go:83] waiting for pod "coredns-5dd5756b68-vst4c" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:06.352062 249617 pod_ready.go:94] pod "coredns-5dd5756b68-vst4c" is "Ready"
I1121 14:30:06.352087 249617 pod_ready.go:86] duration metric: took 4.697932ms for pod "coredns-5dd5756b68-vst4c" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:06.354946 249617 pod_ready.go:83] waiting for pod "etcd-old-k8s-version-012258" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:06.359326 249617 pod_ready.go:94] pod "etcd-old-k8s-version-012258" is "Ready"
I1121 14:30:06.359355 249617 pod_ready.go:86] duration metric: took 4.388182ms for pod "etcd-old-k8s-version-012258" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:06.362007 249617 pod_ready.go:83] waiting for pod "kube-apiserver-old-k8s-version-012258" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:06.366060 249617 pod_ready.go:94] pod "kube-apiserver-old-k8s-version-012258" is "Ready"
I1121 14:30:06.366081 249617 pod_ready.go:86] duration metric: took 4.051984ms for pod "kube-apiserver-old-k8s-version-012258" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:06.368789 249617 pod_ready.go:83] waiting for pod "kube-controller-manager-old-k8s-version-012258" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:06.746914 249617 pod_ready.go:94] pod "kube-controller-manager-old-k8s-version-012258" is "Ready"
I1121 14:30:06.746952 249617 pod_ready.go:86] duration metric: took 378.141903ms for pod "kube-controller-manager-old-k8s-version-012258" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:06.947790 249617 pod_ready.go:83] waiting for pod "kube-proxy-wsp2w" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:07.347266 249617 pod_ready.go:94] pod "kube-proxy-wsp2w" is "Ready"
I1121 14:30:07.347291 249617 pod_ready.go:86] duration metric: took 399.477159ms for pod "kube-proxy-wsp2w" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:07.547233 249617 pod_ready.go:83] waiting for pod "kube-scheduler-old-k8s-version-012258" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:07.946728 249617 pod_ready.go:94] pod "kube-scheduler-old-k8s-version-012258" is "Ready"
I1121 14:30:07.946756 249617 pod_ready.go:86] duration metric: took 399.500525ms for pod "kube-scheduler-old-k8s-version-012258" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:07.946772 249617 pod_ready.go:40] duration metric: took 1.604187461s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1121 14:30:08.009909 249617 start.go:628] kubectl: 1.34.2, cluster: 1.28.0 (minor skew: 6)
I1121 14:30:08.014607 249617 out.go:203]
W1121 14:30:08.016075 249617 out.go:285] ! /usr/local/bin/kubectl is version 1.34.2, which may have incompatibilities with Kubernetes 1.28.0.
I1121 14:30:08.020782 249617 out.go:179] - Want kubectl v1.28.0? Try 'minikube kubectl -- get pods -A'
I1121 14:30:08.022622 249617 out.go:179] * Done! kubectl is now configured to use "old-k8s-version-012258" cluster and "default" namespace by default
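[annotation] The skew warning above follows the upstream support policy: kubectl is only guaranteed to work within one minor version of the apiserver, and 1.34.2 against 1.28.0 is a skew of six. A sketch of that check, with the version strings taken from the log:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// minor extracts the minor component of a "major.minor.patch" version;
// error handling is elided since the inputs here are fixed.
func minor(v string) int {
	m, _ := strconv.Atoi(strings.Split(v, ".")[1])
	return m
}

func main() {
	client, server := "1.34.2", "1.28.0" // values from the log
	skew := minor(client) - minor(server)
	if skew < 0 {
		skew = -skew
	}
	fmt.Printf("minor skew: %d\n", skew) // 6, far beyond the supported +/-1
	if skew > 1 {
		fmt.Println("! kubectl may have incompatibilities with this cluster")
	}
}
```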
I1121 14:30:05.115052 213058 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1121 14:30:05.115115 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1121 14:30:05.115188 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1121 14:30:05.143819 213058 cri.go:89] found id: "56e8102371126ace3f42dda018be1e7af8b34b5b0c82b31bf229739d47944324"
I1121 14:30:05.143839 213058 cri.go:89] found id: "9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1"
I1121 14:30:05.143843 213058 cri.go:89] found id: "934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780"
I1121 14:30:05.143846 213058 cri.go:89] found id: ""
I1121 14:30:05.143853 213058 logs.go:282] 3 containers: [56e8102371126ace3f42dda018be1e7af8b34b5b0c82b31bf229739d47944324 9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1 934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780]
I1121 14:30:05.143912 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:05.148585 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:05.152984 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:05.156944 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1121 14:30:05.157004 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1121 14:30:05.185404 213058 cri.go:89] found id: "4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359"
I1121 14:30:05.185430 213058 cri.go:89] found id: ""
I1121 14:30:05.185440 213058 logs.go:282] 1 containers: [4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359]
I1121 14:30:05.185498 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:05.190360 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1121 14:30:05.190432 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1121 14:30:05.222964 213058 cri.go:89] found id: ""
I1121 14:30:05.222989 213058 logs.go:282] 0 containers: []
W1121 14:30:05.222999 213058 logs.go:284] No container was found matching "coredns"
I1121 14:30:05.223006 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1121 14:30:05.223058 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1121 14:30:05.254414 213058 cri.go:89] found id: "e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6"
I1121 14:30:05.254436 213058 cri.go:89] found id: "f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545"
I1121 14:30:05.254440 213058 cri.go:89] found id: ""
I1121 14:30:05.254447 213058 logs.go:282] 2 containers: [e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6 f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545]
I1121 14:30:05.254505 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:05.258766 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:05.262456 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1121 14:30:05.262524 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1121 14:30:05.288454 213058 cri.go:89] found id: ""
I1121 14:30:05.288486 213058 logs.go:282] 0 containers: []
W1121 14:30:05.288496 213058 logs.go:284] No container was found matching "kube-proxy"
I1121 14:30:05.288505 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1121 14:30:05.288598 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1121 14:30:05.317814 213058 cri.go:89] found id: "652f4807df85eaf29df01467f1035127421360ce721d4dee3abaffd4baf2fbcb"
I1121 14:30:05.317841 213058 cri.go:89] found id: "94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3"
I1121 14:30:05.317847 213058 cri.go:89] found id: "56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463"
I1121 14:30:05.317851 213058 cri.go:89] found id: ""
I1121 14:30:05.317861 213058 logs.go:282] 3 containers: [652f4807df85eaf29df01467f1035127421360ce721d4dee3abaffd4baf2fbcb 94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3 56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463]
I1121 14:30:05.317930 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:05.322506 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:05.326684 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:05.330828 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1121 14:30:05.330957 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1121 14:30:05.360073 213058 cri.go:89] found id: ""
I1121 14:30:05.360098 213058 logs.go:282] 0 containers: []
W1121 14:30:05.360107 213058 logs.go:284] No container was found matching "kindnet"
I1121 14:30:05.360116 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1121 14:30:05.360171 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1121 14:30:05.388524 213058 cri.go:89] found id: ""
I1121 14:30:05.388561 213058 logs.go:282] 0 containers: []
W1121 14:30:05.388573 213058 logs.go:284] No container was found matching "storage-provisioner"
I1121 14:30:05.388587 213058 logs.go:123] Gathering logs for kube-controller-manager [56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463] ...
I1121 14:30:05.388602 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463"
I1121 14:30:05.427247 213058 logs.go:123] Gathering logs for kubelet ...
I1121 14:30:05.427279 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1121 14:30:05.517583 213058 logs.go:123] Gathering logs for kube-apiserver [56e8102371126ace3f42dda018be1e7af8b34b5b0c82b31bf229739d47944324] ...
I1121 14:30:05.517615 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 56e8102371126ace3f42dda018be1e7af8b34b5b0c82b31bf229739d47944324"
I1121 14:30:05.556205 213058 logs.go:123] Gathering logs for kube-apiserver [9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1] ...
I1121 14:30:05.556238 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1"
I1121 14:30:05.601637 213058 logs.go:123] Gathering logs for etcd [4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359] ...
I1121 14:30:05.601692 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359"
I1121 14:30:05.642125 213058 logs.go:123] Gathering logs for containerd ...
I1121 14:30:05.642167 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1121 14:30:05.707252 213058 logs.go:123] Gathering logs for container status ...
I1121 14:30:05.707295 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1121 14:30:05.747947 213058 logs.go:123] Gathering logs for dmesg ...
I1121 14:30:05.747990 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1121 14:30:05.767646 213058 logs.go:123] Gathering logs for describe nodes ...
I1121 14:30:05.767678 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
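[annotation] The post-mortem loop above follows one pattern per component: `crictl ps -a --quiet --name=<component>` to enumerate container IDs (possibly none, hence the `No container was found matching` warnings), then `crictl logs --tail 400 <id>` for each. An illustrative reimplementation, not minikube's code:

```go
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// gather mirrors the two-step pattern in the log for one component.
func gather(component string) {
	ids, err := exec.Command("sudo", "crictl", "ps", "-a", "--quiet",
		"--name="+component).Output()
	if err != nil {
		fmt.Println("listing failed:", err)
		return
	}
	found := strings.Fields(string(ids))
	if len(found) == 0 {
		fmt.Printf("No container was found matching %q\n", component)
		return
	}
	for _, id := range found {
		// Errors are deliberately tolerated: a dead container may still
		// have logs worth keeping in a post-mortem.
		logs, _ := exec.Command("sudo", "crictl", "logs", "--tail", "400", id).CombinedOutput()
		fmt.Printf("=== %s [%s] ===\n%s", component, id, logs)
	}
}

func main() {
	for _, c := range []string{"kube-apiserver", "etcd", "coredns",
		"kube-scheduler", "kube-proxy", "kube-controller-manager"} {
		gather(c)
	}
}
```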
W1121 14:30:04.398534 252125 node_ready.go:57] node "no-preload-921956" has "Ready":"False" status (will retry)
W1121 14:30:06.897181 252125 node_ready.go:57] node "no-preload-921956" has "Ready":"False" status (will retry)
W1121 14:30:08.897492 252125 node_ready.go:57] node "no-preload-921956" has "Ready":"False" status (will retry)
W1121 14:30:06.900285 255774 node_ready.go:57] node "default-k8s-diff-port-376255" has "Ready":"False" status (will retry)
I1121 14:30:07.400113 255774 node_ready.go:49] node "default-k8s-diff-port-376255" is "Ready"
I1121 14:30:07.400148 255774 node_ready.go:38] duration metric: took 11.503726167s for node "default-k8s-diff-port-376255" to be "Ready" ...
I1121 14:30:07.400166 255774 api_server.go:52] waiting for apiserver process to appear ...
I1121 14:30:07.400227 255774 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1121 14:30:07.416428 255774 api_server.go:72] duration metric: took 11.804040955s to wait for apiserver process to appear ...
I1121 14:30:07.416462 255774 api_server.go:88] waiting for apiserver healthz status ...
I1121 14:30:07.416487 255774 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8444/healthz ...
I1121 14:30:07.423355 255774 api_server.go:279] https://192.168.85.2:8444/healthz returned 200:
ok
I1121 14:30:07.424441 255774 api_server.go:141] control plane version: v1.34.1
I1121 14:30:07.424471 255774 api_server.go:131] duration metric: took 8.001103ms to wait for apiserver health ...
I1121 14:30:07.424480 255774 system_pods.go:43] waiting for kube-system pods to appear ...
I1121 14:30:07.428816 255774 system_pods.go:59] 8 kube-system pods found
I1121 14:30:07.428856 255774 system_pods.go:61] "coredns-66bc5c9577-fr27b" [aecd7b98-657f-464e-9860-d060714bbc5d] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1121 14:30:07.428866 255774 system_pods.go:61] "etcd-default-k8s-diff-port-376255" [b46a8392-a768-4a1b-9a89-b0c3c349dc99] Running
I1121 14:30:07.428874 255774 system_pods.go:61] "kindnet-cdzd4" [f954f962-f79a-49e5-8b79-5fbd3c544ffc] Running
I1121 14:30:07.428880 255774 system_pods.go:61] "kube-apiserver-default-k8s-diff-port-376255" [727fff75-5ef1-4665-a510-82662517dd6f] Running
I1121 14:30:07.428886 255774 system_pods.go:61] "kube-controller-manager-default-k8s-diff-port-376255" [d642ccae-4e43-4b4b-9d8d-51515a1aae9c] Running
I1121 14:30:07.428891 255774 system_pods.go:61] "kube-proxy-hdplf" [f4b8f54c-361f-4748-9f31-92ffb753f404] Running
I1121 14:30:07.428899 255774 system_pods.go:61] "kube-scheduler-default-k8s-diff-port-376255" [72272f12-0226-4c07-9867-6cedf46539a4] Running
I1121 14:30:07.428912 255774 system_pods.go:61] "storage-provisioner" [4fa1d228-0310-45d2-87b6-91ce085f1f58] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1121 14:30:07.428921 255774 system_pods.go:74] duration metric: took 4.433771ms to wait for pod list to return data ...
I1121 14:30:07.428932 255774 default_sa.go:34] waiting for default service account to be created ...
I1121 14:30:07.431771 255774 default_sa.go:45] found service account: "default"
I1121 14:30:07.431794 255774 default_sa.go:55] duration metric: took 2.856811ms for default service account to be created ...
I1121 14:30:07.431804 255774 system_pods.go:116] waiting for k8s-apps to be running ...
I1121 14:30:07.435787 255774 system_pods.go:86] 8 kube-system pods found
I1121 14:30:07.435816 255774 system_pods.go:89] "coredns-66bc5c9577-fr27b" [aecd7b98-657f-464e-9860-d060714bbc5d] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1121 14:30:07.435821 255774 system_pods.go:89] "etcd-default-k8s-diff-port-376255" [b46a8392-a768-4a1b-9a89-b0c3c349dc99] Running
I1121 14:30:07.435826 255774 system_pods.go:89] "kindnet-cdzd4" [f954f962-f79a-49e5-8b79-5fbd3c544ffc] Running
I1121 14:30:07.435830 255774 system_pods.go:89] "kube-apiserver-default-k8s-diff-port-376255" [727fff75-5ef1-4665-a510-82662517dd6f] Running
I1121 14:30:07.435833 255774 system_pods.go:89] "kube-controller-manager-default-k8s-diff-port-376255" [d642ccae-4e43-4b4b-9d8d-51515a1aae9c] Running
I1121 14:30:07.435836 255774 system_pods.go:89] "kube-proxy-hdplf" [f4b8f54c-361f-4748-9f31-92ffb753f404] Running
I1121 14:30:07.435841 255774 system_pods.go:89] "kube-scheduler-default-k8s-diff-port-376255" [72272f12-0226-4c07-9867-6cedf46539a4] Running
I1121 14:30:07.435846 255774 system_pods.go:89] "storage-provisioner" [4fa1d228-0310-45d2-87b6-91ce085f1f58] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1121 14:30:07.435871 255774 retry.go:31] will retry after 217.060579ms: missing components: kube-dns
I1121 14:30:07.656900 255774 system_pods.go:86] 8 kube-system pods found
I1121 14:30:07.656930 255774 system_pods.go:89] "coredns-66bc5c9577-fr27b" [aecd7b98-657f-464e-9860-d060714bbc5d] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1121 14:30:07.656937 255774 system_pods.go:89] "etcd-default-k8s-diff-port-376255" [b46a8392-a768-4a1b-9a89-b0c3c349dc99] Running
I1121 14:30:07.656945 255774 system_pods.go:89] "kindnet-cdzd4" [f954f962-f79a-49e5-8b79-5fbd3c544ffc] Running
I1121 14:30:07.656950 255774 system_pods.go:89] "kube-apiserver-default-k8s-diff-port-376255" [727fff75-5ef1-4665-a510-82662517dd6f] Running
I1121 14:30:07.656955 255774 system_pods.go:89] "kube-controller-manager-default-k8s-diff-port-376255" [d642ccae-4e43-4b4b-9d8d-51515a1aae9c] Running
I1121 14:30:07.656959 255774 system_pods.go:89] "kube-proxy-hdplf" [f4b8f54c-361f-4748-9f31-92ffb753f404] Running
I1121 14:30:07.656964 255774 system_pods.go:89] "kube-scheduler-default-k8s-diff-port-376255" [72272f12-0226-4c07-9867-6cedf46539a4] Running
I1121 14:30:07.656970 255774 system_pods.go:89] "storage-provisioner" [4fa1d228-0310-45d2-87b6-91ce085f1f58] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1121 14:30:07.656989 255774 retry.go:31] will retry after 330.648304ms: missing components: kube-dns
I1121 14:30:07.995514 255774 system_pods.go:86] 8 kube-system pods found
I1121 14:30:07.995612 255774 system_pods.go:89] "coredns-66bc5c9577-fr27b" [aecd7b98-657f-464e-9860-d060714bbc5d] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1121 14:30:07.995626 255774 system_pods.go:89] "etcd-default-k8s-diff-port-376255" [b46a8392-a768-4a1b-9a89-b0c3c349dc99] Running
I1121 14:30:07.995636 255774 system_pods.go:89] "kindnet-cdzd4" [f954f962-f79a-49e5-8b79-5fbd3c544ffc] Running
I1121 14:30:07.995642 255774 system_pods.go:89] "kube-apiserver-default-k8s-diff-port-376255" [727fff75-5ef1-4665-a510-82662517dd6f] Running
I1121 14:30:07.995653 255774 system_pods.go:89] "kube-controller-manager-default-k8s-diff-port-376255" [d642ccae-4e43-4b4b-9d8d-51515a1aae9c] Running
I1121 14:30:07.995659 255774 system_pods.go:89] "kube-proxy-hdplf" [f4b8f54c-361f-4748-9f31-92ffb753f404] Running
I1121 14:30:07.995664 255774 system_pods.go:89] "kube-scheduler-default-k8s-diff-port-376255" [72272f12-0226-4c07-9867-6cedf46539a4] Running
I1121 14:30:07.995683 255774 system_pods.go:89] "storage-provisioner" [4fa1d228-0310-45d2-87b6-91ce085f1f58] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1121 14:30:07.995713 255774 retry.go:31] will retry after 466.383408ms: missing components: kube-dns
I1121 14:30:08.466385 255774 system_pods.go:86] 8 kube-system pods found
I1121 14:30:08.466414 255774 system_pods.go:89] "coredns-66bc5c9577-fr27b" [aecd7b98-657f-464e-9860-d060714bbc5d] Running
I1121 14:30:08.466419 255774 system_pods.go:89] "etcd-default-k8s-diff-port-376255" [b46a8392-a768-4a1b-9a89-b0c3c349dc99] Running
I1121 14:30:08.466423 255774 system_pods.go:89] "kindnet-cdzd4" [f954f962-f79a-49e5-8b79-5fbd3c544ffc] Running
I1121 14:30:08.466427 255774 system_pods.go:89] "kube-apiserver-default-k8s-diff-port-376255" [727fff75-5ef1-4665-a510-82662517dd6f] Running
I1121 14:30:08.466430 255774 system_pods.go:89] "kube-controller-manager-default-k8s-diff-port-376255" [d642ccae-4e43-4b4b-9d8d-51515a1aae9c] Running
I1121 14:30:08.466435 255774 system_pods.go:89] "kube-proxy-hdplf" [f4b8f54c-361f-4748-9f31-92ffb753f404] Running
I1121 14:30:08.466438 255774 system_pods.go:89] "kube-scheduler-default-k8s-diff-port-376255" [72272f12-0226-4c07-9867-6cedf46539a4] Running
I1121 14:30:08.466441 255774 system_pods.go:89] "storage-provisioner" [4fa1d228-0310-45d2-87b6-91ce085f1f58] Running
I1121 14:30:08.466448 255774 system_pods.go:126] duration metric: took 1.034639333s to wait for k8s-apps to be running ...
I1121 14:30:08.466454 255774 system_svc.go:44] waiting for kubelet service to be running ....
I1121 14:30:08.466495 255774 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1121 14:30:08.480058 255774 system_svc.go:56] duration metric: took 13.59071ms WaitForService to wait for kubelet
I1121 14:30:08.480087 255774 kubeadm.go:587] duration metric: took 12.867708638s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1121 14:30:08.480104 255774 node_conditions.go:102] verifying NodePressure condition ...
I1121 14:30:08.483054 255774 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I1121 14:30:08.483077 255774 node_conditions.go:123] node cpu capacity is 8
I1121 14:30:08.483089 255774 node_conditions.go:105] duration metric: took 2.980591ms to run NodePressure ...
I1121 14:30:08.483101 255774 start.go:242] waiting for startup goroutines ...
I1121 14:30:08.483107 255774 start.go:247] waiting for cluster config update ...
I1121 14:30:08.483116 255774 start.go:256] writing updated cluster config ...
I1121 14:30:08.483378 255774 ssh_runner.go:195] Run: rm -f paused
I1121 14:30:08.487457 255774 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1121 14:30:08.490869 255774 pod_ready.go:83] waiting for pod "coredns-66bc5c9577-fr27b" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:08.495613 255774 pod_ready.go:94] pod "coredns-66bc5c9577-fr27b" is "Ready"
I1121 14:30:08.495638 255774 pod_ready.go:86] duration metric: took 4.745112ms for pod "coredns-66bc5c9577-fr27b" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:08.498070 255774 pod_ready.go:83] waiting for pod "etcd-default-k8s-diff-port-376255" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:08.502098 255774 pod_ready.go:94] pod "etcd-default-k8s-diff-port-376255" is "Ready"
I1121 14:30:08.502122 255774 pod_ready.go:86] duration metric: took 4.029361ms for pod "etcd-default-k8s-diff-port-376255" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:08.504276 255774 pod_ready.go:83] waiting for pod "kube-apiserver-default-k8s-diff-port-376255" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:08.508229 255774 pod_ready.go:94] pod "kube-apiserver-default-k8s-diff-port-376255" is "Ready"
I1121 14:30:08.508250 255774 pod_ready.go:86] duration metric: took 3.957821ms for pod "kube-apiserver-default-k8s-diff-port-376255" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:08.510387 255774 pod_ready.go:83] waiting for pod "kube-controller-manager-default-k8s-diff-port-376255" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:08.891344 255774 pod_ready.go:94] pod "kube-controller-manager-default-k8s-diff-port-376255" is "Ready"
I1121 14:30:08.891369 255774 pod_ready.go:86] duration metric: took 380.959206ms for pod "kube-controller-manager-default-k8s-diff-port-376255" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:09.091636 255774 pod_ready.go:83] waiting for pod "kube-proxy-hdplf" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:09.492078 255774 pod_ready.go:94] pod "kube-proxy-hdplf" is "Ready"
I1121 14:30:09.492108 255774 pod_ready.go:86] duration metric: took 400.444722ms for pod "kube-proxy-hdplf" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:09.693278 255774 pod_ready.go:83] waiting for pod "kube-scheduler-default-k8s-diff-port-376255" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:10.092105 255774 pod_ready.go:94] pod "kube-scheduler-default-k8s-diff-port-376255" is "Ready"
I1121 14:30:10.092133 255774 pod_ready.go:86] duration metric: took 398.824976ms for pod "kube-scheduler-default-k8s-diff-port-376255" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:10.092146 255774 pod_ready.go:40] duration metric: took 1.604655578s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1121 14:30:10.138628 255774 start.go:628] kubectl: 1.34.2, cluster: 1.34.1 (minor skew: 0)
I1121 14:30:10.140593 255774 out.go:179] * Done! kubectl is now configured to use "default-k8s-diff-port-376255" cluster and "default" namespace by default
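[annotation] The final `pod_ready` phase above waits up to 4m0s for every kube-system pod carrying one of the listed component labels to report the `Ready` condition. A hedged sketch of an equivalent check using client-go; the label selectors come from the log, while the polling helper and kubeconfig usage are illustrative:

```go
package main

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// allReady reports whether every kube-system pod matching selector has the
// Ready condition set to True.
func allReady(ctx context.Context, cs kubernetes.Interface, selector string) (bool, error) {
	pods, err := cs.CoreV1().Pods("kube-system").List(ctx,
		metav1.ListOptions{LabelSelector: selector})
	if err != nil {
		return false, err
	}
	for _, p := range pods.Items {
		ready := false
		for _, c := range p.Status.Conditions {
			if c.Type == corev1.PodReady && c.Status == corev1.ConditionTrue {
				ready = true
			}
		}
		if !ready {
			return false, nil
		}
	}
	return true, nil
}

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/var/lib/minikube/kubeconfig")
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	selectors := []string{"k8s-app=kube-dns", "component=etcd",
		"component=kube-apiserver", "component=kube-controller-manager",
		"k8s-app=kube-proxy", "component=kube-scheduler"}
	ctx, cancel := context.WithTimeout(context.Background(), 4*time.Minute)
	defer cancel()
	for _, sel := range selectors {
		for {
			if ctx.Err() != nil {
				panic(ctx.Err()) // timed out waiting for readiness
			}
			if ok, err := allReady(ctx, cs, sel); err == nil && ok {
				break
			}
			time.Sleep(500 * time.Millisecond)
		}
		fmt.Println("ready:", sel)
	}
}
```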
I1121 14:30:08.754284 213058 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": (2.986586875s)
W1121 14:30:08.754342 213058 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
Get "https://localhost:8443/api/v1/nodes?limit=500": dial tcp [::1]:8443: connect: connection refused - error from a previous attempt: read tcp [::1]:60538->[::1]:8443: read: connection reset by peer
output:
** stderr **
Get "https://localhost:8443/api/v1/nodes?limit=500": dial tcp [::1]:8443: connect: connection refused - error from a previous attempt: read tcp [::1]:60538->[::1]:8443: read: connection reset by peer
** /stderr **
I1121 14:30:08.754352 213058 logs.go:123] Gathering logs for kube-apiserver [934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780] ...
I1121 14:30:08.754366 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780"
I1121 14:30:08.789119 213058 logs.go:123] Gathering logs for kube-scheduler [e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6] ...
I1121 14:30:08.789149 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6"
I1121 14:30:08.842933 213058 logs.go:123] Gathering logs for kube-scheduler [f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545] ...
I1121 14:30:08.842974 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545"
I1121 14:30:08.880878 213058 logs.go:123] Gathering logs for kube-controller-manager [652f4807df85eaf29df01467f1035127421360ce721d4dee3abaffd4baf2fbcb] ...
I1121 14:30:08.880919 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 652f4807df85eaf29df01467f1035127421360ce721d4dee3abaffd4baf2fbcb"
I1121 14:30:08.910920 213058 logs.go:123] Gathering logs for kube-controller-manager [94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3] ...
I1121 14:30:08.910953 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3"
I1121 14:30:11.440020 213058 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1121 14:30:11.440496 213058 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1121 14:30:11.440556 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1121 14:30:11.440601 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1121 14:30:11.472645 213058 cri.go:89] found id: "56e8102371126ace3f42dda018be1e7af8b34b5b0c82b31bf229739d47944324"
I1121 14:30:11.472669 213058 cri.go:89] found id: "934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780"
I1121 14:30:11.472674 213058 cri.go:89] found id: ""
I1121 14:30:11.472683 213058 logs.go:282] 2 containers: [56e8102371126ace3f42dda018be1e7af8b34b5b0c82b31bf229739d47944324 934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780]
I1121 14:30:11.472748 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:11.478061 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:11.482946 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1121 14:30:11.483034 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1121 14:30:11.517693 213058 cri.go:89] found id: "4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359"
I1121 14:30:11.517722 213058 cri.go:89] found id: ""
I1121 14:30:11.517732 213058 logs.go:282] 1 containers: [4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359]
I1121 14:30:11.517797 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:11.523621 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1121 14:30:11.523699 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1121 14:30:11.559155 213058 cri.go:89] found id: ""
I1121 14:30:11.559194 213058 logs.go:282] 0 containers: []
W1121 14:30:11.559204 213058 logs.go:284] No container was found matching "coredns"
I1121 14:30:11.559212 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1121 14:30:11.559271 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1121 14:30:11.595093 213058 cri.go:89] found id: "e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6"
I1121 14:30:11.595127 213058 cri.go:89] found id: "f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545"
I1121 14:30:11.595133 213058 cri.go:89] found id: ""
I1121 14:30:11.595143 213058 logs.go:282] 2 containers: [e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6 f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545]
I1121 14:30:11.595194 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:11.600085 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:11.604973 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1121 14:30:11.605048 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1121 14:30:11.639606 213058 cri.go:89] found id: ""
I1121 14:30:11.639636 213058 logs.go:282] 0 containers: []
W1121 14:30:11.639647 213058 logs.go:284] No container was found matching "kube-proxy"
I1121 14:30:11.639653 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1121 14:30:11.639713 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1121 14:30:11.684373 213058 cri.go:89] found id: "652f4807df85eaf29df01467f1035127421360ce721d4dee3abaffd4baf2fbcb"
I1121 14:30:11.684400 213058 cri.go:89] found id: "94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3"
I1121 14:30:11.684405 213058 cri.go:89] found id: "56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463"
I1121 14:30:11.684410 213058 cri.go:89] found id: ""
I1121 14:30:11.684421 213058 logs.go:282] 3 containers: [652f4807df85eaf29df01467f1035127421360ce721d4dee3abaffd4baf2fbcb 94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3 56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463]
I1121 14:30:11.684482 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:11.689732 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:11.695253 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:11.701315 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1121 14:30:11.701388 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1121 14:30:11.732802 213058 cri.go:89] found id: ""
I1121 14:30:11.732831 213058 logs.go:282] 0 containers: []
W1121 14:30:11.732841 213058 logs.go:284] No container was found matching "kindnet"
I1121 14:30:11.732848 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1121 14:30:11.732907 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1121 14:30:11.761686 213058 cri.go:89] found id: ""
I1121 14:30:11.761717 213058 logs.go:282] 0 containers: []
W1121 14:30:11.761729 213058 logs.go:284] No container was found matching "storage-provisioner"
I1121 14:30:11.761741 213058 logs.go:123] Gathering logs for kube-scheduler [e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6] ...
I1121 14:30:11.761756 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6"
I1121 14:30:11.816634 213058 logs.go:123] Gathering logs for kube-controller-manager [652f4807df85eaf29df01467f1035127421360ce721d4dee3abaffd4baf2fbcb] ...
I1121 14:30:11.816670 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 652f4807df85eaf29df01467f1035127421360ce721d4dee3abaffd4baf2fbcb"
I1121 14:30:11.846024 213058 logs.go:123] Gathering logs for kube-controller-manager [94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3] ...
I1121 14:30:11.846055 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3"
I1121 14:30:11.876932 213058 logs.go:123] Gathering logs for kube-controller-manager [56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463] ...
I1121 14:30:11.876964 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463"
I1121 14:30:11.912984 213058 logs.go:123] Gathering logs for containerd ...
I1121 14:30:11.913018 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1121 14:30:11.965381 213058 logs.go:123] Gathering logs for container status ...
I1121 14:30:11.965423 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1121 14:30:11.997477 213058 logs.go:123] Gathering logs for dmesg ...
I1121 14:30:11.997509 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1121 14:30:12.011497 213058 logs.go:123] Gathering logs for describe nodes ...
I1121 14:30:12.011524 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1121 14:30:12.071024 213058 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1121 14:30:12.071049 213058 logs.go:123] Gathering logs for kube-apiserver [56e8102371126ace3f42dda018be1e7af8b34b5b0c82b31bf229739d47944324] ...
I1121 14:30:12.071065 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 56e8102371126ace3f42dda018be1e7af8b34b5b0c82b31bf229739d47944324"
I1121 14:30:12.106865 213058 logs.go:123] Gathering logs for kube-apiserver [934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780] ...
I1121 14:30:12.106898 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780"
I1121 14:30:12.141245 213058 logs.go:123] Gathering logs for kube-scheduler [f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545] ...
I1121 14:30:12.141276 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545"
I1121 14:30:12.176551 213058 logs.go:123] Gathering logs for kubelet ...
I1121 14:30:12.176600 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1121 14:30:12.268742 213058 logs.go:123] Gathering logs for etcd [4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359] ...
I1121 14:30:12.268780 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359"
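[editor's note] The "Checking apiserver healthz" / "stopped:" pair above (api_server.go:253/269) is, at bottom, a plain HTTPS GET against the apiserver's /healthz endpoint, with any transport error (such as the "connection refused" seen while the apiserver restarts) treated as "not ready yet, keep gathering logs". A minimal Go sketch of that probe, with the URL taken from this run; InsecureSkipVerify is an assumption for the sketch only, since minikube itself trusts the cluster CA:

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

// apiserverHealthy returns nil once GET <url> answers 200 ("ok").
func apiserverHealthy(url string) error {
	client := &http.Client{
		Timeout: 2 * time.Second,
		// Assumption: skip cert verification for this sketch; a real client pins the cluster CA.
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}
	resp, err := client.Get(url)
	if err != nil {
		// e.g. "dial tcp 192.168.76.2:8443: connect: connection refused"
		return fmt.Errorf("stopped: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("healthz returned %d", resp.StatusCode)
	}
	return nil
}

func main() {
	fmt.Println(apiserverHealthy("https://192.168.76.2:8443/healthz"))
}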
W1121 14:30:10.897620 252125 node_ready.go:57] node "no-preload-921956" has "Ready":"False" status (will retry)
I1121 14:30:11.398100 252125 node_ready.go:49] node "no-preload-921956" is "Ready"
I1121 14:30:11.398128 252125 node_ready.go:38] duration metric: took 14.003530083s for node "no-preload-921956" to be "Ready" ...
I1121 14:30:11.398142 252125 api_server.go:52] waiting for apiserver process to appear ...
I1121 14:30:11.398195 252125 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1121 14:30:11.412043 252125 api_server.go:72] duration metric: took 14.35241025s to wait for apiserver process to appear ...
I1121 14:30:11.412070 252125 api_server.go:88] waiting for apiserver healthz status ...
I1121 14:30:11.412087 252125 api_server.go:253] Checking apiserver healthz at https://192.168.103.2:8443/healthz ...
I1121 14:30:11.417254 252125 api_server.go:279] https://192.168.103.2:8443/healthz returned 200:
ok
I1121 14:30:11.418517 252125 api_server.go:141] control plane version: v1.34.1
I1121 14:30:11.418570 252125 api_server.go:131] duration metric: took 6.492303ms to wait for apiserver health ...
I1121 14:30:11.418581 252125 system_pods.go:43] waiting for kube-system pods to appear ...
I1121 14:30:11.421927 252125 system_pods.go:59] 8 kube-system pods found
I1121 14:30:11.422024 252125 system_pods.go:61] "coredns-66bc5c9577-s4rzb" [4941c273-72bf-49af-ad72-793444a43d21] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1121 14:30:11.422034 252125 system_pods.go:61] "etcd-no-preload-921956" [2b973978-8ff2-488f-b54b-80bb44d4f320] Running
I1121 14:30:11.422047 252125 system_pods.go:61] "kindnet-kf24h" [c698f297-3ff4-4f90-a871-5c4c944b9e61] Running
I1121 14:30:11.422059 252125 system_pods.go:61] "kube-apiserver-no-preload-921956" [11865678-b4f0-4cb1-9f82-9c59edf0d6e6] Running
I1121 14:30:11.422069 252125 system_pods.go:61] "kube-controller-manager-no-preload-921956" [5740abab-80b7-4352-8d44-40c9ad7fc713] Running
I1121 14:30:11.422073 252125 system_pods.go:61] "kube-proxy-wmx7z" [7d5a84f9-144c-4920-a08d-478587a56498] Running
I1121 14:30:11.422077 252125 system_pods.go:61] "kube-scheduler-no-preload-921956" [a200f6cd-f579-45e7-9f94-080ca622a30b] Running
I1121 14:30:11.422082 252125 system_pods.go:61] "storage-provisioner" [75fb9c04-833c-4511-83c7-380f4848e49d] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1121 14:30:11.422094 252125 system_pods.go:74] duration metric: took 3.505153ms to wait for pod list to return data ...
I1121 14:30:11.422109 252125 default_sa.go:34] waiting for default service account to be created ...
I1121 14:30:11.424685 252125 default_sa.go:45] found service account: "default"
I1121 14:30:11.424710 252125 default_sa.go:55] duration metric: took 2.591611ms for default service account to be created ...
I1121 14:30:11.424722 252125 system_pods.go:116] waiting for k8s-apps to be running ...
I1121 14:30:11.427627 252125 system_pods.go:86] 8 kube-system pods found
I1121 14:30:11.427680 252125 system_pods.go:89] "coredns-66bc5c9577-s4rzb" [4941c273-72bf-49af-ad72-793444a43d21] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1121 14:30:11.427689 252125 system_pods.go:89] "etcd-no-preload-921956" [2b973978-8ff2-488f-b54b-80bb44d4f320] Running
I1121 14:30:11.427703 252125 system_pods.go:89] "kindnet-kf24h" [c698f297-3ff4-4f90-a871-5c4c944b9e61] Running
I1121 14:30:11.427713 252125 system_pods.go:89] "kube-apiserver-no-preload-921956" [11865678-b4f0-4cb1-9f82-9c59edf0d6e6] Running
I1121 14:30:11.427721 252125 system_pods.go:89] "kube-controller-manager-no-preload-921956" [5740abab-80b7-4352-8d44-40c9ad7fc713] Running
I1121 14:30:11.427726 252125 system_pods.go:89] "kube-proxy-wmx7z" [7d5a84f9-144c-4920-a08d-478587a56498] Running
I1121 14:30:11.427731 252125 system_pods.go:89] "kube-scheduler-no-preload-921956" [a200f6cd-f579-45e7-9f94-080ca622a30b] Running
I1121 14:30:11.427737 252125 system_pods.go:89] "storage-provisioner" [75fb9c04-833c-4511-83c7-380f4848e49d] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1121 14:30:11.427768 252125 retry.go:31] will retry after 234.428318ms: missing components: kube-dns
I1121 14:30:11.669788 252125 system_pods.go:86] 8 kube-system pods found
I1121 14:30:11.669831 252125 system_pods.go:89] "coredns-66bc5c9577-s4rzb" [4941c273-72bf-49af-ad72-793444a43d21] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1121 14:30:11.669840 252125 system_pods.go:89] "etcd-no-preload-921956" [2b973978-8ff2-488f-b54b-80bb44d4f320] Running
I1121 14:30:11.669850 252125 system_pods.go:89] "kindnet-kf24h" [c698f297-3ff4-4f90-a871-5c4c944b9e61] Running
I1121 14:30:11.669858 252125 system_pods.go:89] "kube-apiserver-no-preload-921956" [11865678-b4f0-4cb1-9f82-9c59edf0d6e6] Running
I1121 14:30:11.669865 252125 system_pods.go:89] "kube-controller-manager-no-preload-921956" [5740abab-80b7-4352-8d44-40c9ad7fc713] Running
I1121 14:30:11.669871 252125 system_pods.go:89] "kube-proxy-wmx7z" [7d5a84f9-144c-4920-a08d-478587a56498] Running
I1121 14:30:11.669877 252125 system_pods.go:89] "kube-scheduler-no-preload-921956" [a200f6cd-f579-45e7-9f94-080ca622a30b] Running
I1121 14:30:11.669893 252125 system_pods.go:89] "storage-provisioner" [75fb9c04-833c-4511-83c7-380f4848e49d] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1121 14:30:11.669919 252125 retry.go:31] will retry after 250.085803ms: missing components: kube-dns
I1121 14:30:11.924517 252125 system_pods.go:86] 8 kube-system pods found
I1121 14:30:11.924602 252125 system_pods.go:89] "coredns-66bc5c9577-s4rzb" [4941c273-72bf-49af-ad72-793444a43d21] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1121 14:30:11.924614 252125 system_pods.go:89] "etcd-no-preload-921956" [2b973978-8ff2-488f-b54b-80bb44d4f320] Running
I1121 14:30:11.924627 252125 system_pods.go:89] "kindnet-kf24h" [c698f297-3ff4-4f90-a871-5c4c944b9e61] Running
I1121 14:30:11.924633 252125 system_pods.go:89] "kube-apiserver-no-preload-921956" [11865678-b4f0-4cb1-9f82-9c59edf0d6e6] Running
I1121 14:30:11.924642 252125 system_pods.go:89] "kube-controller-manager-no-preload-921956" [5740abab-80b7-4352-8d44-40c9ad7fc713] Running
I1121 14:30:11.924647 252125 system_pods.go:89] "kube-proxy-wmx7z" [7d5a84f9-144c-4920-a08d-478587a56498] Running
I1121 14:30:11.924653 252125 system_pods.go:89] "kube-scheduler-no-preload-921956" [a200f6cd-f579-45e7-9f94-080ca622a30b] Running
I1121 14:30:11.924661 252125 system_pods.go:89] "storage-provisioner" [75fb9c04-833c-4511-83c7-380f4848e49d] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1121 14:30:11.924682 252125 retry.go:31] will retry after 441.862758ms: missing components: kube-dns
I1121 14:30:12.371065 252125 system_pods.go:86] 8 kube-system pods found
I1121 14:30:12.371110 252125 system_pods.go:89] "coredns-66bc5c9577-s4rzb" [4941c273-72bf-49af-ad72-793444a43d21] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1121 14:30:12.371122 252125 system_pods.go:89] "etcd-no-preload-921956" [2b973978-8ff2-488f-b54b-80bb44d4f320] Running
I1121 14:30:12.371131 252125 system_pods.go:89] "kindnet-kf24h" [c698f297-3ff4-4f90-a871-5c4c944b9e61] Running
I1121 14:30:12.371136 252125 system_pods.go:89] "kube-apiserver-no-preload-921956" [11865678-b4f0-4cb1-9f82-9c59edf0d6e6] Running
I1121 14:30:12.371142 252125 system_pods.go:89] "kube-controller-manager-no-preload-921956" [5740abab-80b7-4352-8d44-40c9ad7fc713] Running
I1121 14:30:12.371147 252125 system_pods.go:89] "kube-proxy-wmx7z" [7d5a84f9-144c-4920-a08d-478587a56498] Running
I1121 14:30:12.371158 252125 system_pods.go:89] "kube-scheduler-no-preload-921956" [a200f6cd-f579-45e7-9f94-080ca622a30b] Running
I1121 14:30:12.371170 252125 system_pods.go:89] "storage-provisioner" [75fb9c04-833c-4511-83c7-380f4848e49d] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1121 14:30:12.371189 252125 retry.go:31] will retry after 502.578888ms: missing components: kube-dns
I1121 14:30:12.879209 252125 system_pods.go:86] 8 kube-system pods found
I1121 14:30:12.879243 252125 system_pods.go:89] "coredns-66bc5c9577-s4rzb" [4941c273-72bf-49af-ad72-793444a43d21] Running
I1121 14:30:12.879249 252125 system_pods.go:89] "etcd-no-preload-921956" [2b973978-8ff2-488f-b54b-80bb44d4f320] Running
I1121 14:30:12.879253 252125 system_pods.go:89] "kindnet-kf24h" [c698f297-3ff4-4f90-a871-5c4c944b9e61] Running
I1121 14:30:12.879258 252125 system_pods.go:89] "kube-apiserver-no-preload-921956" [11865678-b4f0-4cb1-9f82-9c59edf0d6e6] Running
I1121 14:30:12.879268 252125 system_pods.go:89] "kube-controller-manager-no-preload-921956" [5740abab-80b7-4352-8d44-40c9ad7fc713] Running
I1121 14:30:12.879271 252125 system_pods.go:89] "kube-proxy-wmx7z" [7d5a84f9-144c-4920-a08d-478587a56498] Running
I1121 14:30:12.879275 252125 system_pods.go:89] "kube-scheduler-no-preload-921956" [a200f6cd-f579-45e7-9f94-080ca622a30b] Running
I1121 14:30:12.879278 252125 system_pods.go:89] "storage-provisioner" [75fb9c04-833c-4511-83c7-380f4848e49d] Running
I1121 14:30:12.879289 252125 system_pods.go:126] duration metric: took 1.454561179s to wait for k8s-apps to be running ...
I1121 14:30:12.879301 252125 system_svc.go:44] waiting for kubelet service to be running ....
I1121 14:30:12.879351 252125 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1121 14:30:12.894061 252125 system_svc.go:56] duration metric: took 14.74714ms WaitForService to wait for kubelet
I1121 14:30:12.894092 252125 kubeadm.go:587] duration metric: took 15.834465857s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1121 14:30:12.894115 252125 node_conditions.go:102] verifying NodePressure condition ...
I1121 14:30:12.897599 252125 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I1121 14:30:12.897630 252125 node_conditions.go:123] node cpu capacity is 8
I1121 14:30:12.897641 252125 node_conditions.go:105] duration metric: took 3.520753ms to run NodePressure ...
I1121 14:30:12.897652 252125 start.go:242] waiting for startup goroutines ...
I1121 14:30:12.897659 252125 start.go:247] waiting for cluster config update ...
I1121 14:30:12.897669 252125 start.go:256] writing updated cluster config ...
I1121 14:30:12.897983 252125 ssh_runner.go:195] Run: rm -f paused
I1121 14:30:12.902897 252125 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1121 14:30:12.906562 252125 pod_ready.go:83] waiting for pod "coredns-66bc5c9577-s4rzb" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:12.912263 252125 pod_ready.go:94] pod "coredns-66bc5c9577-s4rzb" is "Ready"
I1121 14:30:12.912286 252125 pod_ready.go:86] duration metric: took 5.702456ms for pod "coredns-66bc5c9577-s4rzb" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:12.915190 252125 pod_ready.go:83] waiting for pod "etcd-no-preload-921956" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:12.919870 252125 pod_ready.go:94] pod "etcd-no-preload-921956" is "Ready"
I1121 14:30:12.919896 252125 pod_ready.go:86] duration metric: took 4.68423ms for pod "etcd-no-preload-921956" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:12.921926 252125 pod_ready.go:83] waiting for pod "kube-apiserver-no-preload-921956" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:12.925984 252125 pod_ready.go:94] pod "kube-apiserver-no-preload-921956" is "Ready"
I1121 14:30:12.926012 252125 pod_ready.go:86] duration metric: took 4.065762ms for pod "kube-apiserver-no-preload-921956" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:12.928283 252125 pod_ready.go:83] waiting for pod "kube-controller-manager-no-preload-921956" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:13.307608 252125 pod_ready.go:94] pod "kube-controller-manager-no-preload-921956" is "Ready"
I1121 14:30:13.307639 252125 pod_ready.go:86] duration metric: took 379.335151ms for pod "kube-controller-manager-no-preload-921956" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:13.508229 252125 pod_ready.go:83] waiting for pod "kube-proxy-wmx7z" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:13.907070 252125 pod_ready.go:94] pod "kube-proxy-wmx7z" is "Ready"
I1121 14:30:13.907101 252125 pod_ready.go:86] duration metric: took 398.843128ms for pod "kube-proxy-wmx7z" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:14.108040 252125 pod_ready.go:83] waiting for pod "kube-scheduler-no-preload-921956" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:14.507264 252125 pod_ready.go:94] pod "kube-scheduler-no-preload-921956" is "Ready"
I1121 14:30:14.507293 252125 pod_ready.go:86] duration metric: took 399.219492ms for pod "kube-scheduler-no-preload-921956" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:14.507307 252125 pod_ready.go:40] duration metric: took 1.604362709s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1121 14:30:14.554506 252125 start.go:628] kubectl: 1.34.2, cluster: 1.34.1 (minor skew: 0)
I1121 14:30:14.556366 252125 out.go:179] * Done! kubectl is now configured to use "no-preload-921956" cluster and "default" namespace by default
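[editor's note] The "will retry after 234.428318ms: missing components: kube-dns" lines above come from a poll-with-jittered-backoff loop (retry.go:31): list the kube-system pods, and while a required component is missing, sleep a growing randomized interval and try again until a deadline. A rough Go sketch of the pattern; missing() is a hypothetical stand-in for the real client-go pod listing:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// waitForComponents polls missing() until it returns nothing or the deadline passes.
func waitForComponents(deadline time.Duration, missing func() []string) error {
	start := time.Now()
	backoff := 200 * time.Millisecond
	for {
		m := missing()
		if len(m) == 0 {
			return nil
		}
		if time.Since(start) > deadline {
			return fmt.Errorf("timed out, still missing: %v", m)
		}
		// Jittered, roughly growing delay, matching the irregular intervals in the log.
		sleep := backoff + time.Duration(rand.Int63n(int64(backoff)))
		fmt.Printf("will retry after %v: missing components: %v\n", sleep, m)
		time.Sleep(sleep)
		backoff += backoff / 2
	}
}

func main() {
	attempts := 0
	// Toy stand-in: kube-dns shows up on the third poll, roughly as it did above.
	err := waitForComponents(10*time.Second, func() []string {
		attempts++
		if attempts < 3 {
			return []string{"kube-dns"}
		}
		return nil
	})
	fmt.Println(err)
}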
==> container status <==
CONTAINER       IMAGE           CREATED          STATE     NAME                      ATTEMPT   POD ID          POD                                                NAMESPACE
b902d4d95366e   56cc512116c8f   7 seconds ago    Running   busybox                   0         650f980a2b9de   busybox                                            default
4cd21f3197431   6e38f40d628db   12 seconds ago   Running   storage-provisioner       0         23e45253f8c7e   storage-provisioner                                kube-system
5c05a4ce99693   ead0a4a53df89   12 seconds ago   Running   coredns                   0         4a38fce5ce541   coredns-5dd5756b68-vst4c                           kube-system
14f62b42937d6   409467f978b4a   23 seconds ago   Running   kindnet-cni               0         2189620d082f5   kindnet-f6t7s                                      kube-system
7b9fdeac6c297   ea1030da44aa1   27 seconds ago   Running   kube-proxy                0         7e0d6db9e6b3d   kube-proxy-wsp2w                                   kube-system
2ff2d15ad456d   f6f496300a2ae   46 seconds ago   Running   kube-scheduler            0         a2abbb0781499   kube-scheduler-old-k8s-version-012258              kube-system
bff5755d3bb4c   bb5e0dde9054c   46 seconds ago   Running   kube-apiserver            0         0f35f911732de   kube-apiserver-old-k8s-version-012258              kube-system
24c3a525c2057   73deb9a3f7025   46 seconds ago   Running   etcd                      0         11bd8f3a7d6a7   etcd-old-k8s-version-012258                        kube-system
9694941d50234   4be79c38a4bab   46 seconds ago   Running   kube-controller-manager   0         45f5f9128f983   kube-controller-manager-old-k8s-version-012258     kube-system
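[editor's note] The table above is the node-side output of "sudo crictl ps -a" captured by the "container status" gathering step earlier in the log; on a live profile it should be reproducible with something along the lines of "minikube ssh -p old-k8s-version-012258 -- sudo crictl ps -a" (profile name taken from this run).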
==> containerd <==
Nov 21 14:30:05 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:05.663617176Z" level=info msg="StartContainer for \"5c05a4ce996931fe774ecca66b33620ebb8a09a835d63b1f0ddd04105345bb76\""
Nov 21 14:30:05 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:05.663619446Z" level=info msg="Container 4cd21f31974314e5db6d58ee50bbd67f0daf675c91355ac568f2d0140f7a8d6c: CDI devices from CRI Config.CDIDevices: []"
Nov 21 14:30:05 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:05.664751507Z" level=info msg="connecting to shim 5c05a4ce996931fe774ecca66b33620ebb8a09a835d63b1f0ddd04105345bb76" address="unix:///run/containerd/s/0b88234bafabade7aa89e6626d296420e30066b3991abfec21350310268aa8a7" protocol=ttrpc version=3
Nov 21 14:30:05 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:05.672254961Z" level=info msg="CreateContainer within sandbox \"23e45253f8c7ee6d14427e06305531cf9d976c8c976bd1a48cedecbea7976313\" for &ContainerMetadata{Name:storage-provisioner,Attempt:0,} returns container id \"4cd21f31974314e5db6d58ee50bbd67f0daf675c91355ac568f2d0140f7a8d6c\""
Nov 21 14:30:05 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:05.673493529Z" level=info msg="StartContainer for \"4cd21f31974314e5db6d58ee50bbd67f0daf675c91355ac568f2d0140f7a8d6c\""
Nov 21 14:30:05 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:05.674511601Z" level=info msg="connecting to shim 4cd21f31974314e5db6d58ee50bbd67f0daf675c91355ac568f2d0140f7a8d6c" address="unix:///run/containerd/s/a82bd5a517bceb0823436c092fd804897bb31601e146a9022325dd22f0adc41d" protocol=ttrpc version=3
Nov 21 14:30:05 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:05.728082486Z" level=info msg="StartContainer for \"4cd21f31974314e5db6d58ee50bbd67f0daf675c91355ac568f2d0140f7a8d6c\" returns successfully"
Nov 21 14:30:05 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:05.728959460Z" level=info msg="StartContainer for \"5c05a4ce996931fe774ecca66b33620ebb8a09a835d63b1f0ddd04105345bb76\" returns successfully"
Nov 21 14:30:08 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:08.528101810Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:fa895e52-0bff-4604-8b62-fd0f087015e8,Namespace:default,Attempt:0,}"
Nov 21 14:30:08 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:08.569589613Z" level=info msg="connecting to shim 650f980a2b9de14dfd5f63378bb97f102c6ac2132a9ada4c16a5ef068e7d2a2c" address="unix:///run/containerd/s/5e291cbce6d45d78977b32eb821eca28abc28581b57d5fa47a45bc5da629cfec" namespace=k8s.io protocol=ttrpc version=3
Nov 21 14:30:08 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:08.641364674Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:fa895e52-0bff-4604-8b62-fd0f087015e8,Namespace:default,Attempt:0,} returns sandbox id \"650f980a2b9de14dfd5f63378bb97f102c6ac2132a9ada4c16a5ef068e7d2a2c\""
Nov 21 14:30:08 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:08.643152152Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\""
Nov 21 14:30:10 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:10.895297688Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 21 14:30:10 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:10.896188926Z" level=info msg="stop pulling image gcr.io/k8s-minikube/busybox:1.28.4-glibc: active requests=0, bytes read=2396646"
Nov 21 14:30:10 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:10.897638365Z" level=info msg="ImageCreate event name:\"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 21 14:30:10 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:10.900612481Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 21 14:30:10 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:10.901224670Z" level=info msg="Pulled image \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" with image id \"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\", repo tag \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\", repo digest \"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\", size \"2395207\" in 2.258026607s"
Nov 21 14:30:10 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:10.901267593Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" returns image reference \"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\""
Nov 21 14:30:10 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:10.903245567Z" level=info msg="CreateContainer within sandbox \"650f980a2b9de14dfd5f63378bb97f102c6ac2132a9ada4c16a5ef068e7d2a2c\" for container &ContainerMetadata{Name:busybox,Attempt:0,}"
Nov 21 14:30:10 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:10.911518272Z" level=info msg="Container b902d4d95366e27e951b3537262d21dd82f809e7ad84dd34083f4c621ca4b23b: CDI devices from CRI Config.CDIDevices: []"
Nov 21 14:30:10 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:10.918169889Z" level=info msg="CreateContainer within sandbox \"650f980a2b9de14dfd5f63378bb97f102c6ac2132a9ada4c16a5ef068e7d2a2c\" for &ContainerMetadata{Name:busybox,Attempt:0,} returns container id \"b902d4d95366e27e951b3537262d21dd82f809e7ad84dd34083f4c621ca4b23b\""
Nov 21 14:30:10 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:10.918839732Z" level=info msg="StartContainer for \"b902d4d95366e27e951b3537262d21dd82f809e7ad84dd34083f4c621ca4b23b\""
Nov 21 14:30:10 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:10.919846340Z" level=info msg="connecting to shim b902d4d95366e27e951b3537262d21dd82f809e7ad84dd34083f4c621ca4b23b" address="unix:///run/containerd/s/5e291cbce6d45d78977b32eb821eca28abc28581b57d5fa47a45bc5da629cfec" protocol=ttrpc version=3
Nov 21 14:30:10 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:10.971722510Z" level=info msg="StartContainer for \"b902d4d95366e27e951b3537262d21dd82f809e7ad84dd34083f4c621ca4b23b\" returns successfully"
Nov 21 14:30:17 old-k8s-version-012258 containerd[665]: E1121 14:30:17.320736 665 websocket.go:100] "Unhandled Error" err="unable to upgrade websocket connection: websocket server finished before becoming ready" logger="UnhandledError"
==> coredns [5c05a4ce996931fe774ecca66b33620ebb8a09a835d63b1f0ddd04105345bb76] <==
.:53
[INFO] plugin/reload: Running configuration SHA512 = 4c7f44b73086be760ec9e64204f63c5cc5a952c8c1c55ba0b41d8fc3315ce3c7d0259d04847cb8b4561043d4549603f3bccfd9b397eeb814eef159d244d26f39
CoreDNS-1.10.1
linux/amd64, go1.20, 055b2c3
[INFO] 127.0.0.1:46429 - 55004 "HINFO IN 8589807954474471726.703758692042272696. udp 56 false 512" NXDOMAIN qr,rd,ra 131 0.027956792s
==> describe nodes <==
Name: old-k8s-version-012258
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=old-k8s-version-012258
kubernetes.io/os=linux
minikube.k8s.io/commit=29e0798733fefbdc471fd2bbb38f6a7ae2a26162
minikube.k8s.io/name=old-k8s-version-012258
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_11_21T14_29_38_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Fri, 21 Nov 2025 14:29:34 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: old-k8s-version-012258
AcquireTime: <unset>
RenewTime: Fri, 21 Nov 2025 14:30:17 +0000
Conditions:
Type             Status   LastHeartbeatTime                 LastTransitionTime                Reason                       Message
----             ------   -----------------                 ------------------                ------                       -------
MemoryPressure   False    Fri, 21 Nov 2025 14:30:07 +0000   Fri, 21 Nov 2025 14:29:33 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
DiskPressure     False    Fri, 21 Nov 2025 14:30:07 +0000   Fri, 21 Nov 2025 14:29:33 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
PIDPressure      False    Fri, 21 Nov 2025 14:30:07 +0000   Fri, 21 Nov 2025 14:29:33 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
Ready            True     Fri, 21 Nov 2025 14:30:07 +0000   Fri, 21 Nov 2025 14:30:05 +0000   KubeletReady                 kubelet is posting ready status
Addresses:
InternalIP: 192.168.94.2
Hostname: old-k8s-version-012258
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863352Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863352Ki
pods: 110
System Info:
Machine ID: cf10fb2f940d419c1d138723691cfee8
System UUID: b90c39b5-fac8-48f3-bfec-9ba818fb6bc5
Boot ID: f900700b-0668-4d24-87ff-85e15fbda365
Kernel Version: 6.8.0-1044-gcp
OS Image: Debian GNU/Linux 12 (bookworm)
Operating System: linux
Architecture: amd64
Container Runtime Version: containerd://2.1.5
Kubelet Version: v1.28.0
Kube-Proxy Version: v1.28.0
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (9 in total)
Namespace     Name                                              CPU Requests   CPU Limits   Memory Requests   Memory Limits   Age
---------     ----                                              ------------   ----------   ---------------   -------------   ---
default       busybox                                           0 (0%)         0 (0%)       0 (0%)            0 (0%)          10s
kube-system   coredns-5dd5756b68-vst4c                          100m (1%)      0 (0%)       70Mi (0%)         170Mi (0%)      28s
kube-system   etcd-old-k8s-version-012258                       100m (1%)      0 (0%)       100Mi (0%)        0 (0%)          43s
kube-system   kindnet-f6t7s                                     100m (1%)      100m (1%)    50Mi (0%)         50Mi (0%)       28s
kube-system   kube-apiserver-old-k8s-version-012258             250m (3%)      0 (0%)       0 (0%)            0 (0%)          41s
kube-system   kube-controller-manager-old-k8s-version-012258    200m (2%)      0 (0%)       0 (0%)            0 (0%)          41s
kube-system   kube-proxy-wsp2w                                  0 (0%)         0 (0%)       0 (0%)            0 (0%)          28s
kube-system   kube-scheduler-old-k8s-version-012258             100m (1%)      0 (0%)       0 (0%)            0 (0%)          41s
kube-system   storage-provisioner                               0 (0%)         0 (0%)       0 (0%)            0 (0%)          28s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource             Requests     Limits
--------             --------     ------
cpu                  850m (10%)   100m (1%)
memory               220Mi (0%)   220Mi (0%)
ephemeral-storage    0 (0%)       0 (0%)
hugepages-1Gi        0 (0%)       0 (0%)
hugepages-2Mi        0 (0%)       0 (0%)
Events:
Type     Reason                     Age                  From               Message
----     ------                     ---                  ----               -------
Normal   Starting                   27s                  kube-proxy
Normal   Starting                   48s                  kubelet            Starting kubelet.
Normal   NodeHasSufficientMemory    47s (x8 over 48s)    kubelet            Node old-k8s-version-012258 status is now: NodeHasSufficientMemory
Normal   NodeHasNoDiskPressure      47s (x8 over 48s)    kubelet            Node old-k8s-version-012258 status is now: NodeHasNoDiskPressure
Normal   NodeHasSufficientPID       47s (x7 over 48s)    kubelet            Node old-k8s-version-012258 status is now: NodeHasSufficientPID
Normal   NodeAllocatableEnforced    47s                  kubelet            Updated Node Allocatable limit across pods
Normal   Starting                   41s                  kubelet            Starting kubelet.
Normal   NodeAllocatableEnforced    41s                  kubelet            Updated Node Allocatable limit across pods
Normal   NodeHasSufficientMemory    41s                  kubelet            Node old-k8s-version-012258 status is now: NodeHasSufficientMemory
Normal   NodeHasNoDiskPressure      41s                  kubelet            Node old-k8s-version-012258 status is now: NodeHasNoDiskPressure
Normal   NodeHasSufficientPID       41s                  kubelet            Node old-k8s-version-012258 status is now: NodeHasSufficientPID
Normal   RegisteredNode             29s                  node-controller    Node old-k8s-version-012258 event: Registered Node old-k8s-version-012258 in Controller
Normal   NodeReady                  13s                  kubelet            Node old-k8s-version-012258 status is now: NodeReady
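[editor's note] For reference, the percentages in "Allocated resources" above are requests over node capacity, truncated to whole percents: 850m CPU requested against 8 CPUs (8000m) is about 10.6%, shown as 10%; 220Mi of memory against 32863352Ki (roughly 31.3Gi) is about 0.7%, shown as 0%.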
==> dmesg <==
[Nov21 13:17] MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.
[ +0.001887] TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.
[ +0.001001] MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.
[ +0.086016] acpi PNP0A03:00: fail to add MMCONFIG information, can't access extended configuration space under this bridge
[ +0.440508] i8042: Warning: Keylock active
[ +0.011202] device-mapper: core: CONFIG_IMA_DISABLE_HTABLE is disabled. Duplicate IMA measurements will not be recorded in the IMA log.
[ +0.526419] block sda: the capability attribute has been deprecated.
[ +0.095215] systemd[1]: Configuration file /run/systemd/system/netplan-ovs-cleanup.service is marked world-inaccessible. This has no effect as configuration data is accessible via APIs without restrictions. Proceeding anyway.
[ +0.027093] systemd[1]: /lib/systemd/system/snapd.service:23: Unknown key name 'RestartMode' in section 'Service', ignoring.
[ +5.485024] kauditd_printk_skb: 47 callbacks suppressed
==> etcd [24c3a525c2057be14d63a0b83d320542988e06c148db3abcea70288b84ad9d55] <==
{"level":"info","ts":"2025-11-21T14:29:32.241252Z","caller":"membership/cluster.go:421","msg":"added member","cluster-id":"da400bbece288f5a","local-member-id":"dfc97eb0aae75b33","added-peer-id":"dfc97eb0aae75b33","added-peer-peer-urls":["https://192.168.94.2:2380"]}
{"level":"info","ts":"2025-11-21T14:29:32.243038Z","caller":"embed/etcd.go:726","msg":"starting with client TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/server.crt, key = /var/lib/minikube/certs/etcd/server.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
{"level":"info","ts":"2025-11-21T14:29:32.243254Z","caller":"embed/etcd.go:597","msg":"serving peer traffic","address":"192.168.94.2:2380"}
{"level":"info","ts":"2025-11-21T14:29:32.243303Z","caller":"embed/etcd.go:569","msg":"cmux::serve","address":"192.168.94.2:2380"}
{"level":"info","ts":"2025-11-21T14:29:32.24334Z","caller":"embed/etcd.go:278","msg":"now serving peer/client/metrics","local-member-id":"dfc97eb0aae75b33","initial-advertise-peer-urls":["https://192.168.94.2:2380"],"listen-peer-urls":["https://192.168.94.2:2380"],"advertise-client-urls":["https://192.168.94.2:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.94.2:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
{"level":"info","ts":"2025-11-21T14:29:32.24338Z","caller":"embed/etcd.go:855","msg":"serving metrics","address":"http://127.0.0.1:2381"}
{"level":"info","ts":"2025-11-21T14:29:32.527604Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"dfc97eb0aae75b33 is starting a new election at term 1"}
{"level":"info","ts":"2025-11-21T14:29:32.527651Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"dfc97eb0aae75b33 became pre-candidate at term 1"}
{"level":"info","ts":"2025-11-21T14:29:32.527692Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"dfc97eb0aae75b33 received MsgPreVoteResp from dfc97eb0aae75b33 at term 1"}
{"level":"info","ts":"2025-11-21T14:29:32.527708Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"dfc97eb0aae75b33 became candidate at term 2"}
{"level":"info","ts":"2025-11-21T14:29:32.527717Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"dfc97eb0aae75b33 received MsgVoteResp from dfc97eb0aae75b33 at term 2"}
{"level":"info","ts":"2025-11-21T14:29:32.527728Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"dfc97eb0aae75b33 became leader at term 2"}
{"level":"info","ts":"2025-11-21T14:29:32.527737Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: dfc97eb0aae75b33 elected leader dfc97eb0aae75b33 at term 2"}
{"level":"info","ts":"2025-11-21T14:29:32.529559Z","caller":"etcdserver/server.go:2062","msg":"published local member to cluster through raft","local-member-id":"dfc97eb0aae75b33","local-member-attributes":"{Name:old-k8s-version-012258 ClientURLs:[https://192.168.94.2:2379]}","request-path":"/0/members/dfc97eb0aae75b33/attributes","cluster-id":"da400bbece288f5a","publish-timeout":"7s"}
{"level":"info","ts":"2025-11-21T14:29:32.529578Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-21T14:29:32.529669Z","caller":"etcdserver/server.go:2571","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-21T14:29:32.529972Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2025-11-21T14:29:32.529994Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2025-11-21T14:29:32.529757Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-21T14:29:32.5309Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2025-11-21T14:29:32.531625Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"da400bbece288f5a","local-member-id":"dfc97eb0aae75b33","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-21T14:29:32.53516Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-21T14:29:32.535207Z","caller":"etcdserver/server.go:2595","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-21T14:29:32.536282Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.94.2:2379"}
{"level":"info","ts":"2025-11-21T14:29:35.645599Z","caller":"traceutil/trace.go:171","msg":"trace[1619369888] transaction","detail":"{read_only:false; response_revision:181; number_of_response:1; }","duration":"103.859179ms","start":"2025-11-21T14:29:35.541719Z","end":"2025-11-21T14:29:35.645578Z","steps":["trace[1619369888] 'process raft request' (duration: 101.685301ms)"],"step_count":1}
==> kernel <==
14:30:18 up 1:12, 0 user, load average: 3.83, 3.02, 1.92
Linux old-k8s-version-012258 6.8.0-1044-gcp #47~22.04.1-Ubuntu SMP Thu Oct 23 21:07:54 UTC 2025 x86_64 GNU/Linux
PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"
==> kindnet [14f62b42937d63a9d982189e10059fb863ccdf5ca3eedc2cdab43a2e258708b6] <==
I1121 14:29:54.836873 1 main.go:109] connected to apiserver: https://10.96.0.1:443
I1121 14:29:54.837124 1 main.go:139] hostIP = 192.168.94.2
podIP = 192.168.94.2
I1121 14:29:54.837288 1 main.go:148] setting mtu 1500 for CNI
I1121 14:29:54.837307 1 main.go:178] kindnetd IP family: "ipv4"
I1121 14:29:54.837325 1 main.go:182] noMask IPv4 subnets: [10.244.0.0/16]
time="2025-11-21T14:29:55Z" level=info msg="Created plugin 10-kube-network-policies (kindnetd, handles RunPodSandbox,RemovePodSandbox)"
I1121 14:29:55.132056 1 controller.go:377] "Starting controller" name="kube-network-policies"
I1121 14:29:55.132129 1 controller.go:381] "Waiting for informer caches to sync"
I1121 14:29:55.132143 1 shared_informer.go:350] "Waiting for caches to sync" controller="kube-network-policies"
I1121 14:29:55.132319 1 controller.go:390] nri plugin exited: failed to connect to NRI service: dial unix /var/run/nri/nri.sock: connect: no such file or directory
I1121 14:29:55.432449 1 shared_informer.go:357] "Caches are synced" controller="kube-network-policies"
I1121 14:29:55.432473 1 metrics.go:72] Registering metrics
I1121 14:29:55.432525 1 controller.go:711] "Syncing nftables rules"
I1121 14:30:05.138150 1 main.go:297] Handling node with IPs: map[192.168.94.2:{}]
I1121 14:30:05.138210 1 main.go:301] handling current node
I1121 14:30:15.134126 1 main.go:297] Handling node with IPs: map[192.168.94.2:{}]
I1121 14:30:15.134169 1 main.go:301] handling current node
==> kube-apiserver [bff5755d3bb4c01170cea10eea2a0bd7eb5e4e85eff679e4fd11f262f20d8b28] <==
I1121 14:29:34.045351 1 cache.go:39] Caches are synced for AvailableConditionController controller
I1121 14:29:34.047124 1 shared_informer.go:318] Caches are synced for cluster_authentication_trust_controller
I1121 14:29:34.047217 1 shared_informer.go:318] Caches are synced for configmaps
I1121 14:29:34.051166 1 controller.go:624] quota admission added evaluator for: namespaces
I1121 14:29:34.059678 1 shared_informer.go:318] Caches are synced for crd-autoregister
I1121 14:29:34.059713 1 aggregator.go:166] initial CRD sync complete...
I1121 14:29:34.059721 1 autoregister_controller.go:141] Starting autoregister controller
I1121 14:29:34.059728 1 cache.go:32] Waiting for caches to sync for autoregister controller
I1121 14:29:34.059737 1 cache.go:39] Caches are synced for autoregister controller
I1121 14:29:34.239983 1 controller.go:624] quota admission added evaluator for: leases.coordination.k8s.io
I1121 14:29:34.956388 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I1121 14:29:34.961744 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I1121 14:29:34.961779 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I1121 14:29:35.529678 1 controller.go:624] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1121 14:29:35.676651 1 controller.go:624] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1121 14:29:35.776358 1 alloc.go:330] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W1121 14:29:35.783426 1 lease.go:263] Resetting endpoints for master service "kubernetes" to [192.168.94.2]
I1121 14:29:35.785070 1 controller.go:624] quota admission added evaluator for: endpoints
I1121 14:29:35.792737 1 controller.go:624] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1121 14:29:35.992086 1 controller.go:624] quota admission added evaluator for: serviceaccounts
I1121 14:29:37.085397 1 controller.go:624] quota admission added evaluator for: deployments.apps
I1121 14:29:37.099935 1 alloc.go:330] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I1121 14:29:37.111942 1 controller.go:624] quota admission added evaluator for: daemonsets.apps
I1121 14:29:50.620131 1 controller.go:624] quota admission added evaluator for: replicasets.apps
I1121 14:29:50.819999 1 controller.go:624] quota admission added evaluator for: controllerrevisions.apps
==> kube-controller-manager [9694941d5023471382cb75dbe0e35927477b046c67f0406d94b0c2eab9737245] <==
I1121 14:29:49.846641 1 shared_informer.go:318] Caches are synced for disruption
I1121 14:29:49.855897 1 shared_informer.go:318] Caches are synced for stateful set
I1121 14:29:49.881551 1 shared_informer.go:311] Waiting for caches to sync for garbage collector
I1121 14:29:49.969509 1 shared_informer.go:318] Caches are synced for attach detach
I1121 14:29:50.014167 1 shared_informer.go:318] Caches are synced for resource quota
I1121 14:29:50.025976 1 shared_informer.go:318] Caches are synced for resource quota
I1121 14:29:50.366198 1 shared_informer.go:318] Caches are synced for garbage collector
I1121 14:29:50.366669 1 garbagecollector.go:166] "All resource monitors have synced. Proceeding to collect garbage"
I1121 14:29:50.381693 1 shared_informer.go:318] Caches are synced for garbage collector
I1121 14:29:50.624660 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set coredns-5dd5756b68 to 2"
I1121 14:29:50.704235 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set coredns-5dd5756b68 to 1 from 2"
I1121 14:29:50.830312 1 event.go:307] "Event occurred" object="kube-system/kube-proxy" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-wsp2w"
I1121 14:29:50.831838 1 event.go:307] "Event occurred" object="kube-system/kindnet" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kindnet-f6t7s"
I1121 14:29:50.927521 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-vst4c"
I1121 14:29:50.936234 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-qv6fz"
I1121 14:29:50.964100 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="339.351723ms"
I1121 14:29:50.978176 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: coredns-5dd5756b68-qv6fz"
I1121 14:29:50.986743 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="22.057827ms"
I1121 14:29:50.996010 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="8.927032ms"
I1121 14:29:50.996568 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="343.659µs"
I1121 14:30:05.215933 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="100.246µs"
I1121 14:30:05.230917 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="161.827µs"
I1121 14:30:06.296502 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="6.965394ms"
I1121 14:30:06.296638 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="91.82µs"
I1121 14:30:09.770369 1 node_lifecycle_controller.go:1048] "Controller detected that some Nodes are Ready. Exiting master disruption mode"
==> kube-proxy [7b9fdeac6c297da9e16ba05abceeee4a77258137fd28986a17f946713c8ad0fe] <==
I1121 14:29:51.457956 1 server_others.go:69] "Using iptables proxy"
I1121 14:29:51.467641 1 node.go:141] Successfully retrieved node IP: 192.168.94.2
I1121 14:29:51.489328 1 server.go:632] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I1121 14:29:51.492051 1 server_others.go:152] "Using iptables Proxier"
I1121 14:29:51.492086 1 server_others.go:421] "Detect-local-mode set to ClusterCIDR, but no cluster CIDR for family" ipFamily="IPv6"
I1121 14:29:51.492094 1 server_others.go:438] "Defaulting to no-op detect-local"
I1121 14:29:51.492128 1 proxier.go:251] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
I1121 14:29:51.492424 1 server.go:846] "Version info" version="v1.28.0"
I1121 14:29:51.492443 1 server.go:848] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1121 14:29:51.493149 1 config.go:97] "Starting endpoint slice config controller"
I1121 14:29:51.493193 1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
I1121 14:29:51.493154 1 config.go:188] "Starting service config controller"
I1121 14:29:51.493237 1 shared_informer.go:311] Waiting for caches to sync for service config
I1121 14:29:51.493237 1 config.go:315] "Starting node config controller"
I1121 14:29:51.493252 1 shared_informer.go:311] Waiting for caches to sync for node config
I1121 14:29:51.593782 1 shared_informer.go:318] Caches are synced for service config
I1121 14:29:51.593822 1 shared_informer.go:318] Caches are synced for node config
I1121 14:29:51.593799 1 shared_informer.go:318] Caches are synced for endpoint slice config
==> kube-scheduler [2ff2d15ad456d7eabe7dc6efd47603a67afa696fd1091b577b9633b6669bd9ec] <==
W1121 14:29:34.007803 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E1121 14:29:34.007838 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
W1121 14:29:34.007899 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E1121 14:29:34.007919 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
W1121 14:29:34.904012 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E1121 14:29:34.904113 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
W1121 14:29:34.906819 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E1121 14:29:34.906855 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
W1121 14:29:34.982047 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E1121 14:29:34.982173 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
W1121 14:29:35.046771 1 reflector.go:535] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E1121 14:29:35.046802 1 reflector.go:147] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
W1121 14:29:35.065222 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E1121 14:29:35.065262 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
W1121 14:29:35.119288 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E1121 14:29:35.119329 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
W1121 14:29:35.148021 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E1121 14:29:35.148079 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
W1121 14:29:35.156816 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E1121 14:29:35.156866 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
W1121 14:29:35.323566 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E1121 14:29:35.323609 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
W1121 14:29:35.347343 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E1121 14:29:35.347400 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
I1121 14:29:38.002740 1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
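Note on the kube-scheduler section above: the W/E reflector lines are the usual transient RBAC denials emitted while the control plane bootstraps, and they stop once the scheduler's role bindings propagate and its informer caches sync (the final "Caches are synced" line). A minimal after-the-fact spot check, assuming a kubeconfig that still points at this cluster and impersonation rights (the two resources probed are illustrative picks from the denials above):
  # impersonate the scheduler and confirm the previously denied list verbs now pass
  kubectl --context old-k8s-version-012258 auth can-i list persistentvolumes --as=system:kube-scheduler
  kubectl --context old-k8s-version-012258 auth can-i list csistoragecapacities.storage.k8s.io --as=system:kube-scheduler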
==> kubelet <==
Nov 21 14:29:49 old-k8s-version-012258 kubelet[1516]: I1121 14:29:49.923571 1516 kubelet_network.go:61] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24"
Nov 21 14:29:50 old-k8s-version-012258 kubelet[1516]: I1121 14:29:50.836162 1516 topology_manager.go:215] "Topology Admit Handler" podUID="bc079c02-40ff-4f10-947b-76f1e9784572" podNamespace="kube-system" podName="kube-proxy-wsp2w"
Nov 21 14:29:50 old-k8s-version-012258 kubelet[1516]: I1121 14:29:50.839382 1516 topology_manager.go:215] "Topology Admit Handler" podUID="bd28a6b5-0214-42be-8883-1adf1217761c" podNamespace="kube-system" podName="kindnet-f6t7s"
Nov 21 14:29:50 old-k8s-version-012258 kubelet[1516]: I1121 14:29:50.946858 1516 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/bc079c02-40ff-4f10-947b-76f1e9784572-xtables-lock\") pod \"kube-proxy-wsp2w\" (UID: \"bc079c02-40ff-4f10-947b-76f1e9784572\") " pod="kube-system/kube-proxy-wsp2w"
Nov 21 14:29:50 old-k8s-version-012258 kubelet[1516]: I1121 14:29:50.948665 1516 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-cfg\" (UniqueName: \"kubernetes.io/host-path/bd28a6b5-0214-42be-8883-1adf1217761c-cni-cfg\") pod \"kindnet-f6t7s\" (UID: \"bd28a6b5-0214-42be-8883-1adf1217761c\") " pod="kube-system/kindnet-f6t7s"
Nov 21 14:29:50 old-k8s-version-012258 kubelet[1516]: I1121 14:29:50.949046 1516 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/bd28a6b5-0214-42be-8883-1adf1217761c-xtables-lock\") pod \"kindnet-f6t7s\" (UID: \"bd28a6b5-0214-42be-8883-1adf1217761c\") " pod="kube-system/kindnet-f6t7s"
Nov 21 14:29:50 old-k8s-version-012258 kubelet[1516]: I1121 14:29:50.949101 1516 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vgrts\" (UniqueName: \"kubernetes.io/projected/bc079c02-40ff-4f10-947b-76f1e9784572-kube-api-access-vgrts\") pod \"kube-proxy-wsp2w\" (UID: \"bc079c02-40ff-4f10-947b-76f1e9784572\") " pod="kube-system/kube-proxy-wsp2w"
Nov 21 14:29:50 old-k8s-version-012258 kubelet[1516]: I1121 14:29:50.950051 1516 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/bd28a6b5-0214-42be-8883-1adf1217761c-lib-modules\") pod \"kindnet-f6t7s\" (UID: \"bd28a6b5-0214-42be-8883-1adf1217761c\") " pod="kube-system/kindnet-f6t7s"
Nov 21 14:29:50 old-k8s-version-012258 kubelet[1516]: I1121 14:29:50.950176 1516 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jcpxl\" (UniqueName: \"kubernetes.io/projected/bd28a6b5-0214-42be-8883-1adf1217761c-kube-api-access-jcpxl\") pod \"kindnet-f6t7s\" (UID: \"bd28a6b5-0214-42be-8883-1adf1217761c\") " pod="kube-system/kindnet-f6t7s"
Nov 21 14:29:50 old-k8s-version-012258 kubelet[1516]: I1121 14:29:50.950220 1516 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/bc079c02-40ff-4f10-947b-76f1e9784572-kube-proxy\") pod \"kube-proxy-wsp2w\" (UID: \"bc079c02-40ff-4f10-947b-76f1e9784572\") " pod="kube-system/kube-proxy-wsp2w"
Nov 21 14:29:50 old-k8s-version-012258 kubelet[1516]: I1121 14:29:50.950255 1516 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/bc079c02-40ff-4f10-947b-76f1e9784572-lib-modules\") pod \"kube-proxy-wsp2w\" (UID: \"bc079c02-40ff-4f10-947b-76f1e9784572\") " pod="kube-system/kube-proxy-wsp2w"
Nov 21 14:29:55 old-k8s-version-012258 kubelet[1516]: I1121 14:29:55.257777 1516 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kube-proxy-wsp2w" podStartSLOduration=5.257722111 podCreationTimestamp="2025-11-21 14:29:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:29:52.247909441 +0000 UTC m=+15.198590387" watchObservedRunningTime="2025-11-21 14:29:55.257722111 +0000 UTC m=+18.208403071"
Nov 21 14:29:55 old-k8s-version-012258 kubelet[1516]: I1121 14:29:55.257917 1516 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kindnet-f6t7s" podStartSLOduration=2.158617096 podCreationTimestamp="2025-11-21 14:29:50 +0000 UTC" firstStartedPulling="2025-11-21 14:29:51.458699826 +0000 UTC m=+14.409380763" lastFinishedPulling="2025-11-21 14:29:54.557970689 +0000 UTC m=+17.508651626" observedRunningTime="2025-11-21 14:29:55.257276178 +0000 UTC m=+18.207957124" watchObservedRunningTime="2025-11-21 14:29:55.257887959 +0000 UTC m=+18.208568906"
Nov 21 14:30:05 old-k8s-version-012258 kubelet[1516]: I1121 14:30:05.191422 1516 kubelet_node_status.go:493] "Fast updating node status as it just became ready"
Nov 21 14:30:05 old-k8s-version-012258 kubelet[1516]: I1121 14:30:05.216103 1516 topology_manager.go:215] "Topology Admit Handler" podUID="3ca4df79-d875-498c-91b8-059d4f975bd0" podNamespace="kube-system" podName="coredns-5dd5756b68-vst4c"
Nov 21 14:30:05 old-k8s-version-012258 kubelet[1516]: I1121 14:30:05.222388 1516 topology_manager.go:215] "Topology Admit Handler" podUID="4195d236-52f6-4bfd-b47a-9cd7cd89bedd" podNamespace="kube-system" podName="storage-provisioner"
Nov 21 14:30:05 old-k8s-version-012258 kubelet[1516]: I1121 14:30:05.242068 1516 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2cp9f\" (UniqueName: \"kubernetes.io/projected/3ca4df79-d875-498c-91b8-059d4f975bd0-kube-api-access-2cp9f\") pod \"coredns-5dd5756b68-vst4c\" (UID: \"3ca4df79-d875-498c-91b8-059d4f975bd0\") " pod="kube-system/coredns-5dd5756b68-vst4c"
Nov 21 14:30:05 old-k8s-version-012258 kubelet[1516]: I1121 14:30:05.242125 1516 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-69bsj\" (UniqueName: \"kubernetes.io/projected/4195d236-52f6-4bfd-b47a-9cd7cd89bedd-kube-api-access-69bsj\") pod \"storage-provisioner\" (UID: \"4195d236-52f6-4bfd-b47a-9cd7cd89bedd\") " pod="kube-system/storage-provisioner"
Nov 21 14:30:05 old-k8s-version-012258 kubelet[1516]: I1121 14:30:05.242163 1516 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3ca4df79-d875-498c-91b8-059d4f975bd0-config-volume\") pod \"coredns-5dd5756b68-vst4c\" (UID: \"3ca4df79-d875-498c-91b8-059d4f975bd0\") " pod="kube-system/coredns-5dd5756b68-vst4c"
Nov 21 14:30:05 old-k8s-version-012258 kubelet[1516]: I1121 14:30:05.242194 1516 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/4195d236-52f6-4bfd-b47a-9cd7cd89bedd-tmp\") pod \"storage-provisioner\" (UID: \"4195d236-52f6-4bfd-b47a-9cd7cd89bedd\") " pod="kube-system/storage-provisioner"
Nov 21 14:30:06 old-k8s-version-012258 kubelet[1516]: I1121 14:30:06.278995 1516 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=16.278943202 podCreationTimestamp="2025-11-21 14:29:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:30:06.278908115 +0000 UTC m=+29.229589065" watchObservedRunningTime="2025-11-21 14:30:06.278943202 +0000 UTC m=+29.229624148"
Nov 21 14:30:06 old-k8s-version-012258 kubelet[1516]: I1121 14:30:06.289341 1516 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/coredns-5dd5756b68-vst4c" podStartSLOduration=16.289291859 podCreationTimestamp="2025-11-21 14:29:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:30:06.28907602 +0000 UTC m=+29.239756965" watchObservedRunningTime="2025-11-21 14:30:06.289291859 +0000 UTC m=+29.239972805"
Nov 21 14:30:08 old-k8s-version-012258 kubelet[1516]: I1121 14:30:08.218808 1516 topology_manager.go:215] "Topology Admit Handler" podUID="fa895e52-0bff-4604-8b62-fd0f087015e8" podNamespace="default" podName="busybox"
Nov 21 14:30:08 old-k8s-version-012258 kubelet[1516]: I1121 14:30:08.263005 1516 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cbpfl\" (UniqueName: \"kubernetes.io/projected/fa895e52-0bff-4604-8b62-fd0f087015e8-kube-api-access-cbpfl\") pod \"busybox\" (UID: \"fa895e52-0bff-4604-8b62-fd0f087015e8\") " pod="default/busybox"
Nov 21 14:30:11 old-k8s-version-012258 kubelet[1516]: I1121 14:30:11.294015 1516 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="default/busybox" podStartSLOduration=1.035211506 podCreationTimestamp="2025-11-21 14:30:08 +0000 UTC" firstStartedPulling="2025-11-21 14:30:08.642848367 +0000 UTC m=+31.593529296" lastFinishedPulling="2025-11-21 14:30:10.901611757 +0000 UTC m=+33.852292703" observedRunningTime="2025-11-21 14:30:11.293488867 +0000 UTC m=+34.244169813" watchObservedRunningTime="2025-11-21 14:30:11.293974913 +0000 UTC m=+34.244655858"
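Aside on the kubelet section above: these entries come from the kubelet's systemd journal inside the node, so the same startup-latency records can be pulled directly; a sketch, assuming the profile from this run is still up:
  # read kubelet's journal on the node and keep only the pod startup SLO records
  out/minikube-linux-amd64 ssh -p old-k8s-version-012258 -- sudo journalctl -u kubelet --no-pager | grep 'Observed pod startup duration'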
==> storage-provisioner [4cd21f31974314e5db6d58ee50bbd67f0daf675c91355ac568f2d0140f7a8d6c] <==
I1121 14:30:05.736193 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1121 14:30:05.746379 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1121 14:30:05.746443 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I1121 14:30:05.754349 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I1121 14:30:05.754427 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"2ece7dbe-e611-46b3-879d-c0179ba2fde1", APIVersion:"v1", ResourceVersion:"432", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' old-k8s-version-012258_d783fa48-77b0-4408-a80f-68458be19abb became leader
I1121 14:30:05.754523 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_old-k8s-version-012258_d783fa48-77b0-4408-a80f-68458be19abb!
I1121 14:30:05.855459 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_old-k8s-version-012258_d783fa48-77b0-4408-a80f-68458be19abb!
-- /stdout --
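The storage-provisioner section at the end of the dump shows the standard client-go leader-election flow: attempt to acquire the kube-system/k8s.io-minikube-hostpath lock, log success, emit a LeaderElection event, then start the provisioner controller. One way to inspect the current lock holder, assuming an Endpoints-based resource lock as the event above suggests (such locks record the holder in the control-plane.alpha.kubernetes.io/leader annotation):
  kubectl --context old-k8s-version-012258 -n kube-system get endpoints k8s.io-minikube-hostpath \
    -o jsonpath='{.metadata.annotations.control-plane\.alpha\.kubernetes\.io/leader}'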
helpers_test.go:262: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p old-k8s-version-012258 -n old-k8s-version-012258
helpers_test.go:269: (dbg) Run: kubectl --context old-k8s-version-012258 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:293: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: end of post-mortem logs <<<
helpers_test.go:294: ---------------------/post-mortem---------------------------------
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p old-k8s-version-012258 -n old-k8s-version-012258
helpers_test.go:252: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-amd64 -p old-k8s-version-012258 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-amd64 -p old-k8s-version-012258 logs -n 25: (1.24243583s)
helpers_test.go:260: TestStartStop/group/old-k8s-version/serial/DeployApp logs:
-- stdout --
==> Audit <==
┌─────────┬─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬──────────────────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼──────────────────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ ssh │ -p cilium-459127 sudo systemctl cat containerd --no-pager │ cilium-459127 │ jenkins │ v1.37.0 │ 21 Nov 25 14:28 UTC │ │
│ ssh │ -p cilium-459127 sudo cat /lib/systemd/system/containerd.service │ cilium-459127 │ jenkins │ v1.37.0 │ 21 Nov 25 14:28 UTC │ │
│ ssh │ -p cilium-459127 sudo cat /etc/containerd/config.toml │ cilium-459127 │ jenkins │ v1.37.0 │ 21 Nov 25 14:28 UTC │ │
│ ssh │ -p cilium-459127 sudo containerd config dump │ cilium-459127 │ jenkins │ v1.37.0 │ 21 Nov 25 14:28 UTC │ │
│ ssh │ -p cilium-459127 sudo systemctl status crio --all --full --no-pager │ cilium-459127 │ jenkins │ v1.37.0 │ 21 Nov 25 14:28 UTC │ │
│ delete │ -p cert-expiration-371956 │ cert-expiration-371956 │ jenkins │ v1.37.0 │ 21 Nov 25 14:28 UTC │ 21 Nov 25 14:28 UTC │
│ ssh │ -p cilium-459127 sudo systemctl cat crio --no-pager │ cilium-459127 │ jenkins │ v1.37.0 │ 21 Nov 25 14:28 UTC │ │
│ ssh │ -p cilium-459127 sudo find /etc/crio -type f -exec sh -c 'echo {}; cat {}' \; │ cilium-459127 │ jenkins │ v1.37.0 │ 21 Nov 25 14:28 UTC │ │
│ ssh │ -p cilium-459127 sudo crio config │ cilium-459127 │ jenkins │ v1.37.0 │ 21 Nov 25 14:28 UTC │ │
│ delete │ -p cilium-459127 │ cilium-459127 │ jenkins │ v1.37.0 │ 21 Nov 25 14:28 UTC │ 21 Nov 25 14:28 UTC │
│ start │ -p cert-options-733993 --memory=3072 --apiserver-ips=127.0.0.1 --apiserver-ips=192.168.15.15 --apiserver-names=localhost --apiserver-names=www.google.com --apiserver-port=8555 --driver=docker --container-runtime=containerd │ cert-options-733993 │ jenkins │ v1.37.0 │ 21 Nov 25 14:28 UTC │ 21 Nov 25 14:29 UTC │
│ start │ -p force-systemd-flag-730471 --memory=3072 --force-systemd --alsologtostderr -v=5 --driver=docker --container-runtime=containerd │ force-systemd-flag-730471 │ jenkins │ v1.37.0 │ 21 Nov 25 14:28 UTC │ 21 Nov 25 14:29 UTC │
│ ssh │ -p NoKubernetes-187733 sudo systemctl is-active --quiet service kubelet │ NoKubernetes-187733 │ jenkins │ v1.37.0 │ 21 Nov 25 14:28 UTC │ │
│ stop │ -p NoKubernetes-187733 │ NoKubernetes-187733 │ jenkins │ v1.37.0 │ 21 Nov 25 14:28 UTC │ 21 Nov 25 14:29 UTC │
│ start │ -p NoKubernetes-187733 --driver=docker --container-runtime=containerd │ NoKubernetes-187733 │ jenkins │ v1.37.0 │ 21 Nov 25 14:29 UTC │ 21 Nov 25 14:29 UTC │
│ ssh │ -p NoKubernetes-187733 sudo systemctl is-active --quiet service kubelet │ NoKubernetes-187733 │ jenkins │ v1.37.0 │ 21 Nov 25 14:29 UTC │ │
│ delete │ -p NoKubernetes-187733 │ NoKubernetes-187733 │ jenkins │ v1.37.0 │ 21 Nov 25 14:29 UTC │ 21 Nov 25 14:29 UTC │
│ start │ -p old-k8s-version-012258 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.28.0 │ old-k8s-version-012258 │ jenkins │ v1.37.0 │ 21 Nov 25 14:29 UTC │ 21 Nov 25 14:30 UTC │
│ ssh │ cert-options-733993 ssh openssl x509 -text -noout -in /var/lib/minikube/certs/apiserver.crt │ cert-options-733993 │ jenkins │ v1.37.0 │ 21 Nov 25 14:29 UTC │ 21 Nov 25 14:29 UTC │
│ ssh │ -p cert-options-733993 -- sudo cat /etc/kubernetes/admin.conf │ cert-options-733993 │ jenkins │ v1.37.0 │ 21 Nov 25 14:29 UTC │ 21 Nov 25 14:29 UTC │
│ delete │ -p cert-options-733993 │ cert-options-733993 │ jenkins │ v1.37.0 │ 21 Nov 25 14:29 UTC │ 21 Nov 25 14:29 UTC │
│ start │ -p no-preload-921956 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.34.1 │ no-preload-921956 │ jenkins │ v1.37.0 │ 21 Nov 25 14:29 UTC │ 21 Nov 25 14:30 UTC │
│ ssh │ force-systemd-flag-730471 ssh cat /etc/containerd/config.toml │ force-systemd-flag-730471 │ jenkins │ v1.37.0 │ 21 Nov 25 14:29 UTC │ 21 Nov 25 14:29 UTC │
│ delete │ -p force-systemd-flag-730471 │ force-systemd-flag-730471 │ jenkins │ v1.37.0 │ 21 Nov 25 14:29 UTC │ 21 Nov 25 14:29 UTC │
│ start │ -p default-k8s-diff-port-376255 --memory=3072 --alsologtostderr --wait=true --apiserver-port=8444 --driver=docker --container-runtime=containerd --kubernetes-version=v1.34.1 │ default-k8s-diff-port-376255 │ jenkins │ v1.37.0 │ 21 Nov 25 14:29 UTC │ 21 Nov 25 14:30 UTC │
└─────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/11/21 14:29:24
Running on machine: ubuntu-20-agent-6
Binary: Built with gc go1.24.6 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
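Per the header above, every entry starts with a severity letter (I, W, E, or F), the date as mmdd, a microsecond timestamp, the emitting thread id, and the source file:line. A quick filter that keeps only warnings and worse from a saved copy of this log (the filename is illustrative):
  # klog prefixes: W=warning, E=error, F=fatal; I (info) lines are dropped
  grep -E '^[WEF][0-9]{4} ' last_start.log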
I1121 14:29:24.877938 255774 out.go:360] Setting OutFile to fd 1 ...
I1121 14:29:24.878133 255774 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1121 14:29:24.878179 255774 out.go:374] Setting ErrFile to fd 2...
I1121 14:29:24.878200 255774 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1121 14:29:24.879901 255774 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21847-11004/.minikube/bin
I1121 14:29:24.881344 255774 out.go:368] Setting JSON to false
I1121 14:29:24.883254 255774 start.go:133] hostinfo: {"hostname":"ubuntu-20-agent-6","uptime":4307,"bootTime":1763731058,"procs":299,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1044-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I1121 14:29:24.883372 255774 start.go:143] virtualization: kvm guest
I1121 14:29:24.885483 255774 out.go:179] * [default-k8s-diff-port-376255] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
I1121 14:29:24.887201 255774 notify.go:221] Checking for updates...
I1121 14:29:24.887242 255774 out.go:179] - MINIKUBE_LOCATION=21847
I1121 14:29:24.890729 255774 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1121 14:29:24.892963 255774 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21847-11004/kubeconfig
I1121 14:29:24.894677 255774 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21847-11004/.minikube
I1121 14:29:24.897870 255774 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64
I1121 14:29:24.899765 255774 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1121 14:29:24.902854 255774 config.go:182] Loaded profile config "kubernetes-upgrade-797080": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1121 14:29:24.903030 255774 config.go:182] Loaded profile config "no-preload-921956": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1121 14:29:24.903162 255774 config.go:182] Loaded profile config "old-k8s-version-012258": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1121 14:29:24.903312 255774 driver.go:422] Setting default libvirt URI to qemu:///system
I1121 14:29:24.939143 255774 docker.go:124] docker version: linux-29.0.2:Docker Engine - Community
I1121 14:29:24.939248 255774 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1121 14:29:25.025144 255774 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:3 ContainersRunning:3 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:80 OomKillDisable:false NGoroutines:92 SystemTime:2025-11-21 14:29:25.01035373 +0000 UTC LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-1044-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652072448 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-6 Labels:[] ExperimentalBuild:false ServerVersion:29.0.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:fcd43222d6b07379a4be9786bda52438f0dd16a1 Expected:} RuncCommit:{ID:v1.3.3-0-gd842d771 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.30.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.40.3] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v1.0.1] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1121 14:29:25.025295 255774 docker.go:319] overlay module found
I1121 14:29:25.027378 255774 out.go:179] * Using the docker driver based on user configuration
I1121 14:29:22.611340 249617 main.go:143] libmachine: SSH cmd err, output: <nil>: old-k8s-version-012258
I1121 14:29:22.611365 249617 ubuntu.go:182] provisioning hostname "old-k8s-version-012258"
I1121 14:29:22.611426 249617 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-012258
I1121 14:29:22.635589 249617 main.go:143] libmachine: Using SSH client type: native
I1121 14:29:22.635869 249617 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x8416e0] 0x8443c0 <nil> [] 0s} 127.0.0.1 33060 <nil> <nil>}
I1121 14:29:22.635891 249617 main.go:143] libmachine: About to run SSH command:
sudo hostname old-k8s-version-012258 && echo "old-k8s-version-012258" | sudo tee /etc/hostname
I1121 14:29:22.796661 249617 main.go:143] libmachine: SSH cmd err, output: <nil>: old-k8s-version-012258
I1121 14:29:22.796754 249617 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-012258
I1121 14:29:22.822578 249617 main.go:143] libmachine: Using SSH client type: native
I1121 14:29:22.822834 249617 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x8416e0] 0x8443c0 <nil> [] 0s} 127.0.0.1 33060 <nil> <nil>}
I1121 14:29:22.822860 249617 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sold-k8s-version-012258' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 old-k8s-version-012258/g' /etc/hosts;
else
echo '127.0.1.1 old-k8s-version-012258' | sudo tee -a /etc/hosts;
fi
fi
I1121 14:29:22.970644 249617 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1121 14:29:22.970676 249617 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21847-11004/.minikube CaCertPath:/home/jenkins/minikube-integration/21847-11004/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21847-11004/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21847-11004/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21847-11004/.minikube}
I1121 14:29:22.970732 249617 ubuntu.go:190] setting up certificates
I1121 14:29:22.970743 249617 provision.go:84] configureAuth start
I1121 14:29:22.970826 249617 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-012258
I1121 14:29:22.991118 249617 provision.go:143] copyHostCerts
I1121 14:29:22.991183 249617 exec_runner.go:144] found /home/jenkins/minikube-integration/21847-11004/.minikube/cert.pem, removing ...
I1121 14:29:22.991193 249617 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21847-11004/.minikube/cert.pem
I1121 14:29:22.991250 249617 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21847-11004/.minikube/cert.pem (1123 bytes)
I1121 14:29:22.991367 249617 exec_runner.go:144] found /home/jenkins/minikube-integration/21847-11004/.minikube/key.pem, removing ...
I1121 14:29:22.991381 249617 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21847-11004/.minikube/key.pem
I1121 14:29:22.991414 249617 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21847-11004/.minikube/key.pem (1675 bytes)
I1121 14:29:22.991488 249617 exec_runner.go:144] found /home/jenkins/minikube-integration/21847-11004/.minikube/ca.pem, removing ...
I1121 14:29:22.991499 249617 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21847-11004/.minikube/ca.pem
I1121 14:29:22.991526 249617 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21847-11004/.minikube/ca.pem (1078 bytes)
I1121 14:29:22.991627 249617 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21847-11004/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21847-11004/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21847-11004/.minikube/certs/ca-key.pem org=jenkins.old-k8s-version-012258 san=[127.0.0.1 192.168.94.2 localhost minikube old-k8s-version-012258]
I1121 14:29:23.140756 249617 provision.go:177] copyRemoteCerts
I1121 14:29:23.140833 249617 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1121 14:29:23.140885 249617 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-012258
I1121 14:29:23.161751 249617 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33060 SSHKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/machines/old-k8s-version-012258/id_rsa Username:docker}
I1121 14:29:23.269718 249617 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I1121 14:29:23.292619 249617 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/machines/server.pem --> /etc/docker/server.pem (1233 bytes)
I1121 14:29:23.314336 249617 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I1121 14:29:23.337086 249617 provision.go:87] duration metric: took 366.309314ms to configureAuth
I1121 14:29:23.337129 249617 ubuntu.go:206] setting minikube options for container-runtime
I1121 14:29:23.337306 249617 config.go:182] Loaded profile config "old-k8s-version-012258": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1121 14:29:23.337320 249617 machine.go:97] duration metric: took 3.89496072s to provisionDockerMachine
I1121 14:29:23.337326 249617 client.go:176] duration metric: took 11.527957207s to LocalClient.Create
I1121 14:29:23.337344 249617 start.go:167] duration metric: took 11.528071392s to libmachine.API.Create "old-k8s-version-012258"
I1121 14:29:23.337352 249617 start.go:293] postStartSetup for "old-k8s-version-012258" (driver="docker")
I1121 14:29:23.337365 249617 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1121 14:29:23.337422 249617 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1121 14:29:23.337471 249617 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-012258
I1121 14:29:23.359217 249617 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33060 SSHKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/machines/old-k8s-version-012258/id_rsa Username:docker}
I1121 14:29:23.466089 249617 ssh_runner.go:195] Run: cat /etc/os-release
I1121 14:29:23.470146 249617 main.go:143] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1121 14:29:23.470174 249617 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1121 14:29:23.470185 249617 filesync.go:126] Scanning /home/jenkins/minikube-integration/21847-11004/.minikube/addons for local assets ...
I1121 14:29:23.470249 249617 filesync.go:126] Scanning /home/jenkins/minikube-integration/21847-11004/.minikube/files for local assets ...
I1121 14:29:23.470349 249617 filesync.go:149] local asset: /home/jenkins/minikube-integration/21847-11004/.minikube/files/etc/ssl/certs/145232.pem -> 145232.pem in /etc/ssl/certs
I1121 14:29:23.470480 249617 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1121 14:29:23.479086 249617 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/files/etc/ssl/certs/145232.pem --> /etc/ssl/certs/145232.pem (1708 bytes)
I1121 14:29:23.506776 249617 start.go:296] duration metric: took 169.402964ms for postStartSetup
I1121 14:29:23.507166 249617 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-012258
I1121 14:29:23.527044 249617 profile.go:143] Saving config to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/config.json ...
I1121 14:29:23.527374 249617 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1121 14:29:23.527425 249617 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-012258
I1121 14:29:23.546669 249617 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33060 SSHKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/machines/old-k8s-version-012258/id_rsa Username:docker}
I1121 14:29:23.645314 249617 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1121 14:29:23.650498 249617 start.go:128] duration metric: took 11.844529266s to createHost
I1121 14:29:23.650523 249617 start.go:83] releasing machines lock for "old-k8s-version-012258", held for 11.844683904s
I1121 14:29:23.650592 249617 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-012258
I1121 14:29:23.671161 249617 ssh_runner.go:195] Run: cat /version.json
I1121 14:29:23.671227 249617 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-012258
I1121 14:29:23.671321 249617 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1121 14:29:23.671403 249617 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-012258
I1121 14:29:23.694189 249617 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33060 SSHKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/machines/old-k8s-version-012258/id_rsa Username:docker}
I1121 14:29:23.694196 249617 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33060 SSHKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/machines/old-k8s-version-012258/id_rsa Username:docker}
I1121 14:29:23.856609 249617 ssh_runner.go:195] Run: systemctl --version
I1121 14:29:23.863273 249617 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1121 14:29:23.867917 249617 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1121 14:29:23.867991 249617 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1121 14:29:23.895679 249617 cni.go:262] disabled [/etc/cni/net.d/10-crio-bridge.conflist.disabled, /etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
I1121 14:29:23.895707 249617 start.go:496] detecting cgroup driver to use...
I1121 14:29:23.895742 249617 detect.go:190] detected "systemd" cgroup driver on host os
I1121 14:29:23.895805 249617 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1121 14:29:23.911897 249617 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1121 14:29:23.925350 249617 docker.go:218] disabling cri-docker service (if available) ...
I1121 14:29:23.925400 249617 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1121 14:29:23.943424 249617 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1121 14:29:23.962675 249617 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1121 14:29:24.059689 249617 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1121 14:29:24.169263 249617 docker.go:234] disabling docker service ...
I1121 14:29:24.169325 249617 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1121 14:29:24.191949 249617 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1121 14:29:24.206181 249617 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1121 14:29:24.319402 249617 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1121 14:29:24.455060 249617 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1121 14:29:24.472888 249617 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1121 14:29:24.497138 249617 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.9"|' /etc/containerd/config.toml"
I1121 14:29:24.524424 249617 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1121 14:29:24.536491 249617 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I1121 14:29:24.536702 249617 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I1121 14:29:24.547193 249617 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1121 14:29:24.559919 249617 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1121 14:29:24.571627 249617 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1121 14:29:24.581977 249617 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1121 14:29:24.629839 249617 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1121 14:29:24.640310 249617 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1121 14:29:24.650595 249617 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1121 14:29:24.660801 249617 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1121 14:29:24.669493 249617 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1121 14:29:24.677810 249617 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1121 14:29:24.781513 249617 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1121 14:29:24.929576 249617 start.go:543] Will wait 60s for socket path /run/containerd/containerd.sock
I1121 14:29:24.929707 249617 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I1121 14:29:24.936782 249617 start.go:564] Will wait 60s for crictl version
I1121 14:29:24.936893 249617 ssh_runner.go:195] Run: which crictl
I1121 14:29:24.942453 249617 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1121 14:29:24.986447 249617 start.go:580] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: v2.1.5
RuntimeApiVersion: v1
I1121 14:29:24.986527 249617 ssh_runner.go:195] Run: containerd --version
I1121 14:29:25.018021 249617 ssh_runner.go:195] Run: containerd --version
I1121 14:29:25.051308 249617 out.go:179] * Preparing Kubernetes v1.28.0 on containerd 2.1.5 ...
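The sed sequence above is how this run rewrites /etc/containerd/config.toml in place (systemd cgroups, the runc v2 shim, the pause sandbox image, and the CNI conf dir) before restarting containerd. Two hedged ways to confirm the rewrite took effect on the node, reusing this run's profile name and binary path:
  # the cgroup-driver line the sed edit forced on
  out/minikube-linux-amd64 ssh -p old-k8s-version-012258 -- sudo grep -n 'SystemdCgroup' /etc/containerd/config.toml
  # containerd's own view of its config after the restart
  out/minikube-linux-amd64 ssh -p old-k8s-version-012258 -- sudo crictl info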
I1121 14:29:25.029036 255774 start.go:309] selected driver: docker
I1121 14:29:25.029056 255774 start.go:930] validating driver "docker" against <nil>
I1121 14:29:25.029071 255774 start.go:941] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1121 14:29:25.029977 255774 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1121 14:29:25.123370 255774 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:3 ContainersRunning:3 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:80 OomKillDisable:false NGoroutines:92 SystemTime:2025-11-21 14:29:25.11156096 +0000 UTC LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-1044-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652072448 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-6 Labels:[] ExperimentalBuild:false ServerVersion:29.0.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:fcd43222d6b07379a4be9786bda52438f0dd16a1 Expected:} RuncCommit:{ID:v1.3.3-0-gd842d771 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.30.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.40.3] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v1.0.1] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1121 14:29:25.123696 255774 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I1121 14:29:25.124078 255774 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1121 14:29:25.125758 255774 out.go:179] * Using Docker driver with root privileges
I1121 14:29:25.127166 255774 cni.go:84] Creating CNI manager for ""
I1121 14:29:25.127249 255774 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1121 14:29:25.127262 255774 start_flags.go:336] Found "CNI" CNI - setting NetworkPlugin=cni
I1121 14:29:25.127353 255774 start.go:353] cluster config:
{Name:default-k8s-diff-port-376255 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8444 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:default-k8s-diff-port-376255 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8444 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1121 14:29:25.129454 255774 out.go:179] * Starting "default-k8s-diff-port-376255" primary control-plane node in "default-k8s-diff-port-376255" cluster
I1121 14:29:25.130961 255774 cache.go:134] Beginning downloading kic base image for docker with containerd
I1121 14:29:25.132637 255774 out.go:179] * Pulling base image v0.0.48-1763507788-21924 ...
I1121 14:29:25.134190 255774 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
I1121 14:29:25.134237 255774 preload.go:203] Found local preload: /home/jenkins/minikube-integration/21847-11004/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-containerd-overlay2-amd64.tar.lz4
I1121 14:29:25.134251 255774 cache.go:65] Caching tarball of preloaded images
I1121 14:29:25.134262 255774 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a in local docker daemon
I1121 14:29:25.134379 255774 preload.go:238] Found /home/jenkins/minikube-integration/21847-11004/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-containerd-overlay2-amd64.tar.lz4 in cache, skipping download
I1121 14:29:25.134391 255774 cache.go:68] Finished verifying existence of preloaded tar for v1.34.1 on containerd
I1121 14:29:25.134520 255774 profile.go:143] Saving config to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/config.json ...
I1121 14:29:25.134560 255774 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/config.json: {Name:mk1db0ba6952ac549a7eae06783e73916a7ad392 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:25.161339 255774 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a in local docker daemon, skipping pull
I1121 14:29:25.161363 255774 cache.go:158] gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a exists in daemon, skipping load
I1121 14:29:25.161384 255774 cache.go:243] Successfully downloaded all kic artifacts
I1121 14:29:25.161419 255774 start.go:360] acquireMachinesLock for default-k8s-diff-port-376255: {Name:mka18b3ecaec4bae205bc7951f90400738bef300 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1121 14:29:25.161518 255774 start.go:364] duration metric: took 79.824µs to acquireMachinesLock for "default-k8s-diff-port-376255"
I1121 14:29:25.161561  255774 start.go:93] Provisioning new machine with config: &{Name:default-k8s-diff-port-376255 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8444 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:default-k8s-diff-port-376255 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8444 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8444 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1121 14:29:25.161653 255774 start.go:125] createHost starting for "" (driver="docker")
I1121 14:29:25.055066 249617 cli_runner.go:164] Run: docker network inspect old-k8s-version-012258 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1121 14:29:25.085953 249617 ssh_runner.go:195] Run: grep 192.168.94.1 host.minikube.internal$ /etc/hosts
I1121 14:29:25.093859 249617 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.94.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
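The one-liner above is an idempotent /etc/hosts update: grep -v strips any stale host.minikube.internal entry, echo appends the fresh mapping, and the temp file is installed with sudo cp because a plain shell redirect would run without root. The result is easy to verify on the node:

  grep host.minikube.internal /etc/hosts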
I1121 14:29:25.111432  249617 kubeadm.go:884] updating cluster {Name:old-k8s-version-012258 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-012258 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.94.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1121 14:29:25.111671 249617 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1121 14:29:25.111753 249617 ssh_runner.go:195] Run: sudo crictl images --output json
I1121 14:29:25.143860 249617 containerd.go:627] all images are preloaded for containerd runtime.
I1121 14:29:25.143888 249617 containerd.go:534] Images already preloaded, skipping extraction
I1121 14:29:25.143953 249617 ssh_runner.go:195] Run: sudo crictl images --output json
I1121 14:29:25.174770 249617 containerd.go:627] all images are preloaded for containerd runtime.
I1121 14:29:25.174789 249617 cache_images.go:86] Images are preloaded, skipping loading
I1121 14:29:25.174797 249617 kubeadm.go:935] updating node { 192.168.94.2 8443 v1.28.0 containerd true true} ...
I1121 14:29:25.174897 249617 kubeadm.go:947] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.28.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=old-k8s-version-012258 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.94.2
[Install]
config:
{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-012258 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
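The empty ExecStart= followed by a second ExecStart= in the unit above is the standard systemd drop-in idiom for replacing, rather than appending to, the packaged command line. Once the unit and the 10-kubeadm.conf drop-in are written to the node below, the merged definition can be reviewed with:

  systemctl cat kubelet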
I1121 14:29:25.174970 249617 ssh_runner.go:195] Run: sudo crictl info
I1121 14:29:25.211311 249617 cni.go:84] Creating CNI manager for ""
I1121 14:29:25.211341 249617 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1121 14:29:25.211371 249617 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1121 14:29:25.211401  249617 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.94.2 APIServerPort:8443 KubernetesVersion:v1.28.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:old-k8s-version-012258 NodeName:old-k8s-version-012258 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.94.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.94.2 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1121 14:29:25.211596 249617 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.94.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///run/containerd/containerd.sock
name: "old-k8s-version-012258"
kubeletExtraArgs:
node-ip: 192.168.94.2
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.94.2"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.28.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
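A rendered config like the one above can be exercised before committing to it: kubeadm init supports a dry-run mode that validates the file and prints what would be done without mutating the node. This is a manual-debugging suggestion, not a step minikube performs here (the file is installed as /var/tmp/minikube/kubeadm.yaml later in this log):

  sudo kubeadm init --config /var/tmp/minikube/kubeadm.yaml --dry-run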
I1121 14:29:25.211673 249617 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.28.0
I1121 14:29:25.224124 249617 binaries.go:51] Found k8s binaries, skipping transfer
I1121 14:29:25.224202 249617 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1121 14:29:25.235430 249617 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (326 bytes)
I1121 14:29:25.254181 249617 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1121 14:29:25.283842 249617 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2175 bytes)
I1121 14:29:25.302971 249617 ssh_runner.go:195] Run: grep 192.168.94.2 control-plane.minikube.internal$ /etc/hosts
I1121 14:29:25.309092 249617 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.94.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1121 14:29:25.325170 249617 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1121 14:29:25.438037 249617 ssh_runner.go:195] Run: sudo systemctl start kubelet
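If the kubelet were to fail at this point, the same journal that the log-gathering step reads later in this run is the first place to look:

  sudo journalctl -u kubelet -n 400 --no-pager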
I1121 14:29:25.469767 249617 certs.go:69] Setting up /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258 for IP: 192.168.94.2
I1121 14:29:25.469790 249617 certs.go:195] generating shared ca certs ...
I1121 14:29:25.469811 249617 certs.go:227] acquiring lock for ca certs: {Name:mk4ac68319839cd6684afc66121341297238277f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:25.470023 249617 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21847-11004/.minikube/ca.key
I1121 14:29:25.470095 249617 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21847-11004/.minikube/proxy-client-ca.key
I1121 14:29:25.470105 249617 certs.go:257] generating profile certs ...
I1121 14:29:25.470177 249617 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/client.key
I1121 14:29:25.470199 249617 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/client.crt with IP's: []
I1121 14:29:25.634340 249617 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/client.crt ...
I1121 14:29:25.634374 249617 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/client.crt: {Name:mk5e1a3132436dad740351857d527e3c45fff4e4 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:25.648586 249617 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/client.key ...
I1121 14:29:25.648625 249617 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/client.key: {Name:mk757010d91a13b26eb1340def496546bee9bf26 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:25.648791 249617 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/apiserver.key.a13049cc
I1121 14:29:25.648816 249617 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/apiserver.crt.a13049cc with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.94.2]
I1121 14:29:25.817862 249617 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/apiserver.crt.a13049cc ...
I1121 14:29:25.817892 249617 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/apiserver.crt.a13049cc: {Name:mk8a482343e99af6e8bdd7e52a6e5b813685beb7 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:25.818099 249617 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/apiserver.key.a13049cc ...
I1121 14:29:25.818121 249617 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/apiserver.key.a13049cc: {Name:mk4cf761e884b2a77e105e39ad6b0495b59b5aee Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:25.818237 249617 certs.go:382] copying /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/apiserver.crt.a13049cc -> /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/apiserver.crt
I1121 14:29:25.818331 249617 certs.go:386] copying /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/apiserver.key.a13049cc -> /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/apiserver.key
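The SANs requested for this certificate (10.96.0.1, 127.0.0.1, 10.0.0.1, 192.168.94.2) can be confirmed in the finished file with openssl:

  openssl x509 -noout -text -in /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/apiserver.crt | grep -A1 'Subject Alternative Name'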
I1121 14:29:25.818390 249617 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/proxy-client.key
I1121 14:29:25.818406 249617 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/proxy-client.crt with IP's: []
I1121 14:29:26.390351 249617 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/proxy-client.crt ...
I1121 14:29:26.390391 249617 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/proxy-client.crt: {Name:mk37207f300780275f6aa5331fc436d60739196c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:26.390599 249617 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/proxy-client.key ...
I1121 14:29:26.390617 249617 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/proxy-client.key: {Name:mkff5d416178c38a50235608b783c3957bee8456 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:26.390849 249617 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/14523.pem (1338 bytes)
W1121 14:29:26.390898 249617 certs.go:480] ignoring /home/jenkins/minikube-integration/21847-11004/.minikube/certs/14523_empty.pem, impossibly tiny 0 bytes
I1121 14:29:26.390913 249617 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/ca-key.pem (1675 bytes)
I1121 14:29:26.390946 249617 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/ca.pem (1078 bytes)
I1121 14:29:26.390988 249617 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/cert.pem (1123 bytes)
I1121 14:29:26.391029 249617 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/key.pem (1675 bytes)
I1121 14:29:26.391086 249617 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-11004/.minikube/files/etc/ssl/certs/145232.pem (1708 bytes)
I1121 14:29:26.391817 249617 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1121 14:29:26.418450 249617 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I1121 14:29:26.446063 249617 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1121 14:29:26.469197 249617 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I1121 14:29:26.493823 249617 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1432 bytes)
I1121 14:29:26.526847 249617 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I1121 14:29:26.555176 249617 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1121 14:29:25.915600 213058 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1121 14:29:25.916118 213058 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1121 14:29:25.916177 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1121 14:29:25.916228 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1121 14:29:25.948057 213058 cri.go:89] found id: "9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1"
I1121 14:29:25.948080 213058 cri.go:89] found id: "934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780"
I1121 14:29:25.948087 213058 cri.go:89] found id: ""
I1121 14:29:25.948096 213058 logs.go:282] 2 containers: [9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1 934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780]
I1121 14:29:25.948160 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:25.952634 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:25.956801 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1121 14:29:25.956870 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1121 14:29:25.990988 213058 cri.go:89] found id: "4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359"
I1121 14:29:25.991014 213058 cri.go:89] found id: ""
I1121 14:29:25.991024 213058 logs.go:282] 1 containers: [4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359]
I1121 14:29:25.991083 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:25.995665 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1121 14:29:25.995736 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1121 14:29:26.031577 213058 cri.go:89] found id: ""
I1121 14:29:26.031604 213058 logs.go:282] 0 containers: []
W1121 14:29:26.031612 213058 logs.go:284] No container was found matching "coredns"
I1121 14:29:26.031618 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1121 14:29:26.031665 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1121 14:29:26.064880 213058 cri.go:89] found id: "e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6"
I1121 14:29:26.064907 213058 cri.go:89] found id: "f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545"
I1121 14:29:26.064912 213058 cri.go:89] found id: ""
I1121 14:29:26.064922 213058 logs.go:282] 2 containers: [e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6 f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545]
I1121 14:29:26.064979 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:26.070274 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:26.075659 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1121 14:29:26.075731 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1121 14:29:26.108079 213058 cri.go:89] found id: ""
I1121 14:29:26.108108 213058 logs.go:282] 0 containers: []
W1121 14:29:26.108118 213058 logs.go:284] No container was found matching "kube-proxy"
I1121 14:29:26.108125 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1121 14:29:26.108181 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1121 14:29:26.138988 213058 cri.go:89] found id: "94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3"
I1121 14:29:26.139018 213058 cri.go:89] found id: "56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463"
I1121 14:29:26.139024 213058 cri.go:89] found id: ""
I1121 14:29:26.139034 213058 logs.go:282] 2 containers: [94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3 56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463]
I1121 14:29:26.139096 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:26.143487 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:26.147564 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1121 14:29:26.147631 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1121 14:29:26.185747 213058 cri.go:89] found id: ""
I1121 14:29:26.185774 213058 logs.go:282] 0 containers: []
W1121 14:29:26.185785 213058 logs.go:284] No container was found matching "kindnet"
I1121 14:29:26.185793 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1121 14:29:26.185848 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1121 14:29:26.220265 213058 cri.go:89] found id: ""
I1121 14:29:26.220296 213058 logs.go:282] 0 containers: []
W1121 14:29:26.220308 213058 logs.go:284] No container was found matching "storage-provisioner"
I1121 14:29:26.220321 213058 logs.go:123] Gathering logs for kube-controller-manager [56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463] ...
I1121 14:29:26.220335 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463"
I1121 14:29:26.265042 213058 logs.go:123] Gathering logs for kubelet ...
I1121 14:29:26.265072 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1121 14:29:26.402636 213058 logs.go:123] Gathering logs for describe nodes ...
I1121 14:29:26.402672 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1121 14:29:26.484531 213058 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
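The kubectl failure is consistent with the healthz probe at the top of this block: the apiserver containers exist, but nothing is listening on 8443 yet, so every client fails with connection refused. Probing the endpoint directly (address taken from this log) is a quick way to tell a dead apiserver from a slow one:

  curl -k https://192.168.76.2:8443/healthz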
I1121 14:29:26.484565 213058 logs.go:123] Gathering logs for kube-apiserver [9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1] ...
I1121 14:29:26.484581 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1"
I1121 14:29:26.534239 213058 logs.go:123] Gathering logs for kube-apiserver [934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780] ...
I1121 14:29:26.534294 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780"
I1121 14:29:26.579971 213058 logs.go:123] Gathering logs for containerd ...
I1121 14:29:26.580016 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1121 14:29:26.643693 213058 logs.go:123] Gathering logs for container status ...
I1121 14:29:26.643727 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1121 14:29:26.683712 213058 logs.go:123] Gathering logs for dmesg ...
I1121 14:29:26.683748 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1121 14:29:26.702800 213058 logs.go:123] Gathering logs for etcd [4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359] ...
I1121 14:29:26.702836 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359"
I1121 14:29:26.741813 213058 logs.go:123] Gathering logs for kube-scheduler [e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6] ...
I1121 14:29:26.741845 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6"
I1121 14:29:26.812944 213058 logs.go:123] Gathering logs for kube-scheduler [f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545] ...
I1121 14:29:26.812997 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545"
I1121 14:29:26.855307 213058 logs.go:123] Gathering logs for kube-controller-manager [94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3] ...
I1121 14:29:26.855347 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3"
I1121 14:29:24.308535 252125 containerd.go:267] Checking existence of image with name "registry.k8s.io/coredns/coredns:v1.12.1" and sha "52546a367cc9e0d924aa3b190596a9167fa6e53245023b5b5baf0f07e5443969"
I1121 14:29:24.308619 252125 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/coredns/coredns:v1.12.1
I1121 14:29:24.317176 252125 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-proxy:v1.34.1" and sha "fc25172553d79197ecd840ec8dba1fba68330079355e974b04c1a441e6a4a0b7"
I1121 14:29:24.317245 252125 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-proxy:v1.34.1
I1121 14:29:24.318774 252125 cache_images.go:118] "registry.k8s.io/etcd:3.6.4-0" needs transfer: "registry.k8s.io/etcd:3.6.4-0" does not exist at hash "5f1f5298c888daa46c4409ff4cefe5ca9d16e479419f94cdb5f5d5563dac0115" in container runtime
I1121 14:29:24.318825 252125 cri.go:218] Removing image: registry.k8s.io/etcd:3.6.4-0
I1121 14:29:24.318867 252125 ssh_runner.go:195] Run: which crictl
I1121 14:29:24.328208 252125 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-controller-manager:v1.34.1" and sha "c80c8dbafe7dd71fc21527912a6dd20ccd1b71f3e561a5c28337388d0619538f"
I1121 14:29:24.328249 252125 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-scheduler:v1.34.1" and sha "7dd6aaa1717ab7eaae4578503e4c4d9965fcf5a249e8155fe16379ee9b6cb813"
I1121 14:29:24.328291 252125 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-controller-manager:v1.34.1
I1121 14:29:24.328305 252125 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-scheduler:v1.34.1
I1121 14:29:24.328664 252125 containerd.go:267] Checking existence of image with name "registry.k8s.io/pause:3.10.1" and sha "cd073f4c5f6a8e9dc6f3125ba00cf60819cae95c1ec84a1f146ee4a9cf9e803f"
I1121 14:29:24.328708 252125 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/pause:3.10.1
I1121 14:29:24.335839 252125 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-apiserver:v1.34.1" and sha "c3994bc6961024917ec0aeee02e62828108c21a52d87648e30f3080d9cbadc97"
I1121 14:29:24.335900 252125 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-apiserver:v1.34.1
I1121 14:29:24.337631 252125 cache_images.go:118] "registry.k8s.io/coredns/coredns:v1.12.1" needs transfer: "registry.k8s.io/coredns/coredns:v1.12.1" does not exist at hash "52546a367cc9e0d924aa3b190596a9167fa6e53245023b5b5baf0f07e5443969" in container runtime
I1121 14:29:24.337672 252125 cri.go:218] Removing image: registry.k8s.io/coredns/coredns:v1.12.1
I1121 14:29:24.337713 252125 ssh_runner.go:195] Run: which crictl
I1121 14:29:24.346363 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.6.4-0
I1121 14:29:24.346443 252125 cache_images.go:118] "registry.k8s.io/kube-proxy:v1.34.1" needs transfer: "registry.k8s.io/kube-proxy:v1.34.1" does not exist at hash "fc25172553d79197ecd840ec8dba1fba68330079355e974b04c1a441e6a4a0b7" in container runtime
I1121 14:29:24.346484 252125 cri.go:218] Removing image: registry.k8s.io/kube-proxy:v1.34.1
I1121 14:29:24.346517 252125 ssh_runner.go:195] Run: which crictl
I1121 14:29:24.361284 252125 cache_images.go:118] "registry.k8s.io/kube-controller-manager:v1.34.1" needs transfer: "registry.k8s.io/kube-controller-manager:v1.34.1" does not exist at hash "c80c8dbafe7dd71fc21527912a6dd20ccd1b71f3e561a5c28337388d0619538f" in container runtime
I1121 14:29:24.361331 252125 cri.go:218] Removing image: registry.k8s.io/kube-controller-manager:v1.34.1
I1121 14:29:24.361375 252125 ssh_runner.go:195] Run: which crictl
I1121 14:29:24.361424 252125 cache_images.go:118] "registry.k8s.io/kube-scheduler:v1.34.1" needs transfer: "registry.k8s.io/kube-scheduler:v1.34.1" does not exist at hash "7dd6aaa1717ab7eaae4578503e4c4d9965fcf5a249e8155fe16379ee9b6cb813" in container runtime
I1121 14:29:24.361445 252125 cri.go:218] Removing image: registry.k8s.io/kube-scheduler:v1.34.1
I1121 14:29:24.361477 252125 ssh_runner.go:195] Run: which crictl
I1121 14:29:24.366787 252125 cache_images.go:118] "registry.k8s.io/pause:3.10.1" needs transfer: "registry.k8s.io/pause:3.10.1" does not exist at hash "cd073f4c5f6a8e9dc6f3125ba00cf60819cae95c1ec84a1f146ee4a9cf9e803f" in container runtime
I1121 14:29:24.366831 252125 cri.go:218] Removing image: registry.k8s.io/pause:3.10.1
I1121 14:29:24.366871 252125 ssh_runner.go:195] Run: which crictl
I1121 14:29:24.379457 252125 cache_images.go:118] "registry.k8s.io/kube-apiserver:v1.34.1" needs transfer: "registry.k8s.io/kube-apiserver:v1.34.1" does not exist at hash "c3994bc6961024917ec0aeee02e62828108c21a52d87648e30f3080d9cbadc97" in container runtime
I1121 14:29:24.379503 252125 cri.go:218] Removing image: registry.k8s.io/kube-apiserver:v1.34.1
I1121 14:29:24.379558 252125 ssh_runner.go:195] Run: which crictl
I1121 14:29:24.379677 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.12.1
I1121 14:29:24.388569 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.6.4-0
I1121 14:29:24.388608 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.34.1
I1121 14:29:24.388658 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.34.1
I1121 14:29:24.388681 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1121 14:29:24.388574 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.34.1
I1121 14:29:24.418705 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.12.1
I1121 14:29:24.418763 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.34.1
I1121 14:29:24.427350 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.6.4-0
I1121 14:29:24.434639 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.34.1
I1121 14:29:24.434777 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1121 14:29:24.437430 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.34.1
I1121 14:29:24.437452 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.34.1
I1121 14:29:24.477986 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.34.1
I1121 14:29:24.478027 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.12.1
I1121 14:29:24.478099 252125 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0
I1121 14:29:24.478334 252125 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/etcd_3.6.4-0
I1121 14:29:24.478136 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.34.1
I1121 14:29:24.485019 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1121 14:29:24.485026 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.34.1
I1121 14:29:24.489362 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.34.1
I1121 14:29:24.521124 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.34.1
I1121 14:29:24.521651 252125 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1
I1121 14:29:24.521767 252125 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/coredns_v1.12.1
I1121 14:29:24.553384 252125 ssh_runner.go:352] existence check for /var/lib/minikube/images/etcd_3.6.4-0: stat -c "%s %y" /var/lib/minikube/images/etcd_3.6.4-0: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/etcd_3.6.4-0': No such file or directory
I1121 14:29:24.553425 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0 --> /var/lib/minikube/images/etcd_3.6.4-0 (74320896 bytes)
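Each stat/scp pair in this block is a simple cache-miss protocol: stat -c "%s %y" doubles as the existence probe, and only a failed probe triggers a transfer. As a shell sketch of the same guard (the literal scp form and the $NODE placeholder are illustrative; minikube drives the copy over its own SSH runner):

  stat -c '%s %y' /var/lib/minikube/images/etcd_3.6.4-0 2>/dev/null ||
    scp /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0 "$NODE":/var/lib/minikube/images/etcd_3.6.4-0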
I1121 14:29:24.553522 252125 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1
I1121 14:29:24.553632 252125 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-proxy_v1.34.1
I1121 14:29:24.553699 252125 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1
I1121 14:29:24.553755 252125 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/pause_3.10.1
I1121 14:29:24.553769 252125 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1
I1121 14:29:24.553803 252125 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1
I1121 14:29:24.553853 252125 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-scheduler_v1.34.1
I1121 14:29:24.553860 252125 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-apiserver_v1.34.1
I1121 14:29:24.553893 252125 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1
I1121 14:29:24.553920 252125 ssh_runner.go:352] existence check for /var/lib/minikube/images/coredns_v1.12.1: stat -c "%s %y" /var/lib/minikube/images/coredns_v1.12.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/coredns_v1.12.1': No such file or directory
I1121 14:29:24.553945 252125 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-controller-manager_v1.34.1
I1121 14:29:24.553945 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1 --> /var/lib/minikube/images/coredns_v1.12.1 (22394368 bytes)
I1121 14:29:24.565027 252125 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-proxy_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-proxy_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-proxy_v1.34.1': No such file or directory
I1121 14:29:24.565077 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1 --> /var/lib/minikube/images/kube-proxy_v1.34.1 (25966080 bytes)
I1121 14:29:24.565153 252125 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-apiserver_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-apiserver_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-apiserver_v1.34.1': No such file or directory
I1121 14:29:24.565169 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1 --> /var/lib/minikube/images/kube-apiserver_v1.34.1 (27073024 bytes)
I1121 14:29:24.574297 252125 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-controller-manager_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-controller-manager_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-controller-manager_v1.34.1': No such file or directory
I1121 14:29:24.574338 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1 --> /var/lib/minikube/images/kube-controller-manager_v1.34.1 (22831104 bytes)
I1121 14:29:24.574363 252125 ssh_runner.go:352] existence check for /var/lib/minikube/images/pause_3.10.1: stat -c "%s %y" /var/lib/minikube/images/pause_3.10.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/pause_3.10.1': No such file or directory
I1121 14:29:24.574390 252125 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-scheduler_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-scheduler_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-scheduler_v1.34.1': No such file or directory
I1121 14:29:24.574393 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 --> /var/lib/minikube/images/pause_3.10.1 (321024 bytes)
I1121 14:29:24.574407 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1 --> /var/lib/minikube/images/kube-scheduler_v1.34.1 (17396736 bytes)
I1121 14:29:24.784169 252125 containerd.go:285] Loading image: /var/lib/minikube/images/pause_3.10.1
I1121 14:29:24.784246 252125 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/pause_3.10.1
I1121 14:29:24.964305 252125 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 from cache
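Note that every import goes through containerd's k8s.io namespace, which is the namespace the CRI (and hence the kubelet) reads; an image loaded into ctr's default namespace would be invisible to Kubernetes. A load can be confirmed with the same probe this log uses for its existence checks:

  sudo ctr -n=k8s.io images ls name==registry.k8s.io/pause:3.10.1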
I1121 14:29:25.029557 252125 containerd.go:285] Loading image: /var/lib/minikube/images/kube-scheduler_v1.34.1
I1121 14:29:25.029626 252125 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-scheduler_v1.34.1
I1121 14:29:25.445459 252125 containerd.go:267] Checking existence of image with name "gcr.io/k8s-minikube/storage-provisioner:v5" and sha "6e38f40d628db3002f5617342c8872c935de530d867d0f709a2fbda1a302a562"
I1121 14:29:25.445578 252125 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==gcr.io/k8s-minikube/storage-provisioner:v5
I1121 14:29:26.691152 252125 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-scheduler_v1.34.1: (1.661495413s)
I1121 14:29:26.691188 252125 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1 from cache
I1121 14:29:26.691209 252125 containerd.go:285] Loading image: /var/lib/minikube/images/coredns_v1.12.1
I1121 14:29:26.691206 252125 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images ls name==gcr.io/k8s-minikube/storage-provisioner:v5: (1.245604103s)
I1121 14:29:26.691250 252125 cache_images.go:118] "gcr.io/k8s-minikube/storage-provisioner:v5" needs transfer: "gcr.io/k8s-minikube/storage-provisioner:v5" does not exist at hash "6e38f40d628db3002f5617342c8872c935de530d867d0f709a2fbda1a302a562" in container runtime
I1121 14:29:26.691264 252125 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/coredns_v1.12.1
I1121 14:29:26.691297 252125 cri.go:218] Removing image: gcr.io/k8s-minikube/storage-provisioner:v5
I1121 14:29:26.691347 252125 ssh_runner.go:195] Run: which crictl
I1121 14:29:26.696141 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1121 14:29:28.100615 252125 ssh_runner.go:235] Completed: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5: (1.404441617s)
I1121 14:29:28.100696 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1121 14:29:28.100615 252125 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/coredns_v1.12.1: (1.409327822s)
I1121 14:29:28.100767 252125 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1 from cache
I1121 14:29:28.100803 252125 containerd.go:285] Loading image: /var/lib/minikube/images/kube-controller-manager_v1.34.1
I1121 14:29:28.100853 252125 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-controller-manager_v1.34.1
I1121 14:29:28.132780 252125 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1121 14:29:25.163849 255774 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I1121 14:29:25.164318 255774 start.go:159] libmachine.API.Create for "default-k8s-diff-port-376255" (driver="docker")
I1121 14:29:25.164395 255774 client.go:173] LocalClient.Create starting
I1121 14:29:25.164513 255774 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21847-11004/.minikube/certs/ca.pem
I1121 14:29:25.164575 255774 main.go:143] libmachine: Decoding PEM data...
I1121 14:29:25.164605 255774 main.go:143] libmachine: Parsing certificate...
I1121 14:29:25.164704 255774 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21847-11004/.minikube/certs/cert.pem
I1121 14:29:25.164760 255774 main.go:143] libmachine: Decoding PEM data...
I1121 14:29:25.164776 255774 main.go:143] libmachine: Parsing certificate...
I1121 14:29:25.165330 255774 cli_runner.go:164] Run: docker network inspect default-k8s-diff-port-376255 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1121 14:29:25.188513 255774 cli_runner.go:211] docker network inspect default-k8s-diff-port-376255 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1121 14:29:25.188614 255774 network_create.go:284] running [docker network inspect default-k8s-diff-port-376255] to gather additional debugging logs...
I1121 14:29:25.188640 255774 cli_runner.go:164] Run: docker network inspect default-k8s-diff-port-376255
W1121 14:29:25.213297 255774 cli_runner.go:211] docker network inspect default-k8s-diff-port-376255 returned with exit code 1
I1121 14:29:25.213338 255774 network_create.go:287] error running [docker network inspect default-k8s-diff-port-376255]: docker network inspect default-k8s-diff-port-376255: exit status 1
stdout:
[]
stderr:
Error response from daemon: network default-k8s-diff-port-376255 not found
I1121 14:29:25.213435 255774 network_create.go:289] output of [docker network inspect default-k8s-diff-port-376255]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network default-k8s-diff-port-376255 not found
** /stderr **
I1121 14:29:25.213589 255774 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1121 14:29:25.240844 255774 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-66cfc06dc768 IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:3a:44:28:22:82:94} reservation:<nil>}
I1121 14:29:25.241874 255774 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-39921db0d513 IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:76:e4:85:98:a5:e3} reservation:<nil>}
I1121 14:29:25.242975 255774 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-36a8741c90a2 IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:4a:21:99:72:63:4a} reservation:<nil>}
I1121 14:29:25.244042 255774 network.go:211] skipping subnet 192.168.76.0/24 that is taken: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName:br-63d543fc8bbd IfaceIPv4:192.168.76.1 IfaceMTU:1500 IfaceMAC:c2:58:40:d2:33:c4} reservation:<nil>}
I1121 14:29:25.245269 255774 network.go:206] using free private subnet 192.168.85.0/24: &{IP:192.168.85.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.85.0/24 Gateway:192.168.85.1 ClientMin:192.168.85.2 ClientMax:192.168.85.254 Broadcast:192.168.85.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001eb46e0}
I1121 14:29:25.245303 255774 network_create.go:124] attempt to create docker network default-k8s-diff-port-376255 192.168.85.0/24 with gateway 192.168.85.1 and MTU of 1500 ...
I1121 14:29:25.245384 255774 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.85.0/24 --gateway=192.168.85.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=default-k8s-diff-port-376255 default-k8s-diff-port-376255
I1121 14:29:25.322210 255774 network_create.go:108] docker network default-k8s-diff-port-376255 192.168.85.0/24 created
I1121 14:29:25.322244 255774 kic.go:121] calculated static IP "192.168.85.2" for the "default-k8s-diff-port-376255" container
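The scan above skips the subnets already claimed by other profiles (192.168.49/58/67/76.0/24) and settles on 192.168.85.0/24, so the node receives the conventional .2 address in the fresh network. The created network can be inspected with the same Go-template idiom this log uses:

  docker network inspect default-k8s-diff-port-376255 --format '{{range .IPAM.Config}}{{.Subnet}} {{.Gateway}}{{end}}'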
I1121 14:29:25.322309 255774 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1121 14:29:25.346732 255774 cli_runner.go:164] Run: docker volume create default-k8s-diff-port-376255 --label name.minikube.sigs.k8s.io=default-k8s-diff-port-376255 --label created_by.minikube.sigs.k8s.io=true
I1121 14:29:25.374919 255774 oci.go:103] Successfully created a docker volume default-k8s-diff-port-376255
I1121 14:29:25.374994 255774 cli_runner.go:164] Run: docker run --rm --name default-k8s-diff-port-376255-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=default-k8s-diff-port-376255 --entrypoint /usr/bin/test -v default-k8s-diff-port-376255:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a -d /var/lib
I1121 14:29:26.343288 255774 oci.go:107] Successfully prepared a docker volume default-k8s-diff-port-376255
I1121 14:29:26.343370 255774 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
I1121 14:29:26.343387 255774 kic.go:194] Starting extracting preloaded images to volume ...
I1121 14:29:26.343457 255774 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21847-11004/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v default-k8s-diff-port-376255:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a -I lz4 -xf /preloaded.tar -C /extractDir
I1121 14:29:26.582319 249617 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/old-k8s-version-012258/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I1121 14:29:26.606403 249617 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/certs/14523.pem --> /usr/share/ca-certificates/14523.pem (1338 bytes)
I1121 14:29:26.635408 249617 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/files/etc/ssl/certs/145232.pem --> /usr/share/ca-certificates/145232.pem (1708 bytes)
I1121 14:29:26.661287 249617 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1121 14:29:26.686582 249617 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1121 14:29:26.703157 249617 ssh_runner.go:195] Run: openssl version
I1121 14:29:26.712353 249617 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/14523.pem && ln -fs /usr/share/ca-certificates/14523.pem /etc/ssl/certs/14523.pem"
I1121 14:29:26.725593 249617 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/14523.pem
I1121 14:29:26.732381 249617 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 21 14:01 /usr/share/ca-certificates/14523.pem
I1121 14:29:26.732523 249617 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/14523.pem
I1121 14:29:26.774823 249617 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/14523.pem /etc/ssl/certs/51391683.0"
I1121 14:29:26.785127 249617 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/145232.pem && ln -fs /usr/share/ca-certificates/145232.pem /etc/ssl/certs/145232.pem"
I1121 14:29:26.796035 249617 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/145232.pem
I1121 14:29:26.800685 249617 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 21 14:01 /usr/share/ca-certificates/145232.pem
I1121 14:29:26.800751 249617 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/145232.pem
I1121 14:29:26.842185 249617 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/145232.pem /etc/ssl/certs/3ec20f2e.0"
I1121 14:29:26.852632 249617 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1121 14:29:26.863838 249617 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1121 14:29:26.869571 249617 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 21 13:56 /usr/share/ca-certificates/minikubeCA.pem
I1121 14:29:26.869642 249617 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1121 14:29:26.922017 249617 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
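The eight-hex-digit link names here (51391683.0, 3ec20f2e.0, b5213941.0) follow OpenSSL's hashed-directory convention: TLS libraries locate a CA in /etc/ssl/certs by its subject hash plus a .0 suffix, and that hash is exactly what the openssl x509 -hash -noout calls above print. To recompute one by hand:

  openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem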
I1121 14:29:26.934065 249617 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1121 14:29:26.939457 249617 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1121 14:29:26.939526  249617 kubeadm.go:401] StartCluster: {Name:old-k8s-version-012258 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-012258 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.94.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1121 14:29:26.939648 249617 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I1121 14:29:26.939710 249617 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I1121 14:29:26.978114 249617 cri.go:89] found id: ""
I1121 14:29:26.978192 249617 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1121 14:29:26.989363 249617 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1121 14:29:27.000529 249617 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I1121 14:29:27.000603 249617 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1121 14:29:27.012158 249617 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1121 14:29:27.012179 249617 kubeadm.go:158] found existing configuration files:
I1121 14:29:27.012231 249617 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1121 14:29:27.022084 249617 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1121 14:29:27.022141 249617 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1121 14:29:27.034139 249617 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1121 14:29:27.044897 249617 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1121 14:29:27.045038 249617 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1121 14:29:27.056593 249617 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1121 14:29:27.066532 249617 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1121 14:29:27.066615 249617 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1121 14:29:27.077925 249617 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1121 14:29:27.088254 249617 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1121 14:29:27.088320 249617 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
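The four grep-then-rm pairs above apply one cleanup pattern per kubeconfig: keep the file only if it already points at the expected control-plane endpoint, otherwise remove it so kubeadm regenerates it. Roughly equivalent shell (a sketch; minikube actually drives each command individually over SSH):

    for f in admin.conf kubelet.conf controller-manager.conf scheduler.conf; do
      # a config that does not mention the canonical endpoint is stale (or absent)
      sudo grep -q 'https://control-plane.minikube.internal:8443' "/etc/kubernetes/$f" \
        || sudo rm -f "/etc/kubernetes/$f"
    done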
I1121 14:29:27.098442 249617 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.28.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1121 14:29:27.205509 249617 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1044-gcp\n", err: exit status 1
I1121 14:29:27.290009 249617 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1121 14:29:29.388121 213058 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1121 14:29:29.388594 213058 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
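This pair of lines is one iteration of the readiness loop: probe the apiserver's /healthz endpoint, and on "connection refused" (nothing listening yet) fall back to collecting component logs before retrying. The probe can be reproduced by hand; -k is needed because the apiserver's serving cert is signed by the cluster CA rather than a public one:

    curl -ks https://192.168.76.2:8443/healthz   # prints "ok" once the apiserver is up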
I1121 14:29:29.388645 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1121 14:29:29.388690 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1121 14:29:29.416964 213058 cri.go:89] found id: "9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1"
I1121 14:29:29.416991 213058 cri.go:89] found id: "934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780"
I1121 14:29:29.416996 213058 cri.go:89] found id: ""
I1121 14:29:29.417006 213058 logs.go:282] 2 containers: [9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1 934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780]
I1121 14:29:29.417074 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:29.421476 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:29.425483 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1121 14:29:29.425557 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1121 14:29:29.453687 213058 cri.go:89] found id: "4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359"
I1121 14:29:29.453708 213058 cri.go:89] found id: ""
I1121 14:29:29.453718 213058 logs.go:282] 1 containers: [4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359]
I1121 14:29:29.453783 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:29.458267 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1121 14:29:29.458353 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1121 14:29:29.485804 213058 cri.go:89] found id: ""
I1121 14:29:29.485865 213058 logs.go:282] 0 containers: []
W1121 14:29:29.485876 213058 logs.go:284] No container was found matching "coredns"
I1121 14:29:29.485883 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1121 14:29:29.485940 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1121 14:29:29.514265 213058 cri.go:89] found id: "e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6"
I1121 14:29:29.514290 213058 cri.go:89] found id: "f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545"
I1121 14:29:29.514294 213058 cri.go:89] found id: ""
I1121 14:29:29.514302 213058 logs.go:282] 2 containers: [e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6 f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545]
I1121 14:29:29.514349 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:29.518626 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:29.522446 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1121 14:29:29.522501 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1121 14:29:29.549770 213058 cri.go:89] found id: ""
I1121 14:29:29.549799 213058 logs.go:282] 0 containers: []
W1121 14:29:29.549811 213058 logs.go:284] No container was found matching "kube-proxy"
I1121 14:29:29.549819 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1121 14:29:29.549868 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1121 14:29:29.577193 213058 cri.go:89] found id: "94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3"
I1121 14:29:29.577217 213058 cri.go:89] found id: "56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463"
I1121 14:29:29.577222 213058 cri.go:89] found id: ""
I1121 14:29:29.577230 213058 logs.go:282] 2 containers: [94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3 56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463]
I1121 14:29:29.577288 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:29.581256 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:29.585291 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1121 14:29:29.585347 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1121 14:29:29.614632 213058 cri.go:89] found id: ""
I1121 14:29:29.614664 213058 logs.go:282] 0 containers: []
W1121 14:29:29.614674 213058 logs.go:284] No container was found matching "kindnet"
I1121 14:29:29.614682 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1121 14:29:29.614740 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1121 14:29:29.645697 213058 cri.go:89] found id: ""
I1121 14:29:29.645721 213058 logs.go:282] 0 containers: []
W1121 14:29:29.645730 213058 logs.go:284] No container was found matching "storage-provisioner"
I1121 14:29:29.645741 213058 logs.go:123] Gathering logs for kube-controller-manager [94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3] ...
I1121 14:29:29.645756 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3"
I1121 14:29:29.675578 213058 logs.go:123] Gathering logs for containerd ...
I1121 14:29:29.675607 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1121 14:29:29.718952 213058 logs.go:123] Gathering logs for container status ...
I1121 14:29:29.718990 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1121 14:29:29.750089 213058 logs.go:123] Gathering logs for kubelet ...
I1121 14:29:29.750117 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1121 14:29:29.858708 213058 logs.go:123] Gathering logs for kube-apiserver [934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780] ...
I1121 14:29:29.858738 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780"
I1121 14:29:29.902976 213058 logs.go:123] Gathering logs for kube-scheduler [f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545] ...
I1121 14:29:29.903013 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545"
I1121 14:29:29.938083 213058 logs.go:123] Gathering logs for kube-controller-manager [56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463] ...
I1121 14:29:29.938118 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463"
I1121 14:29:29.976329 213058 logs.go:123] Gathering logs for dmesg ...
I1121 14:29:29.976366 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1121 14:29:29.991448 213058 logs.go:123] Gathering logs for describe nodes ...
I1121 14:29:29.991485 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1121 14:29:30.053990 213058 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1121 14:29:30.054015 213058 logs.go:123] Gathering logs for kube-apiserver [9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1] ...
I1121 14:29:30.054032 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1"
I1121 14:29:30.089042 213058 logs.go:123] Gathering logs for etcd [4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359] ...
I1121 14:29:30.089076 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359"
I1121 14:29:30.124498 213058 logs.go:123] Gathering logs for kube-scheduler [e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6] ...
I1121 14:29:30.124528 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6"
I1121 14:29:32.685601 213058 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1121 14:29:32.686035 213058 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1121 14:29:32.686089 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1121 14:29:32.686144 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1121 14:29:32.744948 213058 cri.go:89] found id: "9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1"
I1121 14:29:32.745095 213058 cri.go:89] found id: "934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780"
I1121 14:29:32.745132 213058 cri.go:89] found id: ""
I1121 14:29:32.745169 213058 logs.go:282] 2 containers: [9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1 934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780]
I1121 14:29:32.745355 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:32.752020 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:32.760837 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1121 14:29:32.761106 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1121 14:29:32.807418 213058 cri.go:89] found id: "4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359"
I1121 14:29:32.807451 213058 cri.go:89] found id: ""
I1121 14:29:32.807462 213058 logs.go:282] 1 containers: [4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359]
I1121 14:29:32.807521 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:32.813216 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1121 14:29:32.813289 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1121 14:29:32.852598 213058 cri.go:89] found id: ""
I1121 14:29:32.852633 213058 logs.go:282] 0 containers: []
W1121 14:29:32.852645 213058 logs.go:284] No container was found matching "coredns"
I1121 14:29:32.852653 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1121 14:29:32.852711 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1121 14:29:32.889120 213058 cri.go:89] found id: "e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6"
I1121 14:29:32.889144 213058 cri.go:89] found id: "f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545"
I1121 14:29:32.889148 213058 cri.go:89] found id: ""
I1121 14:29:32.889157 213058 logs.go:282] 2 containers: [e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6 f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545]
I1121 14:29:32.889211 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:32.894834 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:32.900572 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1121 14:29:32.900646 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1121 14:29:32.937810 213058 cri.go:89] found id: ""
I1121 14:29:32.937836 213058 logs.go:282] 0 containers: []
W1121 14:29:32.937846 213058 logs.go:284] No container was found matching "kube-proxy"
I1121 14:29:32.937853 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1121 14:29:32.937914 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1121 14:29:32.975713 213058 cri.go:89] found id: "94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3"
I1121 14:29:32.975735 213058 cri.go:89] found id: "56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463"
I1121 14:29:32.975741 213058 cri.go:89] found id: ""
I1121 14:29:32.975751 213058 logs.go:282] 2 containers: [94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3 56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463]
I1121 14:29:32.975815 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:32.981574 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:32.985965 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1121 14:29:32.986030 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1121 14:29:33.019894 213058 cri.go:89] found id: ""
I1121 14:29:33.019923 213058 logs.go:282] 0 containers: []
W1121 14:29:33.019935 213058 logs.go:284] No container was found matching "kindnet"
I1121 14:29:33.019949 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1121 14:29:33.020009 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1121 14:29:33.051872 213058 cri.go:89] found id: ""
I1121 14:29:33.051901 213058 logs.go:282] 0 containers: []
W1121 14:29:33.051911 213058 logs.go:284] No container was found matching "storage-provisioner"
I1121 14:29:33.051923 213058 logs.go:123] Gathering logs for kube-scheduler [f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545] ...
I1121 14:29:33.051937 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545"
I1121 14:29:33.103114 213058 logs.go:123] Gathering logs for kube-controller-manager [94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3] ...
I1121 14:29:33.103153 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3"
I1121 14:29:33.142816 213058 logs.go:123] Gathering logs for containerd ...
I1121 14:29:33.142846 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1121 14:29:33.209677 213058 logs.go:123] Gathering logs for container status ...
I1121 14:29:33.209736 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1121 14:29:33.255185 213058 logs.go:123] Gathering logs for dmesg ...
I1121 14:29:33.255220 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1121 14:29:33.272562 213058 logs.go:123] Gathering logs for kube-apiserver [9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1] ...
I1121 14:29:33.272600 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1"
I1121 14:29:33.319098 213058 logs.go:123] Gathering logs for kube-apiserver [934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780] ...
I1121 14:29:33.319132 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780"
I1121 14:29:33.366245 213058 logs.go:123] Gathering logs for etcd [4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359] ...
I1121 14:29:33.366286 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359"
I1121 14:29:33.410624 213058 logs.go:123] Gathering logs for kube-controller-manager [56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463] ...
I1121 14:29:33.410660 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463"
I1121 14:29:33.458217 213058 logs.go:123] Gathering logs for kubelet ...
I1121 14:29:33.458253 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1121 14:29:33.586879 213058 logs.go:123] Gathering logs for describe nodes ...
I1121 14:29:33.586919 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
I1121 14:29:29.835800 252125 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-controller-manager_v1.34.1: (1.734910291s)
I1121 14:29:29.835838 252125 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1 from cache
I1121 14:29:29.835860 252125 containerd.go:285] Loading image: /var/lib/minikube/images/kube-proxy_v1.34.1
I1121 14:29:29.835902 252125 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-proxy_v1.34.1
I1121 14:29:29.835802 252125 ssh_runner.go:235] Completed: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5: (1.702989246s)
I1121 14:29:29.835965 252125 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5
I1121 14:29:29.836056 252125 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v5
I1121 14:29:29.840842 252125 ssh_runner.go:352] existence check for /var/lib/minikube/images/storage-provisioner_v5: stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v5: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/storage-provisioner_v5': No such file or directory
I1121 14:29:29.840873 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 --> /var/lib/minikube/images/storage-provisioner_v5 (9060352 bytes)
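The stat-then-scp sequence above is the image-cache existence check: stat -c "%s %y" asks for the size and mtime of the tarball on the node, and a non-zero exit (file missing) is what triggers the copy from the host-side cache. The same check by hand, assuming the path from the log:

    # exit status 1 here is what makes minikube fall back to scp
    stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v5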
I1121 14:29:32.866902 252125 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-proxy_v1.34.1: (3.030968163s)
I1121 14:29:32.866941 252125 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1 from cache
I1121 14:29:32.866961 252125 containerd.go:285] Loading image: /var/lib/minikube/images/kube-apiserver_v1.34.1
I1121 14:29:32.867002 252125 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-apiserver_v1.34.1
I1121 14:29:31.901829 255774 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21847-11004/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v default-k8s-diff-port-376255:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a -I lz4 -xf /preloaded.tar -C /extractDir: (5.558304176s)
I1121 14:29:31.901864 255774 kic.go:203] duration metric: took 5.558473353s to extract preloaded images to volume ...
W1121 14:29:31.901941 255774 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
W1121 14:29:31.901969 255774 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.
I1121 14:29:31.902010 255774 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1121 14:29:31.985847 255774 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname default-k8s-diff-port-376255 --name default-k8s-diff-port-376255 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=default-k8s-diff-port-376255 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=default-k8s-diff-port-376255 --network default-k8s-diff-port-376255 --ip 192.168.85.2 --volume default-k8s-diff-port-376255:/var --security-opt apparmor=unconfined --memory=3072mb -e container=docker --expose 8444 --publish=127.0.0.1::8444 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a
I1121 14:29:32.403824 255774 cli_runner.go:164] Run: docker container inspect default-k8s-diff-port-376255 --format={{.State.Running}}
I1121 14:29:32.427802 255774 cli_runner.go:164] Run: docker container inspect default-k8s-diff-port-376255 --format={{.State.Status}}
I1121 14:29:32.456228 255774 cli_runner.go:164] Run: docker exec default-k8s-diff-port-376255 stat /var/lib/dpkg/alternatives/iptables
I1121 14:29:32.514766 255774 oci.go:144] the created container "default-k8s-diff-port-376255" has a running status.
I1121 14:29:32.514799 255774 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21847-11004/.minikube/machines/default-k8s-diff-port-376255/id_rsa...
I1121 14:29:32.829505 255774 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21847-11004/.minikube/machines/default-k8s-diff-port-376255/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1121 14:29:32.861911 255774 cli_runner.go:164] Run: docker container inspect default-k8s-diff-port-376255 --format={{.State.Status}}
I1121 14:29:32.888316 255774 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1121 14:29:32.888342 255774 kic_runner.go:114] Args: [docker exec --privileged default-k8s-diff-port-376255 chown docker:docker /home/docker/.ssh/authorized_keys]
I1121 14:29:32.948121 255774 cli_runner.go:164] Run: docker container inspect default-k8s-diff-port-376255 --format={{.State.Status}}
I1121 14:29:32.975355 255774 machine.go:94] provisionDockerMachine start ...
I1121 14:29:32.975799 255774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" default-k8s-diff-port-376255
I1121 14:29:33.002463 255774 main.go:143] libmachine: Using SSH client type: native
I1121 14:29:33.002813 255774 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x8416e0] 0x8443c0 <nil> [] 0s} 127.0.0.1 33070 <nil> <nil>}
I1121 14:29:33.002834 255774 main.go:143] libmachine: About to run SSH command:
hostname
I1121 14:29:33.003677 255774 main.go:143] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:37682->127.0.0.1:33070: read: connection reset by peer
I1121 14:29:37.228254 249617 kubeadm.go:319] [init] Using Kubernetes version: v1.28.0
I1121 14:29:37.228434 249617 kubeadm.go:319] [preflight] Running pre-flight checks
I1121 14:29:37.228644 249617 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I1121 14:29:37.228822 249617 kubeadm.go:319] KERNEL_VERSION: 6.8.0-1044-gcp
I1121 14:29:37.228907 249617 kubeadm.go:319] OS: Linux
I1121 14:29:37.228971 249617 kubeadm.go:319] CGROUPS_CPU: enabled
I1121 14:29:37.229029 249617 kubeadm.go:319] CGROUPS_CPUSET: enabled
I1121 14:29:37.229111 249617 kubeadm.go:319] CGROUPS_DEVICES: enabled
I1121 14:29:37.229198 249617 kubeadm.go:319] CGROUPS_FREEZER: enabled
I1121 14:29:37.229264 249617 kubeadm.go:319] CGROUPS_MEMORY: enabled
I1121 14:29:37.229333 249617 kubeadm.go:319] CGROUPS_PIDS: enabled
I1121 14:29:37.229403 249617 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I1121 14:29:37.229468 249617 kubeadm.go:319] CGROUPS_IO: enabled
I1121 14:29:37.229624 249617 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1121 14:29:37.229762 249617 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1121 14:29:37.229892 249617 kubeadm.go:319] [preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
I1121 14:29:37.230051 249617 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1121 14:29:37.235113 249617 out.go:252] - Generating certificates and keys ...
I1121 14:29:37.235306 249617 kubeadm.go:319] [certs] Using existing ca certificate authority
I1121 14:29:37.235508 249617 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1121 14:29:37.235691 249617 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I1121 14:29:37.235858 249617 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I1121 14:29:37.236102 249617 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1121 14:29:37.236205 249617 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1121 14:29:37.236303 249617 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1121 14:29:37.236516 249617 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [localhost old-k8s-version-012258] and IPs [192.168.94.2 127.0.0.1 ::1]
I1121 14:29:37.236607 249617 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1121 14:29:37.236765 249617 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [localhost old-k8s-version-012258] and IPs [192.168.94.2 127.0.0.1 ::1]
I1121 14:29:37.236861 249617 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1121 14:29:37.236954 249617 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1121 14:29:37.237021 249617 kubeadm.go:319] [certs] Generating "sa" key and public key
I1121 14:29:37.237104 249617 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1121 14:29:37.237178 249617 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1121 14:29:37.237257 249617 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1121 14:29:37.237352 249617 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1121 14:29:37.237438 249617 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1121 14:29:37.237554 249617 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1121 14:29:37.237649 249617 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1121 14:29:37.239227 249617 out.go:252] - Booting up control plane ...
I1121 14:29:37.239369 249617 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1121 14:29:37.239534 249617 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1121 14:29:37.239682 249617 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1121 14:29:37.239829 249617 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1121 14:29:37.239965 249617 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1121 14:29:37.240022 249617 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1121 14:29:37.240260 249617 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
I1121 14:29:37.240373 249617 kubeadm.go:319] [apiclient] All control plane components are healthy after 5.503152 seconds
I1121 14:29:37.240759 249617 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1121 14:29:37.240933 249617 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1121 14:29:37.241035 249617 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1121 14:29:37.241286 249617 kubeadm.go:319] [mark-control-plane] Marking the node old-k8s-version-012258 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1121 14:29:37.241409 249617 kubeadm.go:319] [bootstrap-token] Using token: yix385.n0xejrlt7sdx1ngs
I1121 14:29:37.243198 249617 out.go:252] - Configuring RBAC rules ...
I1121 14:29:37.243379 249617 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1121 14:29:37.243497 249617 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1121 14:29:37.243755 249617 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1121 14:29:37.243946 249617 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1121 14:29:37.244147 249617 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1121 14:29:37.244287 249617 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1121 14:29:37.244477 249617 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1121 14:29:37.244564 249617 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1121 14:29:37.244632 249617 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1121 14:29:37.244642 249617 kubeadm.go:319]
I1121 14:29:37.244725 249617 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1121 14:29:37.244736 249617 kubeadm.go:319]
I1121 14:29:37.244834 249617 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1121 14:29:37.244845 249617 kubeadm.go:319]
I1121 14:29:37.244877 249617 kubeadm.go:319] mkdir -p $HOME/.kube
I1121 14:29:37.244966 249617 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1121 14:29:37.245033 249617 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1121 14:29:37.245045 249617 kubeadm.go:319]
I1121 14:29:37.245111 249617 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1121 14:29:37.245120 249617 kubeadm.go:319]
I1121 14:29:37.245178 249617 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1121 14:29:37.245192 249617 kubeadm.go:319]
I1121 14:29:37.245274 249617 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1121 14:29:37.245371 249617 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1121 14:29:37.245468 249617 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1121 14:29:37.245476 249617 kubeadm.go:319]
I1121 14:29:37.245604 249617 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1121 14:29:37.245734 249617 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1121 14:29:37.245755 249617 kubeadm.go:319]
I1121 14:29:37.245866 249617 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token yix385.n0xejrlt7sdx1ngs \
I1121 14:29:37.246024 249617 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:2aad198f79b3258593291a08f0028a72548d0fc82d6b54639b7d7d17a52adfdb \
I1121 14:29:37.246062 249617 kubeadm.go:319] --control-plane
I1121 14:29:37.246072 249617 kubeadm.go:319]
I1121 14:29:37.246178 249617 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1121 14:29:37.246189 249617 kubeadm.go:319]
I1121 14:29:37.246294 249617 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token yix385.n0xejrlt7sdx1ngs \
I1121 14:29:37.246443 249617 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:2aad198f79b3258593291a08f0028a72548d0fc82d6b54639b7d7d17a52adfdb
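For reference, the --discovery-token-ca-cert-hash value printed above is the SHA-256 of the cluster CA's DER-encoded public key. It can be recomputed on the node with the command from the kubeadm documentation, assuming minikube's CA path and an RSA CA key:

    openssl x509 -pubkey -in /var/lib/minikube/certs/ca.crt \
      | openssl rsa -pubin -outform der 2>/dev/null \
      | openssl dgst -sha256 -hex | sed 's/^.* //'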
I1121 14:29:37.246454 249617 cni.go:84] Creating CNI manager for ""
I1121 14:29:37.246462 249617 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1121 14:29:37.248274 249617 out.go:179] * Configuring CNI (Container Networking Interface) ...
I1121 14:29:36.147516 255774 main.go:143] libmachine: SSH cmd err, output: <nil>: default-k8s-diff-port-376255
I1121 14:29:36.147569 255774 ubuntu.go:182] provisioning hostname "default-k8s-diff-port-376255"
I1121 14:29:36.147633 255774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" default-k8s-diff-port-376255
I1121 14:29:36.169609 255774 main.go:143] libmachine: Using SSH client type: native
I1121 14:29:36.169898 255774 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x8416e0] 0x8443c0 <nil> [] 0s} 127.0.0.1 33070 <nil> <nil>}
I1121 14:29:36.169928 255774 main.go:143] libmachine: About to run SSH command:
sudo hostname default-k8s-diff-port-376255 && echo "default-k8s-diff-port-376255" | sudo tee /etc/hostname
I1121 14:29:36.328958 255774 main.go:143] libmachine: SSH cmd err, output: <nil>: default-k8s-diff-port-376255
I1121 14:29:36.329040 255774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" default-k8s-diff-port-376255
I1121 14:29:36.353105 255774 main.go:143] libmachine: Using SSH client type: native
I1121 14:29:36.353414 255774 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x8416e0] 0x8443c0 <nil> [] 0s} 127.0.0.1 33070 <nil> <nil>}
I1121 14:29:36.353448 255774 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sdefault-k8s-diff-port-376255' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 default-k8s-diff-port-376255/g' /etc/hosts;
else
echo '127.0.1.1 default-k8s-diff-port-376255' | sudo tee -a /etc/hosts;
fi
fi
I1121 14:29:36.504067 255774 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1121 14:29:36.504097 255774 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21847-11004/.minikube CaCertPath:/home/jenkins/minikube-integration/21847-11004/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21847-11004/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21847-11004/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21847-11004/.minikube}
I1121 14:29:36.504119 255774 ubuntu.go:190] setting up certificates
I1121 14:29:36.504133 255774 provision.go:84] configureAuth start
I1121 14:29:36.504206 255774 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" default-k8s-diff-port-376255
I1121 14:29:36.528674 255774 provision.go:143] copyHostCerts
I1121 14:29:36.528752 255774 exec_runner.go:144] found /home/jenkins/minikube-integration/21847-11004/.minikube/ca.pem, removing ...
I1121 14:29:36.528762 255774 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21847-11004/.minikube/ca.pem
I1121 14:29:36.528840 255774 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21847-11004/.minikube/ca.pem (1078 bytes)
I1121 14:29:36.528968 255774 exec_runner.go:144] found /home/jenkins/minikube-integration/21847-11004/.minikube/cert.pem, removing ...
I1121 14:29:36.528997 255774 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21847-11004/.minikube/cert.pem
I1121 14:29:36.529043 255774 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21847-11004/.minikube/cert.pem (1123 bytes)
I1121 14:29:36.529141 255774 exec_runner.go:144] found /home/jenkins/minikube-integration/21847-11004/.minikube/key.pem, removing ...
I1121 14:29:36.529152 255774 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21847-11004/.minikube/key.pem
I1121 14:29:36.529188 255774 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21847-11004/.minikube/key.pem (1675 bytes)
I1121 14:29:36.529281 255774 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21847-11004/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21847-11004/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21847-11004/.minikube/certs/ca-key.pem org=jenkins.default-k8s-diff-port-376255 san=[127.0.0.1 192.168.85.2 default-k8s-diff-port-376255 localhost minikube]
I1121 14:29:36.617208 255774 provision.go:177] copyRemoteCerts
I1121 14:29:36.617283 255774 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1121 14:29:36.617345 255774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" default-k8s-diff-port-376255
I1121 14:29:36.639948 255774 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33070 SSHKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/machines/default-k8s-diff-port-376255/id_rsa Username:docker}
I1121 14:29:36.749486 255774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I1121 14:29:36.777360 255774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/machines/server.pem --> /etc/docker/server.pem (1249 bytes)
I1121 14:29:36.804875 255774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1121 14:29:36.830920 255774 provision.go:87] duration metric: took 326.762892ms to configureAuth
I1121 14:29:36.830953 255774 ubuntu.go:206] setting minikube options for container-runtime
I1121 14:29:36.831165 255774 config.go:182] Loaded profile config "default-k8s-diff-port-376255": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1121 14:29:36.831181 255774 machine.go:97] duration metric: took 3.855604158s to provisionDockerMachine
I1121 14:29:36.831191 255774 client.go:176] duration metric: took 11.666782197s to LocalClient.Create
I1121 14:29:36.831216 255774 start.go:167] duration metric: took 11.666902979s to libmachine.API.Create "default-k8s-diff-port-376255"
I1121 14:29:36.831234 255774 start.go:293] postStartSetup for "default-k8s-diff-port-376255" (driver="docker")
I1121 14:29:36.831254 255774 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1121 14:29:36.831311 255774 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1121 14:29:36.831360 255774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" default-k8s-diff-port-376255
I1121 14:29:36.855811 255774 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33070 SSHKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/machines/default-k8s-diff-port-376255/id_rsa Username:docker}
I1121 14:29:36.969760 255774 ssh_runner.go:195] Run: cat /etc/os-release
I1121 14:29:36.974452 255774 main.go:143] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1121 14:29:36.974529 255774 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1121 14:29:36.974577 255774 filesync.go:126] Scanning /home/jenkins/minikube-integration/21847-11004/.minikube/addons for local assets ...
I1121 14:29:36.974658 255774 filesync.go:126] Scanning /home/jenkins/minikube-integration/21847-11004/.minikube/files for local assets ...
I1121 14:29:36.974771 255774 filesync.go:149] local asset: /home/jenkins/minikube-integration/21847-11004/.minikube/files/etc/ssl/certs/145232.pem -> 145232.pem in /etc/ssl/certs
I1121 14:29:36.974903 255774 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1121 14:29:36.984975 255774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/files/etc/ssl/certs/145232.pem --> /etc/ssl/certs/145232.pem (1708 bytes)
I1121 14:29:37.017462 255774 start.go:296] duration metric: took 186.210262ms for postStartSetup
I1121 14:29:37.017947 255774 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" default-k8s-diff-port-376255
I1121 14:29:37.041309 255774 profile.go:143] Saving config to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/config.json ...
I1121 14:29:37.041659 255774 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1121 14:29:37.041731 255774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" default-k8s-diff-port-376255
I1121 14:29:37.070697 255774 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33070 SSHKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/machines/default-k8s-diff-port-376255/id_rsa Username:docker}
I1121 14:29:37.177189 255774 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1121 14:29:37.185711 255774 start.go:128] duration metric: took 12.024042461s to createHost
I1121 14:29:37.185741 255774 start.go:83] releasing machines lock for "default-k8s-diff-port-376255", held for 12.024206528s
I1121 14:29:37.185820 255774 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" default-k8s-diff-port-376255
I1121 14:29:37.211853 255774 ssh_runner.go:195] Run: cat /version.json
I1121 14:29:37.211903 255774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" default-k8s-diff-port-376255
I1121 14:29:37.211965 255774 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1121 14:29:37.212033 255774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" default-k8s-diff-port-376255
I1121 14:29:37.238575 255774 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33070 SSHKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/machines/default-k8s-diff-port-376255/id_rsa Username:docker}
I1121 14:29:37.242252 255774 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33070 SSHKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/machines/default-k8s-diff-port-376255/id_rsa Username:docker}
I1121 14:29:37.421321 255774 ssh_runner.go:195] Run: systemctl --version
I1121 14:29:37.431728 255774 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1121 14:29:37.437939 255774 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1121 14:29:37.438053 255774 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1121 14:29:37.469409 255774 cni.go:262] disabled [/etc/cni/net.d/10-crio-bridge.conflist.disabled, /etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
I1121 14:29:37.469437 255774 start.go:496] detecting cgroup driver to use...
I1121 14:29:37.469471 255774 detect.go:190] detected "systemd" cgroup driver on host os
I1121 14:29:37.469521 255774 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1121 14:29:37.490669 255774 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1121 14:29:37.507754 255774 docker.go:218] disabling cri-docker service (if available) ...
I1121 14:29:37.507821 255774 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1121 14:29:37.525644 255774 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1121 14:29:37.545289 255774 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1121 14:29:37.674060 255774 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1121 14:29:37.795128 255774 docker.go:234] disabling docker service ...
I1121 14:29:37.795198 255774 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1121 14:29:37.819043 255774 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1121 14:29:37.834819 255774 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1121 14:29:37.960408 255774 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1121 14:29:38.072269 255774 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1121 14:29:38.089314 255774 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1121 14:29:38.105248 255774 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I1121 14:29:38.117445 255774 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1121 14:29:38.128509 255774 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I1121 14:29:38.128607 255774 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I1121 14:29:38.139526 255774 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1121 14:29:38.150896 255774 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1121 14:29:38.161459 255774 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1121 14:29:38.173179 255774 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1121 14:29:38.183645 255774 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1121 14:29:38.194923 255774 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1121 14:29:38.207896 255774 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
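The run of sed edits above rewrites /etc/containerd/config.toml in place: pin the pause (sandbox) image, force the runc v2 runtime, set SystemdCgroup = true to match the "systemd" cgroup driver detected earlier, point conf_dir at /etc/cni/net.d, and re-enable unprivileged ports. A quick way to spot-check the result before the daemon restart below:

    # each rewritten setting should now appear in the config
    grep -nE 'SystemdCgroup|sandbox_image|conf_dir|enable_unprivileged_ports' /etc/containerd/config.toml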
I1121 14:29:38.220346 255774 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1121 14:29:38.230823 255774 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1121 14:29:38.241807 255774 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1121 14:29:38.339708 255774 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1121 14:29:38.460319 255774 start.go:543] Will wait 60s for socket path /run/containerd/containerd.sock
I1121 14:29:38.460387 255774 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I1121 14:29:38.465812 255774 start.go:564] Will wait 60s for crictl version
I1121 14:29:38.465875 255774 ssh_runner.go:195] Run: which crictl
I1121 14:29:38.470166 255774 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1121 14:29:38.507773 255774 start.go:580] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: v2.1.5
RuntimeApiVersion: v1
I1121 14:29:38.507860 255774 ssh_runner.go:195] Run: containerd --version
I1121 14:29:38.532247 255774 ssh_runner.go:195] Run: containerd --version
I1121 14:29:38.559098 255774 out.go:179] * Preparing Kubernetes v1.34.1 on containerd 2.1.5 ...
W1121 14:29:33.655577 213058 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1121 14:29:33.655599 213058 logs.go:123] Gathering logs for kube-scheduler [e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6] ...
I1121 14:29:33.655612 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6"
I1121 14:29:36.225853 213058 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1121 14:29:36.226247 213058 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1121 14:29:36.226304 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1121 14:29:36.226364 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1121 14:29:36.259583 213058 cri.go:89] found id: "9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1"
I1121 14:29:36.259613 213058 cri.go:89] found id: "934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780"
I1121 14:29:36.259619 213058 cri.go:89] found id: ""
I1121 14:29:36.259628 213058 logs.go:282] 2 containers: [9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1 934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780]
I1121 14:29:36.259690 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:36.264798 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:36.269597 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1121 14:29:36.269663 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1121 14:29:36.304312 213058 cri.go:89] found id: "4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359"
I1121 14:29:36.304335 213058 cri.go:89] found id: ""
I1121 14:29:36.304346 213058 logs.go:282] 1 containers: [4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359]
I1121 14:29:36.304403 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:36.309760 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1121 14:29:36.309833 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1121 14:29:36.342617 213058 cri.go:89] found id: ""
I1121 14:29:36.342643 213058 logs.go:282] 0 containers: []
W1121 14:29:36.342653 213058 logs.go:284] No container was found matching "coredns"
I1121 14:29:36.342660 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1121 14:29:36.342722 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1121 14:29:36.378880 213058 cri.go:89] found id: "e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6"
I1121 14:29:36.378909 213058 cri.go:89] found id: "f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545"
I1121 14:29:36.378914 213058 cri.go:89] found id: ""
I1121 14:29:36.378924 213058 logs.go:282] 2 containers: [e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6 f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545]
I1121 14:29:36.378996 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:36.384032 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:36.388866 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1121 14:29:36.388932 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1121 14:29:36.427253 213058 cri.go:89] found id: ""
I1121 14:29:36.427282 213058 logs.go:282] 0 containers: []
W1121 14:29:36.427293 213058 logs.go:284] No container was found matching "kube-proxy"
I1121 14:29:36.427300 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1121 14:29:36.427355 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1121 14:29:36.461581 213058 cri.go:89] found id: "94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3"
I1121 14:29:36.461604 213058 cri.go:89] found id: "56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463"
I1121 14:29:36.461609 213058 cri.go:89] found id: ""
I1121 14:29:36.461618 213058 logs.go:282] 2 containers: [94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3 56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463]
I1121 14:29:36.461677 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:36.466623 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:36.471422 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1121 14:29:36.471490 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1121 14:29:36.503502 213058 cri.go:89] found id: ""
I1121 14:29:36.503533 213058 logs.go:282] 0 containers: []
W1121 14:29:36.503566 213058 logs.go:284] No container was found matching "kindnet"
I1121 14:29:36.503575 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1121 14:29:36.503633 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1121 14:29:36.538350 213058 cri.go:89] found id: ""
I1121 14:29:36.538379 213058 logs.go:282] 0 containers: []
W1121 14:29:36.538390 213058 logs.go:284] No container was found matching "storage-provisioner"
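The repeated crictl calls above are one discovery pass per control-plane component; condensed into a shell loop, under the assumption that the component list is the one the log enumerates:

for c in kube-apiserver etcd coredns kube-scheduler kube-proxy kube-controller-manager kindnet storage-provisioner; do
  ids=$(sudo crictl ps -a --quiet --name="$c")
  echo "$c: ${ids:-none found}"
done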
I1121 14:29:36.538404 213058 logs.go:123] Gathering logs for kubelet ...
I1121 14:29:36.538419 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1121 14:29:36.666987 213058 logs.go:123] Gathering logs for dmesg ...
I1121 14:29:36.667025 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1121 14:29:36.685628 213058 logs.go:123] Gathering logs for describe nodes ...
I1121 14:29:36.685659 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1121 14:29:36.763464 213058 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1121 14:29:36.763491 213058 logs.go:123] Gathering logs for kube-apiserver [934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780] ...
I1121 14:29:36.763508 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780"
I1121 14:29:36.808789 213058 logs.go:123] Gathering logs for kube-scheduler [e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6] ...
I1121 14:29:36.808832 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6"
I1121 14:29:36.887558 213058 logs.go:123] Gathering logs for containerd ...
I1121 14:29:36.887596 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1121 14:29:36.952391 213058 logs.go:123] Gathering logs for container status ...
I1121 14:29:36.952434 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1121 14:29:36.993139 213058 logs.go:123] Gathering logs for kube-apiserver [9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1] ...
I1121 14:29:36.993167 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1"
I1121 14:29:37.037499 213058 logs.go:123] Gathering logs for etcd [4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359] ...
I1121 14:29:37.037552 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359"
I1121 14:29:37.084237 213058 logs.go:123] Gathering logs for kube-scheduler [f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545] ...
I1121 14:29:37.084270 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545"
I1121 14:29:37.132236 213058 logs.go:123] Gathering logs for kube-controller-manager [94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3] ...
I1121 14:29:37.132272 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3"
I1121 14:29:37.172720 213058 logs.go:123] Gathering logs for kube-controller-manager [56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463] ...
I1121 14:29:37.172753 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463"
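Each "Gathering logs for ..." step maps onto one of the commands shown above; the manual equivalents (substitute a real container ID for the <id> placeholder):

sudo journalctl -u kubelet -n 400
sudo journalctl -u containerd -n 400
sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400
sudo /usr/local/bin/crictl logs --tail 400 <id>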
I1121 14:29:34.341753 252125 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-apiserver_v1.34.1: (1.474720913s)
I1121 14:29:34.341781 252125 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1 from cache
I1121 14:29:34.341812 252125 containerd.go:285] Loading image: /var/lib/minikube/images/etcd_3.6.4-0
I1121 14:29:34.341855 252125 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/etcd_3.6.4-0
I1121 14:29:37.308520 252125 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/etcd_3.6.4-0: (2.966633628s)
I1121 14:29:37.308585 252125 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0 from cache
I1121 14:29:37.308616 252125 containerd.go:285] Loading image: /var/lib/minikube/images/storage-provisioner_v5
I1121 14:29:37.308666 252125 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/storage-provisioner_v5
I1121 14:29:37.772300 252125 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21847-11004/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 from cache
I1121 14:29:37.772349 252125 cache_images.go:125] Successfully loaded all cached images
I1121 14:29:37.772358 252125 cache_images.go:94] duration metric: took 13.627858156s to LoadCachedImages
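Cached images are loaded through ctr's import in the k8s.io namespace so the CRI can see them. Rerunning one import by hand and confirming the result (tarball path and image name from the log):

sudo ctr -n=k8s.io images import /var/lib/minikube/images/etcd_3.6.4-0
sudo crictl images | grep etcd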
I1121 14:29:37.772375 252125 kubeadm.go:935] updating node { 192.168.103.2 8443 v1.34.1 containerd true true} ...
I1121 14:29:37.772522 252125 kubeadm.go:947] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=no-preload-921956 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.103.2
[Install]
config:
{KubernetesVersion:v1.34.1 ClusterName:no-preload-921956 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
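The unit fragment above relies on standard systemd drop-in semantics: the bare ExecStart= line clears the ExecStart inherited from the base kubelet.service before the full command is set. A sketch of confirming the override took effect (drop-in file name taken from the scp lines later in the log):

sudo systemctl daemon-reload
systemctl cat kubelet | grep -A 3 '10-kubeadm.conf'
systemctl show kubelet -p ExecStart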
I1121 14:29:37.772622 252125 ssh_runner.go:195] Run: sudo crictl info
I1121 14:29:37.802988 252125 cni.go:84] Creating CNI manager for ""
I1121 14:29:37.803017 252125 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1121 14:29:37.803041 252125 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1121 14:29:37.803067 252125 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.103.2 APIServerPort:8443 KubernetesVersion:v1.34.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:no-preload-921956 NodeName:no-preload-921956 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.103.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.103.2 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1121 14:29:37.803212 252125 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.103.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///run/containerd/containerd.sock
name: "no-preload-921956"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.103.2"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.103.2"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.1
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
I1121 14:29:37.803298 252125 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.1
I1121 14:29:37.814189 252125 binaries.go:54] Didn't find k8s binaries: sudo ls /var/lib/minikube/binaries/v1.34.1: Process exited with status 2
stdout:
stderr:
ls: cannot access '/var/lib/minikube/binaries/v1.34.1': No such file or directory
Initiating transfer...
I1121 14:29:37.814255 252125 ssh_runner.go:195] Run: sudo mkdir -p /var/lib/minikube/binaries/v1.34.1
I1121 14:29:37.824124 252125 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubectl?checksum=file:https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubectl.sha256
I1121 14:29:37.824214 252125 download.go:108] Downloading: https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubeadm?checksum=file:https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubeadm.sha256 -> /home/jenkins/minikube-integration/21847-11004/.minikube/cache/linux/amd64/v1.34.1/kubeadm
I1121 14:29:37.824231 252125 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubectl
I1121 14:29:37.824217 252125 download.go:108] Downloading: https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubelet?checksum=file:https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubelet.sha256 -> /home/jenkins/minikube-integration/21847-11004/.minikube/cache/linux/amd64/v1.34.1/kubelet
I1121 14:29:37.829417 252125 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.34.1/kubectl: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubectl: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.34.1/kubectl': No such file or directory
I1121 14:29:37.829466 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/cache/linux/amd64/v1.34.1/kubectl --> /var/lib/minikube/binaries/v1.34.1/kubectl (60559544 bytes)
I1121 14:29:38.860713 252125 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1121 14:29:38.875498 252125 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubelet
I1121 14:29:38.880447 252125 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.34.1/kubelet: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubelet: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.34.1/kubelet': No such file or directory
I1121 14:29:38.880477 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/cache/linux/amd64/v1.34.1/kubelet --> /var/lib/minikube/binaries/v1.34.1/kubelet (59195684 bytes)
I1121 14:29:39.014274 252125 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubeadm
I1121 14:29:39.021151 252125 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.34.1/kubeadm: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubeadm: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.34.1/kubeadm': No such file or directory
I1121 14:29:39.021187 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/cache/linux/amd64/v1.34.1/kubeadm --> /var/lib/minikube/binaries/v1.34.1/kubeadm (74027192 bytes)
I1121 14:29:39.234010 252125 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1121 14:29:39.244382 252125 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (322 bytes)
I1121 14:29:39.259897 252125 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1121 14:29:39.279143 252125 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2232 bytes)
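The checksum=file:<url> suffix in the download URLs above is go-getter's verification syntax: the fetched binary is hashed and compared against the published .sha256 file. A manual equivalent for kubectl (URLs from the log):

curl -LO https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubectl
curl -LO https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubectl.sha256
echo "$(cat kubectl.sha256)  kubectl" | sha256sum --check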
I1121 14:29:38.560688 255774 cli_runner.go:164] Run: docker network inspect default-k8s-diff-port-376255 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1121 14:29:38.580956 255774 ssh_runner.go:195] Run: grep 192.168.85.1 host.minikube.internal$ /etc/hosts
I1121 14:29:38.585728 255774 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.85.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
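The hosts update above is deliberately idempotent: strip any existing host.minikube.internal line, append the fresh mapping, then copy the file back in one shot. The same pattern, spelled out with the IP from the log:

{ grep -v $'\thost.minikube.internal$' /etc/hosts; echo "192.168.85.1 host.minikube.internal"; } > /tmp/hosts.new
sudo cp /tmp/hosts.new /etc/hosts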
I1121 14:29:38.599140 255774 kubeadm.go:884] updating cluster {Name:default-k8s-diff-port-376255 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8444 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:default-k8s-diff-port-376255 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8444 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1121 14:29:38.599295 255774 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
I1121 14:29:38.599391 255774 ssh_runner.go:195] Run: sudo crictl images --output json
I1121 14:29:38.631637 255774 containerd.go:627] all images are preloaded for containerd runtime.
I1121 14:29:38.631660 255774 containerd.go:534] Images already preloaded, skipping extraction
I1121 14:29:38.631720 255774 ssh_runner.go:195] Run: sudo crictl images --output json
I1121 14:29:38.665498 255774 containerd.go:627] all images are preloaded for containerd runtime.
I1121 14:29:38.665522 255774 cache_images.go:86] Images are preloaded, skipping loading
I1121 14:29:38.665530 255774 kubeadm.go:935] updating node { 192.168.85.2 8444 v1.34.1 containerd true true} ...
I1121 14:29:38.665659 255774 kubeadm.go:947] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=default-k8s-diff-port-376255 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.85.2
[Install]
config:
{KubernetesVersion:v1.34.1 ClusterName:default-k8s-diff-port-376255 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1121 14:29:38.665752 255774 ssh_runner.go:195] Run: sudo crictl info
I1121 14:29:38.694106 255774 cni.go:84] Creating CNI manager for ""
I1121 14:29:38.694138 255774 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1121 14:29:38.694156 255774 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1121 14:29:38.694182 255774 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.85.2 APIServerPort:8444 KubernetesVersion:v1.34.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:default-k8s-diff-port-376255 NodeName:default-k8s-diff-port-376255 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.85.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.85.2 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1121 14:29:38.694318 255774 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.85.2
bindPort: 8444
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///run/containerd/containerd.sock
name: "default-k8s-diff-port-376255"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.85.2"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.85.2"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8444
etcd:
local:
dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.1
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
I1121 14:29:38.694377 255774 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.1
I1121 14:29:38.704016 255774 binaries.go:51] Found k8s binaries, skipping transfer
I1121 14:29:38.704074 255774 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1121 14:29:38.712471 255774 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (332 bytes)
I1121 14:29:38.726311 255774 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1121 14:29:38.743589 255774 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2240 bytes)
I1121 14:29:38.759275 255774 ssh_runner.go:195] Run: grep 192.168.85.2 control-plane.minikube.internal$ /etc/hosts
I1121 14:29:38.763723 255774 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.85.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1121 14:29:38.775814 255774 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1121 14:29:38.870850 255774 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1121 14:29:38.898876 255774 certs.go:69] Setting up /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255 for IP: 192.168.85.2
I1121 14:29:38.898898 255774 certs.go:195] generating shared ca certs ...
I1121 14:29:38.898917 255774 certs.go:227] acquiring lock for ca certs: {Name:mk4ac68319839cd6684afc66121341297238277f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:38.899068 255774 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21847-11004/.minikube/ca.key
I1121 14:29:38.899116 255774 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21847-11004/.minikube/proxy-client-ca.key
I1121 14:29:38.899130 255774 certs.go:257] generating profile certs ...
I1121 14:29:38.899196 255774 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/client.key
I1121 14:29:38.899223 255774 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/client.crt with IP's: []
I1121 14:29:39.101636 255774 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/client.crt ...
I1121 14:29:39.101669 255774 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/client.crt: {Name:mk48f410a390b01d5b10a9357a2648374ae8306b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:39.101873 255774 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/client.key ...
I1121 14:29:39.101885 255774 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/client.key: {Name:mkb89c45215e08640f5b5fa9a6de6863ea0983e1 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:39.102008 255774 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/apiserver.key.3377c066
I1121 14:29:39.102024 255774 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/apiserver.crt.3377c066 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.85.2]
I1121 14:29:39.438352 255774 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/apiserver.crt.3377c066 ...
I1121 14:29:39.438387 255774 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/apiserver.crt.3377c066: {Name:mkc5f7dc938a9541dec0c2accd850515b39a25d9 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:39.438574 255774 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/apiserver.key.3377c066 ...
I1121 14:29:39.438586 255774 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/apiserver.key.3377c066: {Name:mka67f2d91e35acd02a0ed4174188db6877ef796 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:39.438666 255774 certs.go:382] copying /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/apiserver.crt.3377c066 -> /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/apiserver.crt
I1121 14:29:39.438744 255774 certs.go:386] copying /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/apiserver.key.3377c066 -> /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/apiserver.key
I1121 14:29:39.438811 255774 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/proxy-client.key
I1121 14:29:39.438826 255774 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/proxy-client.crt with IP's: []
I1121 14:29:39.523793 255774 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/proxy-client.crt ...
I1121 14:29:39.523827 255774 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/proxy-client.crt: {Name:mk2418751bb08ae4f2cae2628ba430b2e731f823 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:39.524011 255774 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/proxy-client.key ...
I1121 14:29:39.524031 255774 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/proxy-client.key: {Name:mk12031f310020bd38886fd870544563c6ab1faf Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
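The apiserver cert generated above is signed by the shared minikubeCA and carries four IP SANs: the first IP of the service CIDR (10.96.0.1), loopback, 10.0.0.1, and the node IP. A hedged openssl sketch of the same issuance (minikube actually does this in Go via crypto.go, not openssl; file names here are illustrative):

openssl req -new -newkey rsa:2048 -nodes -subj "/CN=minikube" \
  -keyout apiserver.key -out apiserver.csr
openssl x509 -req -in apiserver.csr -CA ca.crt -CAkey ca.key -CAcreateserial \
  -days 365 -out apiserver.crt \
  -extfile <(printf "subjectAltName=IP:10.96.0.1,IP:127.0.0.1,IP:10.0.0.1,IP:192.168.85.2")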
I1121 14:29:39.524255 255774 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/14523.pem (1338 bytes)
W1121 14:29:39.524307 255774 certs.go:480] ignoring /home/jenkins/minikube-integration/21847-11004/.minikube/certs/14523_empty.pem, impossibly tiny 0 bytes
I1121 14:29:39.524323 255774 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/ca-key.pem (1675 bytes)
I1121 14:29:39.524353 255774 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/ca.pem (1078 bytes)
I1121 14:29:39.524383 255774 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/cert.pem (1123 bytes)
I1121 14:29:39.524407 255774 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/key.pem (1675 bytes)
I1121 14:29:39.524445 255774 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-11004/.minikube/files/etc/ssl/certs/145232.pem (1708 bytes)
I1121 14:29:39.525071 255774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1121 14:29:39.546065 255774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I1121 14:29:39.565880 255774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1121 14:29:39.585450 255774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I1121 14:29:39.604394 255774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1440 bytes)
I1121 14:29:39.623736 255774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I1121 14:29:39.642460 255774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1121 14:29:39.661463 255774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/default-k8s-diff-port-376255/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I1121 14:29:39.681314 255774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/certs/14523.pem --> /usr/share/ca-certificates/14523.pem (1338 bytes)
I1121 14:29:39.879137 255774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/files/etc/ssl/certs/145232.pem --> /usr/share/ca-certificates/145232.pem (1708 bytes)
I1121 14:29:39.899730 255774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1121 14:29:39.918630 255774 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1121 14:29:39.935942 255774 ssh_runner.go:195] Run: openssl version
I1121 14:29:39.943062 255774 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/145232.pem && ln -fs /usr/share/ca-certificates/145232.pem /etc/ssl/certs/145232.pem"
I1121 14:29:40.020861 255774 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/145232.pem
I1121 14:29:40.026152 255774 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 21 14:01 /usr/share/ca-certificates/145232.pem
I1121 14:29:40.026209 255774 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/145232.pem
I1121 14:29:40.067681 255774 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/145232.pem /etc/ssl/certs/3ec20f2e.0"
I1121 14:29:40.077051 255774 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1121 14:29:40.087944 255774 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1121 14:29:40.092369 255774 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 21 13:56 /usr/share/ca-certificates/minikubeCA.pem
I1121 14:29:40.092434 255774 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1121 14:29:40.132125 255774 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1121 14:29:40.142255 255774 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/14523.pem && ln -fs /usr/share/ca-certificates/14523.pem /etc/ssl/certs/14523.pem"
I1121 14:29:40.152828 255774 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/14523.pem
I1121 14:29:40.157171 255774 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 21 14:01 /usr/share/ca-certificates/14523.pem
I1121 14:29:40.157265 255774 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/14523.pem
I1121 14:29:40.199881 255774 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/14523.pem /etc/ssl/certs/51391683.0"
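The 8-hex-digit .0 symlinks created above follow OpenSSL's subject-hash directory convention, which is why each ln -fs is preceded by an openssl x509 -hash run. Recreating one link by hand (paths from the log; the hash comes out as b5213941 here):

h=$(openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem)
sudo ln -fs /etc/ssl/certs/minikubeCA.pem "/etc/ssl/certs/${h}.0"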
I1121 14:29:40.210053 255774 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1121 14:29:40.214456 255774 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1121 14:29:40.214524 255774 kubeadm.go:401] StartCluster: {Name:default-k8s-diff-port-376255 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8444 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:default-k8s-diff-port-376255 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8444 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1121 14:29:40.214625 255774 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I1121 14:29:40.214692 255774 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I1121 14:29:40.249359 255774 cri.go:89] found id: ""
I1121 14:29:40.249429 255774 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1121 14:29:40.259121 255774 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1121 14:29:40.270847 255774 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I1121 14:29:40.270910 255774 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1121 14:29:40.283266 255774 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1121 14:29:40.283287 255774 kubeadm.go:158] found existing configuration files:
I1121 14:29:40.283341 255774 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8444 /etc/kubernetes/admin.conf
I1121 14:29:40.293676 255774 kubeadm.go:164] "https://control-plane.minikube.internal:8444" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8444 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1121 14:29:40.293725 255774 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1121 14:29:40.303277 255774 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8444 /etc/kubernetes/kubelet.conf
I1121 14:29:40.313015 255774 kubeadm.go:164] "https://control-plane.minikube.internal:8444" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8444 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1121 14:29:40.313073 255774 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1121 14:29:40.322086 255774 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8444 /etc/kubernetes/controller-manager.conf
I1121 14:29:40.330920 255774 kubeadm.go:164] "https://control-plane.minikube.internal:8444" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8444 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1121 14:29:40.331015 255774 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1121 14:29:40.339376 255774 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8444 /etc/kubernetes/scheduler.conf
I1121 14:29:40.347984 255774 kubeadm.go:164] "https://control-plane.minikube.internal:8444" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8444 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1121 14:29:40.348046 255774 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
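The four grep-then-rm pairs above sweep out any kubeconfig that points at a stale control-plane endpoint before kubeadm init runs; as a loop, with the endpoint and paths from the log:

for f in admin.conf kubelet.conf controller-manager.conf scheduler.conf; do
  sudo grep -q "https://control-plane.minikube.internal:8444" "/etc/kubernetes/$f" \
    || sudo rm -f "/etc/kubernetes/$f"
done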
I1121 14:29:40.356683 255774 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1121 14:29:40.404354 255774 kubeadm.go:319] [init] Using Kubernetes version: v1.34.1
I1121 14:29:40.404455 255774 kubeadm.go:319] [preflight] Running pre-flight checks
I1121 14:29:40.435448 255774 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I1121 14:29:40.435583 255774 kubeadm.go:319] KERNEL_VERSION: 6.8.0-1044-gcp
I1121 14:29:40.435628 255774 kubeadm.go:319] OS: Linux
I1121 14:29:40.435689 255774 kubeadm.go:319] CGROUPS_CPU: enabled
I1121 14:29:40.435827 255774 kubeadm.go:319] CGROUPS_CPUSET: enabled
I1121 14:29:40.435905 255774 kubeadm.go:319] CGROUPS_DEVICES: enabled
I1121 14:29:40.436039 255774 kubeadm.go:319] CGROUPS_FREEZER: enabled
I1121 14:29:40.436108 255774 kubeadm.go:319] CGROUPS_MEMORY: enabled
I1121 14:29:40.436176 255774 kubeadm.go:319] CGROUPS_PIDS: enabled
I1121 14:29:40.436276 255774 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I1121 14:29:40.436351 255774 kubeadm.go:319] CGROUPS_IO: enabled
I1121 14:29:40.508224 255774 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1121 14:29:40.508370 255774 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1121 14:29:40.508531 255774 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I1121 14:29:40.513996 255774 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1121 14:29:39.295828 252125 ssh_runner.go:195] Run: grep 192.168.103.2 control-plane.minikube.internal$ /etc/hosts
I1121 14:29:39.301164 252125 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.103.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1121 14:29:39.312709 252125 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1121 14:29:39.400897 252125 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1121 14:29:39.429294 252125 certs.go:69] Setting up /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956 for IP: 192.168.103.2
I1121 14:29:39.429315 252125 certs.go:195] generating shared ca certs ...
I1121 14:29:39.429332 252125 certs.go:227] acquiring lock for ca certs: {Name:mk4ac68319839cd6684afc66121341297238277f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:39.429485 252125 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21847-11004/.minikube/ca.key
I1121 14:29:39.429583 252125 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21847-11004/.minikube/proxy-client-ca.key
I1121 14:29:39.429600 252125 certs.go:257] generating profile certs ...
I1121 14:29:39.429678 252125 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/client.key
I1121 14:29:39.429693 252125 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/client.crt with IP's: []
I1121 14:29:39.556088 252125 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/client.crt ...
I1121 14:29:39.556115 252125 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/client.crt: {Name:mkc697edce2d4ccb5a4a2ccbe74255aef4a205c9 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:39.556297 252125 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/client.key ...
I1121 14:29:39.556312 252125 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/client.key: {Name:mkad7b167b883af61314c3f8b6c71358edc782dd Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:39.556419 252125 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/apiserver.key.a2c9a71d
I1121 14:29:39.556435 252125 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/apiserver.crt.a2c9a71d with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.103.2]
I1121 14:29:39.871499 252125 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/apiserver.crt.a2c9a71d ...
I1121 14:29:39.871529 252125 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/apiserver.crt.a2c9a71d: {Name:mkc839b1c936af809ed1159ef4599336fd260d32 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:39.871726 252125 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/apiserver.key.a2c9a71d ...
I1121 14:29:39.871748 252125 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/apiserver.key.a2c9a71d: {Name:mkc2f0abcac84f6547f3e0edb165e90b14fdd7c2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:39.871882 252125 certs.go:382] copying /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/apiserver.crt.a2c9a71d -> /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/apiserver.crt
I1121 14:29:39.871997 252125 certs.go:386] copying /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/apiserver.key.a2c9a71d -> /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/apiserver.key
I1121 14:29:39.872096 252125 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/proxy-client.key
I1121 14:29:39.872120 252125 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/proxy-client.crt with IP's: []
I1121 14:29:40.083173 252125 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/proxy-client.crt ...
I1121 14:29:40.083201 252125 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/proxy-client.crt: {Name:mkba7efd029f616230e0b3cf14c4f32abac0549e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:40.083385 252125 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/proxy-client.key ...
I1121 14:29:40.083414 252125 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/proxy-client.key: {Name:mk24f6fbb57f5dfce4a401be193e0a832a6ccf6a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:40.083661 252125 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/14523.pem (1338 bytes)
W1121 14:29:40.083700 252125 certs.go:480] ignoring /home/jenkins/minikube-integration/21847-11004/.minikube/certs/14523_empty.pem, impossibly tiny 0 bytes
I1121 14:29:40.083711 252125 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/ca-key.pem (1675 bytes)
I1121 14:29:40.083749 252125 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/ca.pem (1078 bytes)
I1121 14:29:40.083780 252125 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/cert.pem (1123 bytes)
I1121 14:29:40.083827 252125 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-11004/.minikube/certs/key.pem (1675 bytes)
I1121 14:29:40.083887 252125 certs.go:484] found cert: /home/jenkins/minikube-integration/21847-11004/.minikube/files/etc/ssl/certs/145232.pem (1708 bytes)
I1121 14:29:40.084653 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1121 14:29:40.106430 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I1121 14:29:40.126520 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1121 14:29:40.148412 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I1121 14:29:40.169973 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
I1121 14:29:40.191493 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I1121 14:29:40.214458 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1121 14:29:40.234692 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/profiles/no-preload-921956/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I1121 14:29:40.261986 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/files/etc/ssl/certs/145232.pem --> /usr/share/ca-certificates/145232.pem (1708 bytes)
I1121 14:29:40.352437 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1121 14:29:40.372804 252125 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21847-11004/.minikube/certs/14523.pem --> /usr/share/ca-certificates/14523.pem (1338 bytes)
I1121 14:29:40.394700 252125 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1121 14:29:40.411183 252125 ssh_runner.go:195] Run: openssl version
I1121 14:29:40.419607 252125 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1121 14:29:40.431060 252125 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1121 14:29:40.436371 252125 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 21 13:56 /usr/share/ca-certificates/minikubeCA.pem
I1121 14:29:40.436429 252125 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1121 14:29:40.481320 252125 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1121 14:29:40.492797 252125 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/14523.pem && ln -fs /usr/share/ca-certificates/14523.pem /etc/ssl/certs/14523.pem"
I1121 14:29:40.502878 252125 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/14523.pem
I1121 14:29:40.507432 252125 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 21 14:01 /usr/share/ca-certificates/14523.pem
I1121 14:29:40.507499 252125 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/14523.pem
I1121 14:29:40.567779 252125 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/14523.pem /etc/ssl/certs/51391683.0"
I1121 14:29:40.577673 252125 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/145232.pem && ln -fs /usr/share/ca-certificates/145232.pem /etc/ssl/certs/145232.pem"
I1121 14:29:40.587826 252125 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/145232.pem
I1121 14:29:40.592472 252125 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 21 14:01 /usr/share/ca-certificates/145232.pem
I1121 14:29:40.592528 252125 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/145232.pem
I1121 14:29:40.627626 252125 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/145232.pem /etc/ssl/certs/3ec20f2e.0"
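The run above installs each CA into the guest's trust store: copy the PEM into /usr/share/ca-certificates, ask openssl for its subject hash, then symlink /etc/ssl/certs/<hash>.0 back at the PEM (the b5213941.0, 51391683.0, and 3ec20f2e.0 links). A minimal local sketch of that step, shelling out to openssl exactly as the logged ssh_runner commands do (paths and helper name are illustrative, not minikube's API):

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
)

// installCert mirrors the logged flow: compute the OpenSSL subject hash of a
// PEM cert, then symlink /etc/ssl/certs/<hash>.0 to it so OpenSSL-based
// tools pick it up as a trusted CA.
func installCert(pemPath string) error {
	out, err := exec.Command("openssl", "x509", "-hash", "-noout", "-in", pemPath).Output()
	if err != nil {
		return fmt.Errorf("hashing %s: %w", pemPath, err)
	}
	hash := strings.TrimSpace(string(out)) // e.g. "b5213941"
	link := filepath.Join("/etc/ssl/certs", hash+".0")
	_ = os.Remove(link) // "ln -fs" semantics: replace any existing link
	return os.Symlink(pemPath, link)
}

func main() {
	if err := installCert("/usr/share/ca-certificates/minikubeCA.pem"); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```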
I1121 14:29:40.637464 252125 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1121 14:29:40.641884 252125 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1121 14:29:40.641943 252125 kubeadm.go:401] StartCluster: {Name:no-preload-921956 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763507788-21924@sha256:1e20c07242571f3eb6bbb213b88269c923b5578034662e07409047e7102bdd1a Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-921956 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.103.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1121 14:29:40.642030 252125 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I1121 14:29:40.642085 252125 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I1121 14:29:40.673351 252125 cri.go:89] found id: ""
I1121 14:29:40.673423 252125 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1121 14:29:40.682715 252125 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1121 14:29:40.691493 252125 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I1121 14:29:40.691581 252125 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1121 14:29:40.700143 252125 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1121 14:29:40.700160 252125 kubeadm.go:158] found existing configuration files:
I1121 14:29:40.700205 252125 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1121 14:29:40.708734 252125 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1121 14:29:40.708799 252125 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1121 14:29:40.717135 252125 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1121 14:29:40.726191 252125 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1121 14:29:40.726262 252125 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1121 14:29:40.734074 252125 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1121 14:29:40.742647 252125 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1121 14:29:40.742709 252125 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1121 14:29:40.751091 252125 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1121 14:29:40.759770 252125 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1121 14:29:40.759841 252125 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
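Each grep/rm pair above is the stale-config check: a kubeconfig under /etc/kubernetes that does not mention https://control-plane.minikube.internal:8443 is removed before kubeadm init, so init can write a fresh one. A hedged sketch of that loop, assuming direct filesystem access rather than the SSH runner:

```go
package main

import (
	"bytes"
	"fmt"
	"os"
)

const endpoint = "https://control-plane.minikube.internal:8443"

func main() {
	confs := []string{
		"/etc/kubernetes/admin.conf",
		"/etc/kubernetes/kubelet.conf",
		"/etc/kubernetes/controller-manager.conf",
		"/etc/kubernetes/scheduler.conf",
	}
	for _, c := range confs {
		data, err := os.ReadFile(c)
		// Missing file or missing endpoint: either way, clear it so a
		// fresh config gets written (matches the "will remove" lines).
		if err != nil || !bytes.Contains(data, []byte(endpoint)) {
			os.Remove(c)
			fmt.Println("removed stale", c)
		}
	}
}
```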
I1121 14:29:40.768253 252125 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1121 14:29:40.810825 252125 kubeadm.go:319] [init] Using Kubernetes version: v1.34.1
I1121 14:29:40.810892 252125 kubeadm.go:319] [preflight] Running pre-flight checks
I1121 14:29:40.831836 252125 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I1121 14:29:40.831940 252125 kubeadm.go:319] KERNEL_VERSION: 6.8.0-1044-gcp
I1121 14:29:40.832026 252125 kubeadm.go:319] OS: Linux
I1121 14:29:40.832115 252125 kubeadm.go:319] CGROUPS_CPU: enabled
I1121 14:29:40.832212 252125 kubeadm.go:319] CGROUPS_CPUSET: enabled
I1121 14:29:40.832286 252125 kubeadm.go:319] CGROUPS_DEVICES: enabled
I1121 14:29:40.832358 252125 kubeadm.go:319] CGROUPS_FREEZER: enabled
I1121 14:29:40.832432 252125 kubeadm.go:319] CGROUPS_MEMORY: enabled
I1121 14:29:40.832504 252125 kubeadm.go:319] CGROUPS_PIDS: enabled
I1121 14:29:40.832668 252125 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I1121 14:29:40.832735 252125 kubeadm.go:319] CGROUPS_IO: enabled
I1121 14:29:40.895341 252125 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1121 14:29:40.895491 252125 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1121 14:29:40.895637 252125 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I1121 14:29:40.901358 252125 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
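The init invocation at 14:29:40.768253 carries one long --ignore-preflight-errors list because the docker driver runs the node inside a container, where checks like SystemVerification, Swap, and Mem cannot pass. A small sketch of how such a command line can be assembled (the flag values are taken from the log; the program only prints the command rather than running it):

```go
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	ignored := []string{
		"DirAvailable--etc-kubernetes-manifests",
		"Port-10250", "Swap", "NumCPU", "Mem", "SystemVerification",
		"FileContent--proc-sys-net-bridge-bridge-nf-call-iptables",
	}
	cmd := exec.Command("kubeadm", "init",
		"--config", "/var/tmp/minikube/kubeadm.yaml",
		"--ignore-preflight-errors="+strings.Join(ignored, ","),
	)
	fmt.Println(cmd.String()) // inspect the full command line in this sketch
}
```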
I1121 14:29:37.249631 249617 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I1121 14:29:37.262987 249617 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.28.0/kubectl ...
I1121 14:29:37.263020 249617 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I1121 14:29:37.283444 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I1121 14:29:38.138719 249617 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1121 14:29:38.138808 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:38.138810 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes old-k8s-version-012258 minikube.k8s.io/updated_at=2025_11_21T14_29_38_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=29e0798733fefbdc471fd2bbb38f6a7ae2a26162 minikube.k8s.io/name=old-k8s-version-012258 minikube.k8s.io/primary=true
I1121 14:29:38.150782 249617 ops.go:34] apiserver oom_adj: -16
I1121 14:29:38.225220 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:38.726231 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:39.225533 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:39.725591 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:40.225601 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:40.725734 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:41.226112 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
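The repeating `kubectl get sa default` lines from process 249617 (one every ~500ms, per the timestamps) are a readiness poll: RBAC work such as the minikube-rbac clusterrolebinding only makes sense once the default service account exists. A sketch of that wait loop, assuming kubectl on PATH (function name is illustrative):

```go
package main

import (
	"fmt"
	"os/exec"
	"time"
)

// waitForDefaultSA polls until `kubectl get sa default` succeeds, mirroring
// the ~500ms retry cadence visible in the log timestamps above.
func waitForDefaultSA(kubeconfig string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		err := exec.Command("kubectl", "get", "sa", "default",
			"--kubeconfig="+kubeconfig).Run()
		if err == nil {
			return nil
		}
		time.Sleep(500 * time.Millisecond)
	}
	return fmt.Errorf("default service account not ready after %s", timeout)
}

func main() {
	if err := waitForDefaultSA("/var/lib/minikube/kubeconfig", 2*time.Minute); err != nil {
		fmt.Println(err)
	}
}
```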
I1121 14:29:40.521190 255774 out.go:252] - Generating certificates and keys ...
I1121 14:29:40.521325 255774 kubeadm.go:319] [certs] Using existing ca certificate authority
I1121 14:29:40.521431 255774 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1121 14:29:41.003970 255774 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I1121 14:29:41.240665 255774 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I1121 14:29:41.425685 255774 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1121 14:29:41.689428 255774 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1121 14:29:41.923373 255774 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1121 14:29:41.923563 255774 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [default-k8s-diff-port-376255 localhost] and IPs [192.168.85.2 127.0.0.1 ::1]
I1121 14:29:42.051973 255774 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1121 14:29:42.052979 255774 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [default-k8s-diff-port-376255 localhost] and IPs [192.168.85.2 127.0.0.1 ::1]
I1121 14:29:42.277531 255774 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1121 14:29:42.491572 255774 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1121 14:29:42.605458 255774 kubeadm.go:319] [certs] Generating "sa" key and public key
I1121 14:29:42.605535 255774 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1121 14:29:42.870659 255774 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1121 14:29:43.039072 255774 kubeadm.go:319] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I1121 14:29:43.228611 255774 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1121 14:29:43.489903 255774 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1121 14:29:43.563271 255774 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1121 14:29:43.563948 255774 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1121 14:29:43.568453 255774 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1121 14:29:39.727688 213058 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1121 14:29:39.728083 213058 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1121 14:29:39.728134 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1121 14:29:39.728197 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1121 14:29:39.758413 213058 cri.go:89] found id: "9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1"
I1121 14:29:39.758436 213058 cri.go:89] found id: "934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780"
I1121 14:29:39.758441 213058 cri.go:89] found id: ""
I1121 14:29:39.758452 213058 logs.go:282] 2 containers: [9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1 934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780]
I1121 14:29:39.758508 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:39.763439 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:39.767912 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1121 14:29:39.767980 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1121 14:29:39.802923 213058 cri.go:89] found id: "4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359"
I1121 14:29:39.802948 213058 cri.go:89] found id: ""
I1121 14:29:39.802957 213058 logs.go:282] 1 containers: [4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359]
I1121 14:29:39.803013 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:39.807778 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1121 14:29:39.807853 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1121 14:29:39.835286 213058 cri.go:89] found id: ""
I1121 14:29:39.835314 213058 logs.go:282] 0 containers: []
W1121 14:29:39.835335 213058 logs.go:284] No container was found matching "coredns"
I1121 14:29:39.835343 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1121 14:29:39.835408 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1121 14:29:39.864986 213058 cri.go:89] found id: "e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6"
I1121 14:29:39.865034 213058 cri.go:89] found id: "f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545"
I1121 14:29:39.865040 213058 cri.go:89] found id: ""
I1121 14:29:39.865050 213058 logs.go:282] 2 containers: [e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6 f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545]
I1121 14:29:39.865105 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:39.869441 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:39.873676 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1121 14:29:39.873739 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1121 14:29:39.902671 213058 cri.go:89] found id: ""
I1121 14:29:39.902698 213058 logs.go:282] 0 containers: []
W1121 14:29:39.902707 213058 logs.go:284] No container was found matching "kube-proxy"
I1121 14:29:39.902715 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1121 14:29:39.902762 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1121 14:29:39.933452 213058 cri.go:89] found id: "94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3"
I1121 14:29:39.933477 213058 cri.go:89] found id: "56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463"
I1121 14:29:39.933483 213058 cri.go:89] found id: ""
I1121 14:29:39.933492 213058 logs.go:282] 2 containers: [94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3 56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463]
I1121 14:29:39.933557 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:39.938051 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:39.942029 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1121 14:29:39.942094 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1121 14:29:39.969991 213058 cri.go:89] found id: ""
I1121 14:29:39.970018 213058 logs.go:282] 0 containers: []
W1121 14:29:39.970028 213058 logs.go:284] No container was found matching "kindnet"
I1121 14:29:39.970036 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1121 14:29:39.970086 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1121 14:29:39.997381 213058 cri.go:89] found id: ""
I1121 14:29:39.997406 213058 logs.go:282] 0 containers: []
W1121 14:29:39.997417 213058 logs.go:284] No container was found matching "storage-provisioner"
I1121 14:29:39.997429 213058 logs.go:123] Gathering logs for kube-controller-manager [94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3] ...
I1121 14:29:39.997443 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3"
I1121 14:29:40.027188 213058 logs.go:123] Gathering logs for kube-controller-manager [56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463] ...
I1121 14:29:40.027213 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463"
I1121 14:29:40.067878 213058 logs.go:123] Gathering logs for container status ...
I1121 14:29:40.067906 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1121 14:29:40.101358 213058 logs.go:123] Gathering logs for dmesg ...
I1121 14:29:40.101388 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1121 14:29:40.115674 213058 logs.go:123] Gathering logs for kube-apiserver [934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780] ...
I1121 14:29:40.115704 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780"
I1121 14:29:40.153845 213058 logs.go:123] Gathering logs for etcd [4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359] ...
I1121 14:29:40.153871 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359"
I1121 14:29:40.188913 213058 logs.go:123] Gathering logs for containerd ...
I1121 14:29:40.188944 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1121 14:29:40.244995 213058 logs.go:123] Gathering logs for kubelet ...
I1121 14:29:40.245033 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1121 14:29:40.351506 213058 logs.go:123] Gathering logs for describe nodes ...
I1121 14:29:40.351558 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1121 14:29:40.417221 213058 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1121 14:29:40.417244 213058 logs.go:123] Gathering logs for kube-apiserver [9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1] ...
I1121 14:29:40.417263 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1"
I1121 14:29:40.457789 213058 logs.go:123] Gathering logs for kube-scheduler [e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6] ...
I1121 14:29:40.457836 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6"
I1121 14:29:40.520712 213058 logs.go:123] Gathering logs for kube-scheduler [f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545] ...
I1121 14:29:40.520748 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545"
I1121 14:29:43.056648 213058 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1121 14:29:43.057094 213058 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1121 14:29:43.057150 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1121 14:29:43.057204 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1121 14:29:43.085236 213058 cri.go:89] found id: "9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1"
I1121 14:29:43.085260 213058 cri.go:89] found id: "934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780"
I1121 14:29:43.085265 213058 cri.go:89] found id: ""
I1121 14:29:43.085275 213058 logs.go:282] 2 containers: [9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1 934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780]
I1121 14:29:43.085333 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:43.089868 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:43.094074 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1121 14:29:43.094134 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1121 14:29:43.122420 213058 cri.go:89] found id: "4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359"
I1121 14:29:43.122447 213058 cri.go:89] found id: ""
I1121 14:29:43.122457 213058 logs.go:282] 1 containers: [4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359]
I1121 14:29:43.122512 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:43.126830 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1121 14:29:43.126892 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1121 14:29:43.156518 213058 cri.go:89] found id: ""
I1121 14:29:43.156566 213058 logs.go:282] 0 containers: []
W1121 14:29:43.156577 213058 logs.go:284] No container was found matching "coredns"
I1121 14:29:43.156584 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1121 14:29:43.156646 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1121 14:29:43.185212 213058 cri.go:89] found id: "e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6"
I1121 14:29:43.185233 213058 cri.go:89] found id: "f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545"
I1121 14:29:43.185238 213058 cri.go:89] found id: ""
I1121 14:29:43.185277 213058 logs.go:282] 2 containers: [e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6 f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545]
I1121 14:29:43.185338 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:43.190000 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:43.194074 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1121 14:29:43.194131 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1121 14:29:43.224175 213058 cri.go:89] found id: ""
I1121 14:29:43.224201 213058 logs.go:282] 0 containers: []
W1121 14:29:43.224211 213058 logs.go:284] No container was found matching "kube-proxy"
I1121 14:29:43.224218 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1121 14:29:43.224277 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1121 14:29:43.258260 213058 cri.go:89] found id: "94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3"
I1121 14:29:43.258292 213058 cri.go:89] found id: "56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463"
I1121 14:29:43.258299 213058 cri.go:89] found id: ""
I1121 14:29:43.258310 213058 logs.go:282] 2 containers: [94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3 56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463]
I1121 14:29:43.258378 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:43.263276 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:43.268195 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1121 14:29:43.268264 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1121 14:29:43.303269 213058 cri.go:89] found id: ""
I1121 14:29:43.303300 213058 logs.go:282] 0 containers: []
W1121 14:29:43.303311 213058 logs.go:284] No container was found matching "kindnet"
I1121 14:29:43.303319 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1121 14:29:43.303379 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1121 14:29:43.333956 213058 cri.go:89] found id: ""
I1121 14:29:43.333985 213058 logs.go:282] 0 containers: []
W1121 14:29:43.333995 213058 logs.go:284] No container was found matching "storage-provisioner"
I1121 14:29:43.334007 213058 logs.go:123] Gathering logs for container status ...
I1121 14:29:43.334021 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1121 14:29:43.366338 213058 logs.go:123] Gathering logs for kubelet ...
I1121 14:29:43.366369 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1121 14:29:43.458987 213058 logs.go:123] Gathering logs for kube-apiserver [9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1] ...
I1121 14:29:43.459027 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1"
I1121 14:29:43.497960 213058 logs.go:123] Gathering logs for kube-apiserver [934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780] ...
I1121 14:29:43.497995 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780"
I1121 14:29:43.539997 213058 logs.go:123] Gathering logs for etcd [4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359] ...
I1121 14:29:43.540035 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359"
I1121 14:29:43.575882 213058 logs.go:123] Gathering logs for kube-scheduler [e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6] ...
I1121 14:29:43.575911 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6"
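Process 213058 is stuck in a diagnose loop: every few seconds it probes the apiserver's /healthz, gets connection refused, and falls back to enumerating containers with crictl and tailing their logs. The probe itself is just an HTTPS GET that must tolerate the apiserver's cluster-local certificate; a minimal sketch of one probe:

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{
		Timeout: 2 * time.Second,
		// The apiserver serves a cluster-local cert, so a bare health
		// probe without a kubeconfig skips verification.
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Get("https://192.168.76.2:8443/healthz")
	if err != nil {
		fmt.Println("stopped:", err) // e.g. "connect: connection refused"
		return
	}
	defer resp.Body.Close()
	fmt.Println("healthz status:", resp.Status)
}
```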
I1121 14:29:40.903405 252125 out.go:252] - Generating certificates and keys ...
I1121 14:29:40.903502 252125 kubeadm.go:319] [certs] Using existing ca certificate authority
I1121 14:29:40.903630 252125 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1121 14:29:41.180390 252125 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I1121 14:29:41.211121 252125 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I1121 14:29:41.523007 252125 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1121 14:29:42.461521 252125 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1121 14:29:42.641495 252125 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1121 14:29:42.641701 252125 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [localhost no-preload-921956] and IPs [192.168.103.2 127.0.0.1 ::1]
I1121 14:29:42.773640 252125 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1121 14:29:42.773843 252125 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [localhost no-preload-921956] and IPs [192.168.103.2 127.0.0.1 ::1]
I1121 14:29:42.921369 252125 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1121 14:29:43.256203 252125 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1121 14:29:43.834470 252125 kubeadm.go:319] [certs] Generating "sa" key and public key
I1121 14:29:43.834645 252125 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1121 14:29:43.949422 252125 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1121 14:29:44.093777 252125 kubeadm.go:319] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I1121 14:29:44.227287 252125 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1121 14:29:44.509482 252125 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1121 14:29:44.696294 252125 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1121 14:29:44.696767 252125 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1121 14:29:44.705846 252125 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1121 14:29:43.573374 255774 out.go:252] - Booting up control plane ...
I1121 14:29:43.573510 255774 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1121 14:29:43.573669 255774 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1121 14:29:43.573781 255774 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1121 14:29:43.590344 255774 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1121 14:29:43.590494 255774 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I1121 14:29:43.599838 255774 kubeadm.go:319] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I1121 14:29:43.600184 255774 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1121 14:29:43.600247 255774 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1121 14:29:43.720721 255774 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I1121 14:29:43.720878 255774 kubeadm.go:319] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I1121 14:29:44.721899 255774 kubeadm.go:319] [kubelet-check] The kubelet is healthy after 1.001196965s
I1121 14:29:44.724830 255774 kubeadm.go:319] [control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
I1121 14:29:44.724972 255774 kubeadm.go:319] [control-plane-check] Checking kube-apiserver at https://192.168.85.2:8444/livez
I1121 14:29:44.725131 255774 kubeadm.go:319] [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
I1121 14:29:44.725253 255774 kubeadm.go:319] [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
I1121 14:29:41.726266 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:42.225460 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:42.725727 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:43.225740 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:43.725669 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:44.225350 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:44.725651 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:45.226025 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:45.725289 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:46.226316 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:43.632243 213058 logs.go:123] Gathering logs for kube-scheduler [f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545] ...
I1121 14:29:43.632278 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545"
I1121 14:29:43.681909 213058 logs.go:123] Gathering logs for kube-controller-manager [56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463] ...
I1121 14:29:43.681959 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463"
I1121 14:29:43.723402 213058 logs.go:123] Gathering logs for containerd ...
I1121 14:29:43.723454 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1121 14:29:43.776606 213058 logs.go:123] Gathering logs for dmesg ...
I1121 14:29:43.776641 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1121 14:29:43.793171 213058 logs.go:123] Gathering logs for describe nodes ...
I1121 14:29:43.793200 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1121 14:29:43.854264 213058 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1121 14:29:43.854293 213058 logs.go:123] Gathering logs for kube-controller-manager [94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3] ...
I1121 14:29:43.854308 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3"
I1121 14:29:46.383659 213058 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1121 14:29:46.384075 213058 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1121 14:29:46.384128 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1121 14:29:46.384191 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1121 14:29:46.441629 213058 cri.go:89] found id: "9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1"
I1121 14:29:46.441734 213058 cri.go:89] found id: "934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780"
I1121 14:29:46.441754 213058 cri.go:89] found id: ""
I1121 14:29:46.441776 213058 logs.go:282] 2 containers: [9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1 934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780]
I1121 14:29:46.441873 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:46.447714 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:46.453337 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1121 14:29:46.453422 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1121 14:29:46.497451 213058 cri.go:89] found id: "4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359"
I1121 14:29:46.497475 213058 cri.go:89] found id: ""
I1121 14:29:46.497485 213058 logs.go:282] 1 containers: [4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359]
I1121 14:29:46.497585 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:46.504731 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1121 14:29:46.504801 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1121 14:29:46.562972 213058 cri.go:89] found id: ""
I1121 14:29:46.563014 213058 logs.go:282] 0 containers: []
W1121 14:29:46.563027 213058 logs.go:284] No container was found matching "coredns"
I1121 14:29:46.563036 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1121 14:29:46.563287 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1121 14:29:46.611186 213058 cri.go:89] found id: "e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6"
I1121 14:29:46.611216 213058 cri.go:89] found id: "f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545"
I1121 14:29:46.611221 213058 cri.go:89] found id: ""
I1121 14:29:46.611231 213058 logs.go:282] 2 containers: [e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6 f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545]
I1121 14:29:46.611289 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:46.620404 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:46.626388 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1121 14:29:46.626559 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1121 14:29:46.674192 213058 cri.go:89] found id: ""
I1121 14:29:46.674247 213058 logs.go:282] 0 containers: []
W1121 14:29:46.674259 213058 logs.go:284] No container was found matching "kube-proxy"
I1121 14:29:46.674267 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1121 14:29:46.674448 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1121 14:29:46.749738 213058 cri.go:89] found id: "94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3"
I1121 14:29:46.749765 213058 cri.go:89] found id: "56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463"
I1121 14:29:46.749771 213058 cri.go:89] found id: ""
I1121 14:29:46.749780 213058 logs.go:282] 2 containers: [94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3 56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463]
I1121 14:29:46.749835 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:46.756273 213058 ssh_runner.go:195] Run: which crictl
I1121 14:29:46.763986 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1121 14:29:46.764120 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1121 14:29:46.811858 213058 cri.go:89] found id: ""
I1121 14:29:46.811883 213058 logs.go:282] 0 containers: []
W1121 14:29:46.811901 213058 logs.go:284] No container was found matching "kindnet"
I1121 14:29:46.811909 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1121 14:29:46.811963 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1121 14:29:46.849599 213058 cri.go:89] found id: ""
I1121 14:29:46.849645 213058 logs.go:282] 0 containers: []
W1121 14:29:46.849655 213058 logs.go:284] No container was found matching "storage-provisioner"
I1121 14:29:46.849666 213058 logs.go:123] Gathering logs for kube-apiserver [9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1] ...
I1121 14:29:46.849683 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1"
I1121 14:29:46.913988 213058 logs.go:123] Gathering logs for etcd [4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359] ...
I1121 14:29:46.914024 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359"
I1121 14:29:46.953189 213058 logs.go:123] Gathering logs for kube-scheduler [f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545] ...
I1121 14:29:46.953227 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545"
I1121 14:29:47.001663 213058 logs.go:123] Gathering logs for kube-controller-manager [94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3] ...
I1121 14:29:47.001705 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3"
I1121 14:29:47.041106 213058 logs.go:123] Gathering logs for containerd ...
I1121 14:29:47.041137 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1121 14:29:47.107673 213058 logs.go:123] Gathering logs for kubelet ...
I1121 14:29:47.107712 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1121 14:29:47.240432 213058 logs.go:123] Gathering logs for dmesg ...
I1121 14:29:47.240473 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1121 14:29:47.288852 213058 logs.go:123] Gathering logs for describe nodes ...
I1121 14:29:47.288894 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
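Each cri.go listing in these loops resolves container IDs by shelling out to crictl with state and name filters; empty output becomes the `found id: ""` / `0 containers` lines above. A sketch of that listing step (helper name is illustrative):

```go
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// listContainers mirrors the logged crictl invocations: filter by name,
// return one container ID per non-empty output line.
func listContainers(name string) ([]string, error) {
	out, err := exec.Command("sudo", "crictl", "ps", "-a", "--quiet",
		"--name="+name).Output()
	if err != nil {
		return nil, err
	}
	var ids []string
	for _, line := range strings.Split(string(out), "\n") {
		if line = strings.TrimSpace(line); line != "" {
			ids = append(ids, line)
		}
	}
	return ids, nil
}

func main() {
	ids, err := listContainers("kube-apiserver")
	fmt.Println(ids, err)
}
```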
I1121 14:29:46.531314 255774 kubeadm.go:319] [control-plane-check] kube-controller-manager is healthy after 1.80645272s
I1121 14:29:47.509316 255774 kubeadm.go:319] [control-plane-check] kube-scheduler is healthy after 2.784421033s
I1121 14:29:49.226647 255774 kubeadm.go:319] [control-plane-check] kube-apiserver is healthy after 4.501794549s
I1121 14:29:49.239409 255774 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1121 14:29:49.252719 255774 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1121 14:29:49.264076 255774 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1121 14:29:49.264371 255774 kubeadm.go:319] [mark-control-plane] Marking the node default-k8s-diff-port-376255 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1121 14:29:49.274799 255774 kubeadm.go:319] [bootstrap-token] Using token: 8nwcfl.9utqukqcvuro6a4p
I1121 14:29:44.769338 252125 out.go:252] - Booting up control plane ...
I1121 14:29:44.769476 252125 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1121 14:29:44.769652 252125 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1121 14:29:44.769771 252125 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1121 14:29:44.769940 252125 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1121 14:29:44.770087 252125 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I1121 14:29:44.778391 252125 kubeadm.go:319] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I1121 14:29:44.779655 252125 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1121 14:29:44.779729 252125 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1121 14:29:44.894196 252125 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I1121 14:29:44.894364 252125 kubeadm.go:319] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I1121 14:29:45.895053 252125 kubeadm.go:319] [kubelet-check] The kubelet is healthy after 1.000974959s
I1121 14:29:45.898754 252125 kubeadm.go:319] [control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
I1121 14:29:45.898875 252125 kubeadm.go:319] [control-plane-check] Checking kube-apiserver at https://192.168.103.2:8443/livez
I1121 14:29:45.899003 252125 kubeadm.go:319] [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
I1121 14:29:45.899149 252125 kubeadm.go:319] [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
I1121 14:29:48.621169 252125 kubeadm.go:319] [control-plane-check] kube-scheduler is healthy after 2.722350043s
I1121 14:29:49.059709 252125 kubeadm.go:319] [control-plane-check] kube-controller-manager is healthy after 3.160801257s
I1121 14:29:49.276414 255774 out.go:252] - Configuring RBAC rules ...
I1121 14:29:49.276590 255774 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1121 14:29:49.280532 255774 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1121 14:29:49.287374 255774 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1121 14:29:49.290401 255774 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1121 14:29:49.293308 255774 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1121 14:29:49.297552 255774 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1121 14:29:49.632747 255774 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1121 14:29:46.726037 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:47.228665 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:47.725338 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:48.226199 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:48.725959 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:49.225812 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:49.725337 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:50.225293 249617 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:50.310282 249617 kubeadm.go:1114] duration metric: took 12.17154172s to wait for elevateKubeSystemPrivileges
I1121 14:29:50.310322 249617 kubeadm.go:403] duration metric: took 23.370802852s to StartCluster
I1121 14:29:50.310347 249617 settings.go:142] acquiring lock: {Name:mkfe3f8167491ec1abfca3e17282002404072955 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:50.310438 249617 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21847-11004/kubeconfig
I1121 14:29:50.311864 249617 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/kubeconfig: {Name:mk5d3e3ed379bd47c91313113a93ad7e3f44dbb9 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:50.312167 249617 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1121 14:29:50.312169 249617 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.94.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1121 14:29:50.312267 249617 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1121 14:29:50.312352 249617 addons.go:70] Setting storage-provisioner=true in profile "old-k8s-version-012258"
I1121 14:29:50.312372 249617 addons.go:239] Setting addon storage-provisioner=true in "old-k8s-version-012258"
I1121 14:29:50.312403 249617 host.go:66] Checking if "old-k8s-version-012258" exists ...
I1121 14:29:50.312458 249617 config.go:182] Loaded profile config "old-k8s-version-012258": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1121 14:29:50.312516 249617 addons.go:70] Setting default-storageclass=true in profile "old-k8s-version-012258"
I1121 14:29:50.312530 249617 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "old-k8s-version-012258"
I1121 14:29:50.312827 249617 cli_runner.go:164] Run: docker container inspect old-k8s-version-012258 --format={{.State.Status}}
I1121 14:29:50.312965 249617 cli_runner.go:164] Run: docker container inspect old-k8s-version-012258 --format={{.State.Status}}
I1121 14:29:50.314603 249617 out.go:179] * Verifying Kubernetes components...
I1121 14:29:50.316238 249617 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1121 14:29:50.339724 249617 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1121 14:29:50.056893 255774 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1121 14:29:50.634602 255774 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1121 14:29:50.635720 255774 kubeadm.go:319]
I1121 14:29:50.635840 255774 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1121 14:29:50.635916 255774 kubeadm.go:319]
I1121 14:29:50.636085 255774 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1121 14:29:50.636139 255774 kubeadm.go:319]
I1121 14:29:50.636189 255774 kubeadm.go:319] mkdir -p $HOME/.kube
I1121 14:29:50.636300 255774 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1121 14:29:50.636386 255774 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1121 14:29:50.636448 255774 kubeadm.go:319]
I1121 14:29:50.636574 255774 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1121 14:29:50.636584 255774 kubeadm.go:319]
I1121 14:29:50.636647 255774 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1121 14:29:50.636652 255774 kubeadm.go:319]
I1121 14:29:50.636709 255774 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1121 14:29:50.636796 255774 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1121 14:29:50.636878 255774 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1121 14:29:50.636886 255774 kubeadm.go:319]
I1121 14:29:50.636981 255774 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1121 14:29:50.637083 255774 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1121 14:29:50.637090 255774 kubeadm.go:319]
I1121 14:29:50.637247 255774 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8444 --token 8nwcfl.9utqukqcvuro6a4p \
I1121 14:29:50.637414 255774 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:2aad198f79b3258593291a08f0028a72548d0fc82d6b54639b7d7d17a52adfdb \
I1121 14:29:50.637449 255774 kubeadm.go:319] --control-plane
I1121 14:29:50.637460 255774 kubeadm.go:319]
I1121 14:29:50.637571 255774 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1121 14:29:50.637580 255774 kubeadm.go:319]
I1121 14:29:50.637672 255774 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8444 --token 8nwcfl.9utqukqcvuro6a4p \
I1121 14:29:50.637785 255774 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:2aad198f79b3258593291a08f0028a72548d0fc82d6b54639b7d7d17a52adfdb
I1121 14:29:50.642202 255774 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1044-gcp\n", err: exit status 1
I1121 14:29:50.642513 255774 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
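[editor's note] Both kubeadm runs in this log end with the same two preflight warnings. Neither is fatal here, but for reference, a minimal sketch of how one might address them on a plain host (inside minikube's Docker-driver container they are expected and benign):

# [WARNING SystemVerification]: kubeadm could not read the kernel config;
# on kernels built with CONFIG_IKCONFIG, loading this module exposes
# /proc/config.gz for the verifier:
sudo modprobe configs

# [WARNING Service-Kubelet]: mark kubelet to start on boot, as suggested:
sudo systemctl enable kubelet.service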
I1121 14:29:50.642647 255774 cni.go:84] Creating CNI manager for ""
I1121 14:29:50.642693 255774 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1121 14:29:50.645524 255774 out.go:179] * Configuring CNI (Container Networking Interface) ...
I1121 14:29:50.339929 249617 addons.go:239] Setting addon default-storageclass=true in "old-k8s-version-012258"
I1121 14:29:50.339977 249617 host.go:66] Checking if "old-k8s-version-012258" exists ...
I1121 14:29:50.340433 249617 cli_runner.go:164] Run: docker container inspect old-k8s-version-012258 --format={{.State.Status}}
I1121 14:29:50.341133 249617 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1121 14:29:50.341154 249617 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1121 14:29:50.341208 249617 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-012258
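[editor's note] The Go template in the cli_runner call above pulls the dynamically assigned host port for the container's 22/tcp binding out of docker inspect. A standalone sketch of the same lookup, using this run's profile name:

docker container inspect \
  -f '{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}' \
  old-k8s-version-012258
# prints the mapped SSH port -- 33060 in this run, matching the
# sshutil clients created a few lines below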
I1121 14:29:50.377822 249617 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1121 14:29:50.377846 249617 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1121 14:29:50.377844 249617 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33060 SSHKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/machines/old-k8s-version-012258/id_rsa Username:docker}
I1121 14:29:50.377907 249617 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-012258
I1121 14:29:50.410483 249617 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33060 SSHKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/machines/old-k8s-version-012258/id_rsa Username:docker}
I1121 14:29:50.415901 249617 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.94.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
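[editor's note] The pipeline above reads the live coredns ConfigMap, uses sed to splice a hosts block in front of the forward directive (and a log directive in front of errors), then feeds the result back through kubectl replace. Assuming a stock minikube Corefile, the injected fragment looks roughly like:

hosts {
   192.168.94.1 host.minikube.internal
   fallthrough
}

fallthrough ensures every name other than host.minikube.internal still passes on to the next plugin in the chain, here forward.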
I1121 14:29:50.468678 249617 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1121 14:29:50.503643 249617 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1121 14:29:50.536480 249617 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1121 14:29:50.667362 249617 start.go:977] {"host.minikube.internal": 192.168.94.1} host record injected into CoreDNS's ConfigMap
I1121 14:29:50.668484 249617 node_ready.go:35] waiting up to 6m0s for node "old-k8s-version-012258" to be "Ready" ...
I1121 14:29:50.954598 249617 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
I1121 14:29:50.401999 252125 kubeadm.go:319] [control-plane-check] kube-apiserver is healthy after 4.502477764s
I1121 14:29:50.419850 252125 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1121 14:29:50.933016 252125 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1121 14:29:50.948821 252125 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1121 14:29:50.949093 252125 kubeadm.go:319] [mark-control-plane] Marking the node no-preload-921956 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1121 14:29:50.961417 252125 kubeadm.go:319] [bootstrap-token] Using token: uhuim0.7wh8hbt7v76eo7qs
I1121 14:29:50.955828 249617 addons.go:530] duration metric: took 643.55365ms for enable addons: enabled=[storage-provisioner default-storageclass]
I1121 14:29:51.174831 249617 kapi.go:214] "coredns" deployment in "kube-system" namespace and "old-k8s-version-012258" context rescaled to 1 replicas
I1121 14:29:50.963415 252125 out.go:252] - Configuring RBAC rules ...
I1121 14:29:50.963588 252125 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1121 14:29:50.971176 252125 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1121 14:29:50.980644 252125 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1121 14:29:50.985255 252125 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1121 14:29:50.989946 252125 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1121 14:29:50.994015 252125 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1121 14:29:51.128309 252125 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1121 14:29:51.550178 252125 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1121 14:29:52.128624 252125 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1121 14:29:52.129402 252125 kubeadm.go:319]
I1121 14:29:52.129496 252125 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1121 14:29:52.129528 252125 kubeadm.go:319]
I1121 14:29:52.129657 252125 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1121 14:29:52.129669 252125 kubeadm.go:319]
I1121 14:29:52.129705 252125 kubeadm.go:319] mkdir -p $HOME/.kube
I1121 14:29:52.129798 252125 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1121 14:29:52.129906 252125 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1121 14:29:52.129923 252125 kubeadm.go:319]
I1121 14:29:52.129995 252125 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1121 14:29:52.130004 252125 kubeadm.go:319]
I1121 14:29:52.130078 252125 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1121 14:29:52.130087 252125 kubeadm.go:319]
I1121 14:29:52.130170 252125 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1121 14:29:52.130304 252125 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1121 14:29:52.130418 252125 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1121 14:29:52.130446 252125 kubeadm.go:319]
I1121 14:29:52.130574 252125 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1121 14:29:52.130677 252125 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1121 14:29:52.130685 252125 kubeadm.go:319]
I1121 14:29:52.130797 252125 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token uhuim0.7wh8hbt7v76eo7qs \
I1121 14:29:52.130966 252125 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:2aad198f79b3258593291a08f0028a72548d0fc82d6b54639b7d7d17a52adfdb \
I1121 14:29:52.131000 252125 kubeadm.go:319] --control-plane
I1121 14:29:52.131035 252125 kubeadm.go:319]
I1121 14:29:52.131212 252125 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1121 14:29:52.131230 252125 kubeadm.go:319]
I1121 14:29:52.131343 252125 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token uhuim0.7wh8hbt7v76eo7qs \
I1121 14:29:52.131485 252125 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:2aad198f79b3258593291a08f0028a72548d0fc82d6b54639b7d7d17a52adfdb
I1121 14:29:52.132830 252125 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1044-gcp\n", err: exit status 1
I1121 14:29:52.132967 252125 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1121 14:29:52.133003 252125 cni.go:84] Creating CNI manager for ""
I1121 14:29:52.133014 252125 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1121 14:29:52.134968 252125 out.go:179] * Configuring CNI (Container Networking Interface) ...
I1121 14:29:52.136241 252125 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I1121 14:29:52.141107 252125 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.34.1/kubectl ...
I1121 14:29:52.141131 252125 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I1121 14:29:52.155585 252125 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I1121 14:29:52.395340 252125 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1121 14:29:52.395422 252125 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:52.395526 252125 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes no-preload-921956 minikube.k8s.io/updated_at=2025_11_21T14_29_52_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=29e0798733fefbdc471fd2bbb38f6a7ae2a26162 minikube.k8s.io/name=no-preload-921956 minikube.k8s.io/primary=true
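[editor's note] The label command above stamps the node with minikube's bookkeeping metadata (version, commit, primary flag). A quick way to confirm the labels landed, sketched against this run's profile name (kubectl context assumed to match the profile):

kubectl --context no-preload-921956 get node no-preload-921956 \
  --show-labels | tr ',' '\n' | grep minikube.k8s.io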
I1121 14:29:52.481012 252125 ops.go:34] apiserver oom_adj: -16
I1121 14:29:52.481125 252125 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:52.982198 252125 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:53.481748 252125 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:53.981282 252125 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:50.646815 255774 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I1121 14:29:50.654615 255774 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.34.1/kubectl ...
I1121 14:29:50.654642 255774 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I1121 14:29:50.673887 255774 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I1121 14:29:50.944978 255774 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1121 14:29:50.945143 255774 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:50.945309 255774 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes default-k8s-diff-port-376255 minikube.k8s.io/updated_at=2025_11_21T14_29_50_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=29e0798733fefbdc471fd2bbb38f6a7ae2a26162 minikube.k8s.io/name=default-k8s-diff-port-376255 minikube.k8s.io/primary=true
I1121 14:29:50.960009 255774 ops.go:34] apiserver oom_adj: -16
I1121 14:29:51.036596 255774 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:51.537134 255774 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:52.037345 255774 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:52.536941 255774 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:53.037592 255774 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:53.536966 255774 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:54.036678 255774 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:54.536697 255774 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:55.037499 255774 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:55.536808 255774 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:55.610391 255774 kubeadm.go:1114] duration metric: took 4.665295307s to wait for elevateKubeSystemPrivileges
I1121 14:29:55.610426 255774 kubeadm.go:403] duration metric: took 15.395907943s to StartCluster
I1121 14:29:55.610448 255774 settings.go:142] acquiring lock: {Name:mkfe3f8167491ec1abfca3e17282002404072955 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:55.610511 255774 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21847-11004/kubeconfig
I1121 14:29:55.612071 255774 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/kubeconfig: {Name:mk5d3e3ed379bd47c91313113a93ad7e3f44dbb9 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:55.612346 255774 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.85.2 Port:8444 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1121 14:29:55.612498 255774 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1121 14:29:55.612612 255774 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1121 14:29:55.612696 255774 addons.go:70] Setting storage-provisioner=true in profile "default-k8s-diff-port-376255"
I1121 14:29:55.612713 255774 addons.go:239] Setting addon storage-provisioner=true in "default-k8s-diff-port-376255"
I1121 14:29:55.612745 255774 host.go:66] Checking if "default-k8s-diff-port-376255" exists ...
I1121 14:29:55.612775 255774 config.go:182] Loaded profile config "default-k8s-diff-port-376255": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1121 14:29:55.612835 255774 addons.go:70] Setting default-storageclass=true in profile "default-k8s-diff-port-376255"
I1121 14:29:55.612852 255774 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "default-k8s-diff-port-376255"
I1121 14:29:55.613218 255774 cli_runner.go:164] Run: docker container inspect default-k8s-diff-port-376255 --format={{.State.Status}}
I1121 14:29:55.613392 255774 cli_runner.go:164] Run: docker container inspect default-k8s-diff-port-376255 --format={{.State.Status}}
I1121 14:29:55.613476 255774 out.go:179] * Verifying Kubernetes components...
I1121 14:29:55.615420 255774 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1121 14:29:55.641842 255774 addons.go:239] Setting addon default-storageclass=true in "default-k8s-diff-port-376255"
I1121 14:29:55.641893 255774 host.go:66] Checking if "default-k8s-diff-port-376255" exists ...
I1121 14:29:55.642317 255774 cli_runner.go:164] Run: docker container inspect default-k8s-diff-port-376255 --format={{.State.Status}}
I1121 14:29:55.647007 255774 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1121 14:29:55.648771 255774 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1121 14:29:55.648807 255774 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1121 14:29:55.648882 255774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" default-k8s-diff-port-376255
I1121 14:29:55.679690 255774 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1121 14:29:55.679713 255774 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1121 14:29:55.679780 255774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" default-k8s-diff-port-376255
I1121 14:29:55.680868 255774 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33070 SSHKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/machines/default-k8s-diff-port-376255/id_rsa Username:docker}
I1121 14:29:55.703091 255774 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33070 SSHKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/machines/default-k8s-diff-port-376255/id_rsa Username:docker}
I1121 14:29:55.713751 255774 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.85.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I1121 14:29:55.781953 255774 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1121 14:29:55.795189 255774 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1121 14:29:55.811872 255774 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1121 14:29:55.895061 255774 start.go:977] {"host.minikube.internal": 192.168.85.1} host record injected into CoreDNS's ConfigMap
I1121 14:29:55.896386 255774 node_ready.go:35] waiting up to 6m0s for node "default-k8s-diff-port-376255" to be "Ready" ...
I1121 14:29:56.162438 255774 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
W1121 14:29:52.672645 249617 node_ready.go:57] node "old-k8s-version-012258" has "Ready":"False" status (will retry)
W1121 14:29:55.172665 249617 node_ready.go:57] node "old-k8s-version-012258" has "Ready":"False" status (will retry)
I1121 14:29:54.481750 252125 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:54.981303 252125 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:55.481778 252125 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:55.981846 252125 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:56.481336 252125 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:56.981822 252125 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1121 14:29:57.056720 252125 kubeadm.go:1114] duration metric: took 4.66135199s to wait for elevateKubeSystemPrivileges
I1121 14:29:57.056760 252125 kubeadm.go:403] duration metric: took 16.414821557s to StartCluster
I1121 14:29:57.056783 252125 settings.go:142] acquiring lock: {Name:mkfe3f8167491ec1abfca3e17282002404072955 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:57.056866 252125 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21847-11004/kubeconfig
I1121 14:29:57.059279 252125 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21847-11004/kubeconfig: {Name:mk5d3e3ed379bd47c91313113a93ad7e3f44dbb9 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1121 14:29:57.059591 252125 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1121 14:29:57.059595 252125 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.103.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1121 14:29:57.059668 252125 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1121 14:29:57.059755 252125 addons.go:70] Setting storage-provisioner=true in profile "no-preload-921956"
I1121 14:29:57.059780 252125 addons.go:239] Setting addon storage-provisioner=true in "no-preload-921956"
I1121 14:29:57.059783 252125 addons.go:70] Setting default-storageclass=true in profile "no-preload-921956"
I1121 14:29:57.059810 252125 host.go:66] Checking if "no-preload-921956" exists ...
I1121 14:29:57.059818 252125 config.go:182] Loaded profile config "no-preload-921956": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1121 14:29:57.059810 252125 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "no-preload-921956"
I1121 14:29:57.060267 252125 cli_runner.go:164] Run: docker container inspect no-preload-921956 --format={{.State.Status}}
I1121 14:29:57.060366 252125 cli_runner.go:164] Run: docker container inspect no-preload-921956 --format={{.State.Status}}
I1121 14:29:57.061615 252125 out.go:179] * Verifying Kubernetes components...
I1121 14:29:57.063049 252125 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1121 14:29:57.087511 252125 addons.go:239] Setting addon default-storageclass=true in "no-preload-921956"
I1121 14:29:57.087574 252125 host.go:66] Checking if "no-preload-921956" exists ...
I1121 14:29:57.088046 252125 cli_runner.go:164] Run: docker container inspect no-preload-921956 --format={{.State.Status}}
I1121 14:29:57.088842 252125 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1121 14:29:57.090553 252125 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1121 14:29:57.090577 252125 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1121 14:29:57.090634 252125 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-921956
I1121 14:29:57.113518 252125 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1121 14:29:57.113567 252125 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1121 14:29:57.113644 252125 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-921956
I1121 14:29:57.116604 252125 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33065 SSHKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/machines/no-preload-921956/id_rsa Username:docker}
I1121 14:29:57.140626 252125 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33065 SSHKeyPath:/home/jenkins/minikube-integration/21847-11004/.minikube/machines/no-preload-921956/id_rsa Username:docker}
I1121 14:29:57.162241 252125 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.103.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I1121 14:29:57.221336 252125 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1121 14:29:57.237060 252125 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1121 14:29:57.259845 252125 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1121 14:29:57.393470 252125 start.go:977] {"host.minikube.internal": 192.168.103.1} host record injected into CoreDNS's ConfigMap
I1121 14:29:57.394577 252125 node_ready.go:35] waiting up to 6m0s for node "no-preload-921956" to be "Ready" ...
I1121 14:29:57.623024 252125 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
I1121 14:29:57.414885 213058 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": (10.125971322s)
W1121 14:29:57.414929 213058 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
Unable to connect to the server: net/http: TLS handshake timeout
output:
** stderr **
Unable to connect to the server: net/http: TLS handshake timeout
** /stderr **
I1121 14:29:57.414939 213058 logs.go:123] Gathering logs for kube-apiserver [934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780] ...
I1121 14:29:57.414952 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780"
I1121 14:29:57.462838 213058 logs.go:123] Gathering logs for kube-scheduler [e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6] ...
I1121 14:29:57.462881 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6"
I1121 14:29:57.526637 213058 logs.go:123] Gathering logs for kube-controller-manager [56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463] ...
I1121 14:29:57.526671 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463"
I1121 14:29:57.574224 213058 logs.go:123] Gathering logs for container status ...
I1121 14:29:57.574259 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1121 14:29:57.624430 252125 addons.go:530] duration metric: took 564.759261ms for enable addons: enabled=[storage-provisioner default-storageclass]
I1121 14:29:57.898009 252125 kapi.go:214] "coredns" deployment in "kube-system" namespace and "no-preload-921956" context rescaled to 1 replicas
I1121 14:29:56.163632 255774 addons.go:530] duration metric: took 551.031985ms for enable addons: enabled=[storage-provisioner default-storageclass]
I1121 14:29:56.399602 255774 kapi.go:214] "coredns" deployment in "kube-system" namespace and "default-k8s-diff-port-376255" context rescaled to 1 replicas
W1121 14:29:57.899680 255774 node_ready.go:57] node "default-k8s-diff-port-376255" has "Ready":"False" status (will retry)
W1121 14:29:57.174208 249617 node_ready.go:57] node "old-k8s-version-012258" has "Ready":"False" status (will retry)
W1121 14:29:59.672116 249617 node_ready.go:57] node "old-k8s-version-012258" has "Ready":"False" status (will retry)
I1121 14:30:00.114035 213058 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
W1121 14:29:59.398191 252125 node_ready.go:57] node "no-preload-921956" has "Ready":"False" status (will retry)
W1121 14:30:01.898360 252125 node_ready.go:57] node "no-preload-921956" has "Ready":"False" status (will retry)
W1121 14:29:59.900344 255774 node_ready.go:57] node "default-k8s-diff-port-376255" has "Ready":"False" status (will retry)
W1121 14:30:01.900816 255774 node_ready.go:57] node "default-k8s-diff-port-376255" has "Ready":"False" status (will retry)
W1121 14:30:04.400331 255774 node_ready.go:57] node "default-k8s-diff-port-376255" has "Ready":"False" status (will retry)
W1121 14:30:01.672252 249617 node_ready.go:57] node "old-k8s-version-012258" has "Ready":"False" status (will retry)
W1121 14:30:04.171805 249617 node_ready.go:57] node "old-k8s-version-012258" has "Ready":"False" status (will retry)
I1121 14:30:05.672011 249617 node_ready.go:49] node "old-k8s-version-012258" is "Ready"
I1121 14:30:05.672046 249617 node_ready.go:38] duration metric: took 15.003519412s for node "old-k8s-version-012258" to be "Ready" ...
I1121 14:30:05.672064 249617 api_server.go:52] waiting for apiserver process to appear ...
I1121 14:30:05.672125 249617 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1121 14:30:05.689799 249617 api_server.go:72] duration metric: took 15.377593574s to wait for apiserver process to appear ...
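[editor's note] The process check above relies on pgrep's flag combination: -f matches against the full command line, -x requires the pattern to match that whole line, and -n returns only the newest matching PID, so exactly one kube-apiserver PID comes back:

sudo pgrep -xnf 'kube-apiserver.*minikube.*'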
I1121 14:30:05.689974 249617 api_server.go:88] waiting for apiserver healthz status ...
I1121 14:30:05.690001 249617 api_server.go:253] Checking apiserver healthz at https://192.168.94.2:8443/healthz ...
I1121 14:30:05.696217 249617 api_server.go:279] https://192.168.94.2:8443/healthz returned 200:
ok
I1121 14:30:05.697950 249617 api_server.go:141] control plane version: v1.28.0
I1121 14:30:05.697978 249617 api_server.go:131] duration metric: took 7.994891ms to wait for apiserver health ...
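[editor's note] The healthz loop above is a plain HTTPS GET that succeeds once the endpoint returns 200 with body "ok". A hand-run equivalent for this cluster; -k skips verification of the cluster-internal serving certificate:

curl -k https://192.168.94.2:8443/healthz
# expected output once the apiserver is up: ok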
I1121 14:30:05.697990 249617 system_pods.go:43] waiting for kube-system pods to appear ...
I1121 14:30:05.702726 249617 system_pods.go:59] 8 kube-system pods found
I1121 14:30:05.702769 249617 system_pods.go:61] "coredns-5dd5756b68-vst4c" [3ca4df79-d875-498c-91b8-059d4f975bd0] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1121 14:30:05.702778 249617 system_pods.go:61] "etcd-old-k8s-version-012258" [2316d2c5-5731-4804-b900-b3ed4289f3d5] Running
I1121 14:30:05.702785 249617 system_pods.go:61] "kindnet-f6t7s" [bd28a6b5-0214-42be-8883-1adf1217761c] Running
I1121 14:30:05.702796 249617 system_pods.go:61] "kube-apiserver-old-k8s-version-012258" [fb018e50-0892-4250-9f7d-16731a31f2e5] Running
I1121 14:30:05.702808 249617 system_pods.go:61] "kube-controller-manager-old-k8s-version-012258" [7e21a806-9ed1-4e34-a635-f92287ab6545] Running
I1121 14:30:05.702818 249617 system_pods.go:61] "kube-proxy-wsp2w" [bc079c02-40ff-4f10-947b-76f1e9784572] Running
I1121 14:30:05.702822 249617 system_pods.go:61] "kube-scheduler-old-k8s-version-012258" [925c4663-2ad7-41a1-9606-3fbfe8e0904d] Running
I1121 14:30:05.702829 249617 system_pods.go:61] "storage-provisioner" [4195d236-52f6-4bfd-b47a-9cd7cd89bedd] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1121 14:30:05.702837 249617 system_pods.go:74] duration metric: took 4.84094ms to wait for pod list to return data ...
I1121 14:30:05.702852 249617 default_sa.go:34] waiting for default service account to be created ...
I1121 14:30:05.705127 249617 default_sa.go:45] found service account: "default"
I1121 14:30:05.705151 249617 default_sa.go:55] duration metric: took 2.290103ms for default service account to be created ...
I1121 14:30:05.705161 249617 system_pods.go:116] waiting for k8s-apps to be running ...
I1121 14:30:05.710235 249617 system_pods.go:86] 8 kube-system pods found
I1121 14:30:05.710318 249617 system_pods.go:89] "coredns-5dd5756b68-vst4c" [3ca4df79-d875-498c-91b8-059d4f975bd0] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1121 14:30:05.710330 249617 system_pods.go:89] "etcd-old-k8s-version-012258" [2316d2c5-5731-4804-b900-b3ed4289f3d5] Running
I1121 14:30:05.710337 249617 system_pods.go:89] "kindnet-f6t7s" [bd28a6b5-0214-42be-8883-1adf1217761c] Running
I1121 14:30:05.710367 249617 system_pods.go:89] "kube-apiserver-old-k8s-version-012258" [fb018e50-0892-4250-9f7d-16731a31f2e5] Running
I1121 14:30:05.710374 249617 system_pods.go:89] "kube-controller-manager-old-k8s-version-012258" [7e21a806-9ed1-4e34-a635-f92287ab6545] Running
I1121 14:30:05.710380 249617 system_pods.go:89] "kube-proxy-wsp2w" [bc079c02-40ff-4f10-947b-76f1e9784572] Running
I1121 14:30:05.710385 249617 system_pods.go:89] "kube-scheduler-old-k8s-version-012258" [925c4663-2ad7-41a1-9606-3fbfe8e0904d] Running
I1121 14:30:05.710404 249617 system_pods.go:89] "storage-provisioner" [4195d236-52f6-4bfd-b47a-9cd7cd89bedd] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1121 14:30:05.710597 249617 retry.go:31] will retry after 257.065607ms: missing components: kube-dns
I1121 14:30:05.972608 249617 system_pods.go:86] 8 kube-system pods found
I1121 14:30:05.972648 249617 system_pods.go:89] "coredns-5dd5756b68-vst4c" [3ca4df79-d875-498c-91b8-059d4f975bd0] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1121 14:30:05.972657 249617 system_pods.go:89] "etcd-old-k8s-version-012258" [2316d2c5-5731-4804-b900-b3ed4289f3d5] Running
I1121 14:30:05.972665 249617 system_pods.go:89] "kindnet-f6t7s" [bd28a6b5-0214-42be-8883-1adf1217761c] Running
I1121 14:30:05.972676 249617 system_pods.go:89] "kube-apiserver-old-k8s-version-012258" [fb018e50-0892-4250-9f7d-16731a31f2e5] Running
I1121 14:30:05.972682 249617 system_pods.go:89] "kube-controller-manager-old-k8s-version-012258" [7e21a806-9ed1-4e34-a635-f92287ab6545] Running
I1121 14:30:05.972687 249617 system_pods.go:89] "kube-proxy-wsp2w" [bc079c02-40ff-4f10-947b-76f1e9784572] Running
I1121 14:30:05.972692 249617 system_pods.go:89] "kube-scheduler-old-k8s-version-012258" [925c4663-2ad7-41a1-9606-3fbfe8e0904d] Running
I1121 14:30:05.972707 249617 system_pods.go:89] "storage-provisioner" [4195d236-52f6-4bfd-b47a-9cd7cd89bedd] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1121 14:30:05.972726 249617 retry.go:31] will retry after 339.692313ms: missing components: kube-dns
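[editor's note] The retry loop treats kube-dns as a required component and backs off a few hundred milliseconds between polls until the coredns pod leaves Pending. The same condition can be watched directly, since coredns pods carry the k8s-app=kube-dns label:

kubectl --context old-k8s-version-012258 -n kube-system \
  get pods -l k8s-app=kube-dns -w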
I1121 14:30:06.317124 249617 system_pods.go:86] 8 kube-system pods found
I1121 14:30:06.317155 249617 system_pods.go:89] "coredns-5dd5756b68-vst4c" [3ca4df79-d875-498c-91b8-059d4f975bd0] Running
I1121 14:30:06.317160 249617 system_pods.go:89] "etcd-old-k8s-version-012258" [2316d2c5-5731-4804-b900-b3ed4289f3d5] Running
I1121 14:30:06.317163 249617 system_pods.go:89] "kindnet-f6t7s" [bd28a6b5-0214-42be-8883-1adf1217761c] Running
I1121 14:30:06.317167 249617 system_pods.go:89] "kube-apiserver-old-k8s-version-012258" [fb018e50-0892-4250-9f7d-16731a31f2e5] Running
I1121 14:30:06.317171 249617 system_pods.go:89] "kube-controller-manager-old-k8s-version-012258" [7e21a806-9ed1-4e34-a635-f92287ab6545] Running
I1121 14:30:06.317175 249617 system_pods.go:89] "kube-proxy-wsp2w" [bc079c02-40ff-4f10-947b-76f1e9784572] Running
I1121 14:30:06.317178 249617 system_pods.go:89] "kube-scheduler-old-k8s-version-012258" [925c4663-2ad7-41a1-9606-3fbfe8e0904d] Running
I1121 14:30:06.317181 249617 system_pods.go:89] "storage-provisioner" [4195d236-52f6-4bfd-b47a-9cd7cd89bedd] Running
I1121 14:30:06.317188 249617 system_pods.go:126] duration metric: took 612.020803ms to wait for k8s-apps to be running ...
I1121 14:30:06.317194 249617 system_svc.go:44] waiting for kubelet service to be running ....
I1121 14:30:06.317250 249617 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1121 14:30:06.332295 249617 system_svc.go:56] duration metric: took 15.088564ms WaitForService to wait for kubelet
I1121 14:30:06.332331 249617 kubeadm.go:587] duration metric: took 16.020134285s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1121 14:30:06.332357 249617 node_conditions.go:102] verifying NodePressure condition ...
I1121 14:30:06.338044 249617 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I1121 14:30:06.338071 249617 node_conditions.go:123] node cpu capacity is 8
I1121 14:30:06.338084 249617 node_conditions.go:105] duration metric: took 5.72136ms to run NodePressure ...
I1121 14:30:06.338096 249617 start.go:242] waiting for startup goroutines ...
I1121 14:30:06.338102 249617 start.go:247] waiting for cluster config update ...
I1121 14:30:06.338113 249617 start.go:256] writing updated cluster config ...
I1121 14:30:06.338382 249617 ssh_runner.go:195] Run: rm -f paused
I1121 14:30:06.342534 249617 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1121 14:30:06.347323 249617 pod_ready.go:83] waiting for pod "coredns-5dd5756b68-vst4c" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:06.352062 249617 pod_ready.go:94] pod "coredns-5dd5756b68-vst4c" is "Ready"
I1121 14:30:06.352087 249617 pod_ready.go:86] duration metric: took 4.697932ms for pod "coredns-5dd5756b68-vst4c" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:06.354946 249617 pod_ready.go:83] waiting for pod "etcd-old-k8s-version-012258" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:06.359326 249617 pod_ready.go:94] pod "etcd-old-k8s-version-012258" is "Ready"
I1121 14:30:06.359355 249617 pod_ready.go:86] duration metric: took 4.388182ms for pod "etcd-old-k8s-version-012258" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:06.362007 249617 pod_ready.go:83] waiting for pod "kube-apiserver-old-k8s-version-012258" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:06.366060 249617 pod_ready.go:94] pod "kube-apiserver-old-k8s-version-012258" is "Ready"
I1121 14:30:06.366081 249617 pod_ready.go:86] duration metric: took 4.051984ms for pod "kube-apiserver-old-k8s-version-012258" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:06.368789 249617 pod_ready.go:83] waiting for pod "kube-controller-manager-old-k8s-version-012258" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:06.746914 249617 pod_ready.go:94] pod "kube-controller-manager-old-k8s-version-012258" is "Ready"
I1121 14:30:06.746952 249617 pod_ready.go:86] duration metric: took 378.141903ms for pod "kube-controller-manager-old-k8s-version-012258" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:06.947790 249617 pod_ready.go:83] waiting for pod "kube-proxy-wsp2w" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:07.347266 249617 pod_ready.go:94] pod "kube-proxy-wsp2w" is "Ready"
I1121 14:30:07.347291 249617 pod_ready.go:86] duration metric: took 399.477159ms for pod "kube-proxy-wsp2w" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:07.547233 249617 pod_ready.go:83] waiting for pod "kube-scheduler-old-k8s-version-012258" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:07.946728 249617 pod_ready.go:94] pod "kube-scheduler-old-k8s-version-012258" is "Ready"
I1121 14:30:07.946756 249617 pod_ready.go:86] duration metric: took 399.500525ms for pod "kube-scheduler-old-k8s-version-012258" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:07.946772 249617 pod_ready.go:40] duration metric: took 1.604187461s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1121 14:30:08.009909 249617 start.go:628] kubectl: 1.34.2, cluster: 1.28.0 (minor skew: 6)
I1121 14:30:08.014607 249617 out.go:203]
W1121 14:30:08.016075 249617 out.go:285] ! /usr/local/bin/kubectl is version 1.34.2, which may have incompatibilities with Kubernetes 1.28.0.
I1121 14:30:08.020782 249617 out.go:179] - Want kubectl v1.28.0? Try 'minikube kubectl -- get pods -A'
I1121 14:30:08.022622 249617 out.go:179] * Done! kubectl is now configured to use "old-k8s-version-012258" cluster and "default" namespace by default
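[editor's note] The skew warning above fires because the host kubectl (1.34.2) is six minor versions ahead of the 1.28.0 control plane, well outside the one-minor-version skew kubectl officially supports. A sketch of the two checks the log suggests, using minikube's profile flag to reach its bundled, version-matched kubectl:

kubectl --context old-k8s-version-012258 version
minikube -p old-k8s-version-012258 kubectl -- get pods -A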
I1121 14:30:05.115052 213058 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1121 14:30:05.115115 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1121 14:30:05.115188 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1121 14:30:05.143819 213058 cri.go:89] found id: "56e8102371126ace3f42dda018be1e7af8b34b5b0c82b31bf229739d47944324"
I1121 14:30:05.143839 213058 cri.go:89] found id: "9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1"
I1121 14:30:05.143843 213058 cri.go:89] found id: "934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780"
I1121 14:30:05.143846 213058 cri.go:89] found id: ""
I1121 14:30:05.143853 213058 logs.go:282] 3 containers: [56e8102371126ace3f42dda018be1e7af8b34b5b0c82b31bf229739d47944324 9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1 934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780]
I1121 14:30:05.143912 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:05.148585 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:05.152984 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:05.156944 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1121 14:30:05.157004 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1121 14:30:05.185404 213058 cri.go:89] found id: "4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359"
I1121 14:30:05.185430 213058 cri.go:89] found id: ""
I1121 14:30:05.185440 213058 logs.go:282] 1 containers: [4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359]
I1121 14:30:05.185498 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:05.190360 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1121 14:30:05.190432 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1121 14:30:05.222964 213058 cri.go:89] found id: ""
I1121 14:30:05.222989 213058 logs.go:282] 0 containers: []
W1121 14:30:05.222999 213058 logs.go:284] No container was found matching "coredns"
I1121 14:30:05.223006 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1121 14:30:05.223058 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1121 14:30:05.254414 213058 cri.go:89] found id: "e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6"
I1121 14:30:05.254436 213058 cri.go:89] found id: "f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545"
I1121 14:30:05.254440 213058 cri.go:89] found id: ""
I1121 14:30:05.254447 213058 logs.go:282] 2 containers: [e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6 f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545]
I1121 14:30:05.254505 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:05.258766 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:05.262456 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1121 14:30:05.262524 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1121 14:30:05.288454 213058 cri.go:89] found id: ""
I1121 14:30:05.288486 213058 logs.go:282] 0 containers: []
W1121 14:30:05.288496 213058 logs.go:284] No container was found matching "kube-proxy"
I1121 14:30:05.288505 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1121 14:30:05.288598 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1121 14:30:05.317814 213058 cri.go:89] found id: "652f4807df85eaf29df01467f1035127421360ce721d4dee3abaffd4baf2fbcb"
I1121 14:30:05.317841 213058 cri.go:89] found id: "94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3"
I1121 14:30:05.317847 213058 cri.go:89] found id: "56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463"
I1121 14:30:05.317851 213058 cri.go:89] found id: ""
I1121 14:30:05.317861 213058 logs.go:282] 3 containers: [652f4807df85eaf29df01467f1035127421360ce721d4dee3abaffd4baf2fbcb 94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3 56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463]
I1121 14:30:05.317930 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:05.322506 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:05.326684 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:05.330828 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1121 14:30:05.330957 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1121 14:30:05.360073 213058 cri.go:89] found id: ""
I1121 14:30:05.360098 213058 logs.go:282] 0 containers: []
W1121 14:30:05.360107 213058 logs.go:284] No container was found matching "kindnet"
I1121 14:30:05.360116 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1121 14:30:05.360171 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1121 14:30:05.388524 213058 cri.go:89] found id: ""
I1121 14:30:05.388561 213058 logs.go:282] 0 containers: []
W1121 14:30:05.388573 213058 logs.go:284] No container was found matching "storage-provisioner"
I1121 14:30:05.388587 213058 logs.go:123] Gathering logs for kube-controller-manager [56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463] ...
I1121 14:30:05.388602 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463"
I1121 14:30:05.427247 213058 logs.go:123] Gathering logs for kubelet ...
I1121 14:30:05.427279 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1121 14:30:05.517583 213058 logs.go:123] Gathering logs for kube-apiserver [56e8102371126ace3f42dda018be1e7af8b34b5b0c82b31bf229739d47944324] ...
I1121 14:30:05.517615 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 56e8102371126ace3f42dda018be1e7af8b34b5b0c82b31bf229739d47944324"
I1121 14:30:05.556205 213058 logs.go:123] Gathering logs for kube-apiserver [9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1] ...
I1121 14:30:05.556238 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9a2b62669bb541c95ccc48a3bee10da7faccb77514f7c516ac47db9503f234b1"
I1121 14:30:05.601637 213058 logs.go:123] Gathering logs for etcd [4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359] ...
I1121 14:30:05.601692 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359"
I1121 14:30:05.642125 213058 logs.go:123] Gathering logs for containerd ...
I1121 14:30:05.642167 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1121 14:30:05.707252 213058 logs.go:123] Gathering logs for container status ...
I1121 14:30:05.707295 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1121 14:30:05.747947 213058 logs.go:123] Gathering logs for dmesg ...
I1121 14:30:05.747990 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1121 14:30:05.767646 213058 logs.go:123] Gathering logs for describe nodes ...
I1121 14:30:05.767678 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1121 14:30:04.398534 252125 node_ready.go:57] node "no-preload-921956" has "Ready":"False" status (will retry)
W1121 14:30:06.897181 252125 node_ready.go:57] node "no-preload-921956" has "Ready":"False" status (will retry)
W1121 14:30:08.897492 252125 node_ready.go:57] node "no-preload-921956" has "Ready":"False" status (will retry)
W1121 14:30:06.900285 255774 node_ready.go:57] node "default-k8s-diff-port-376255" has "Ready":"False" status (will retry)
I1121 14:30:07.400113 255774 node_ready.go:49] node "default-k8s-diff-port-376255" is "Ready"
I1121 14:30:07.400148 255774 node_ready.go:38] duration metric: took 11.503726167s for node "default-k8s-diff-port-376255" to be "Ready" ...
I1121 14:30:07.400166 255774 api_server.go:52] waiting for apiserver process to appear ...
I1121 14:30:07.400227 255774 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1121 14:30:07.416428 255774 api_server.go:72] duration metric: took 11.804040955s to wait for apiserver process to appear ...
I1121 14:30:07.416462 255774 api_server.go:88] waiting for apiserver healthz status ...
I1121 14:30:07.416487 255774 api_server.go:253] Checking apiserver healthz at https://192.168.85.2:8444/healthz ...
I1121 14:30:07.423355 255774 api_server.go:279] https://192.168.85.2:8444/healthz returned 200:
ok
I1121 14:30:07.424441 255774 api_server.go:141] control plane version: v1.34.1
I1121 14:30:07.424471 255774 api_server.go:131] duration metric: took 8.001103ms to wait for apiserver health ...
I1121 14:30:07.424480 255774 system_pods.go:43] waiting for kube-system pods to appear ...
I1121 14:30:07.428816 255774 system_pods.go:59] 8 kube-system pods found
I1121 14:30:07.428856 255774 system_pods.go:61] "coredns-66bc5c9577-fr27b" [aecd7b98-657f-464e-9860-d060714bbc5d] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1121 14:30:07.428866 255774 system_pods.go:61] "etcd-default-k8s-diff-port-376255" [b46a8392-a768-4a1b-9a89-b0c3c349dc99] Running
I1121 14:30:07.428874 255774 system_pods.go:61] "kindnet-cdzd4" [f954f962-f79a-49e5-8b79-5fbd3c544ffc] Running
I1121 14:30:07.428880 255774 system_pods.go:61] "kube-apiserver-default-k8s-diff-port-376255" [727fff75-5ef1-4665-a510-82662517dd6f] Running
I1121 14:30:07.428886 255774 system_pods.go:61] "kube-controller-manager-default-k8s-diff-port-376255" [d642ccae-4e43-4b4b-9d8d-51515a1aae9c] Running
I1121 14:30:07.428891 255774 system_pods.go:61] "kube-proxy-hdplf" [f4b8f54c-361f-4748-9f31-92ffb753f404] Running
I1121 14:30:07.428899 255774 system_pods.go:61] "kube-scheduler-default-k8s-diff-port-376255" [72272f12-0226-4c07-9867-6cedf46539a4] Running
I1121 14:30:07.428912 255774 system_pods.go:61] "storage-provisioner" [4fa1d228-0310-45d2-87b6-91ce085f1f58] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1121 14:30:07.428921 255774 system_pods.go:74] duration metric: took 4.433771ms to wait for pod list to return data ...
I1121 14:30:07.428932 255774 default_sa.go:34] waiting for default service account to be created ...
I1121 14:30:07.431771 255774 default_sa.go:45] found service account: "default"
I1121 14:30:07.431794 255774 default_sa.go:55] duration metric: took 2.856811ms for default service account to be created ...
I1121 14:30:07.431804 255774 system_pods.go:116] waiting for k8s-apps to be running ...
I1121 14:30:07.435787 255774 system_pods.go:86] 8 kube-system pods found
I1121 14:30:07.435816 255774 system_pods.go:89] "coredns-66bc5c9577-fr27b" [aecd7b98-657f-464e-9860-d060714bbc5d] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1121 14:30:07.435821 255774 system_pods.go:89] "etcd-default-k8s-diff-port-376255" [b46a8392-a768-4a1b-9a89-b0c3c349dc99] Running
I1121 14:30:07.435826 255774 system_pods.go:89] "kindnet-cdzd4" [f954f962-f79a-49e5-8b79-5fbd3c544ffc] Running
I1121 14:30:07.435830 255774 system_pods.go:89] "kube-apiserver-default-k8s-diff-port-376255" [727fff75-5ef1-4665-a510-82662517dd6f] Running
I1121 14:30:07.435833 255774 system_pods.go:89] "kube-controller-manager-default-k8s-diff-port-376255" [d642ccae-4e43-4b4b-9d8d-51515a1aae9c] Running
I1121 14:30:07.435836 255774 system_pods.go:89] "kube-proxy-hdplf" [f4b8f54c-361f-4748-9f31-92ffb753f404] Running
I1121 14:30:07.435841 255774 system_pods.go:89] "kube-scheduler-default-k8s-diff-port-376255" [72272f12-0226-4c07-9867-6cedf46539a4] Running
I1121 14:30:07.435846 255774 system_pods.go:89] "storage-provisioner" [4fa1d228-0310-45d2-87b6-91ce085f1f58] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1121 14:30:07.435871 255774 retry.go:31] will retry after 217.060579ms: missing components: kube-dns
I1121 14:30:07.656900 255774 system_pods.go:86] 8 kube-system pods found
I1121 14:30:07.656930 255774 system_pods.go:89] "coredns-66bc5c9577-fr27b" [aecd7b98-657f-464e-9860-d060714bbc5d] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1121 14:30:07.656937 255774 system_pods.go:89] "etcd-default-k8s-diff-port-376255" [b46a8392-a768-4a1b-9a89-b0c3c349dc99] Running
I1121 14:30:07.656945 255774 system_pods.go:89] "kindnet-cdzd4" [f954f962-f79a-49e5-8b79-5fbd3c544ffc] Running
I1121 14:30:07.656950 255774 system_pods.go:89] "kube-apiserver-default-k8s-diff-port-376255" [727fff75-5ef1-4665-a510-82662517dd6f] Running
I1121 14:30:07.656955 255774 system_pods.go:89] "kube-controller-manager-default-k8s-diff-port-376255" [d642ccae-4e43-4b4b-9d8d-51515a1aae9c] Running
I1121 14:30:07.656959 255774 system_pods.go:89] "kube-proxy-hdplf" [f4b8f54c-361f-4748-9f31-92ffb753f404] Running
I1121 14:30:07.656964 255774 system_pods.go:89] "kube-scheduler-default-k8s-diff-port-376255" [72272f12-0226-4c07-9867-6cedf46539a4] Running
I1121 14:30:07.656970 255774 system_pods.go:89] "storage-provisioner" [4fa1d228-0310-45d2-87b6-91ce085f1f58] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1121 14:30:07.656989 255774 retry.go:31] will retry after 330.648304ms: missing components: kube-dns
I1121 14:30:07.995514 255774 system_pods.go:86] 8 kube-system pods found
I1121 14:30:07.995612 255774 system_pods.go:89] "coredns-66bc5c9577-fr27b" [aecd7b98-657f-464e-9860-d060714bbc5d] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1121 14:30:07.995626 255774 system_pods.go:89] "etcd-default-k8s-diff-port-376255" [b46a8392-a768-4a1b-9a89-b0c3c349dc99] Running
I1121 14:30:07.995636 255774 system_pods.go:89] "kindnet-cdzd4" [f954f962-f79a-49e5-8b79-5fbd3c544ffc] Running
I1121 14:30:07.995642 255774 system_pods.go:89] "kube-apiserver-default-k8s-diff-port-376255" [727fff75-5ef1-4665-a510-82662517dd6f] Running
I1121 14:30:07.995653 255774 system_pods.go:89] "kube-controller-manager-default-k8s-diff-port-376255" [d642ccae-4e43-4b4b-9d8d-51515a1aae9c] Running
I1121 14:30:07.995659 255774 system_pods.go:89] "kube-proxy-hdplf" [f4b8f54c-361f-4748-9f31-92ffb753f404] Running
I1121 14:30:07.995664 255774 system_pods.go:89] "kube-scheduler-default-k8s-diff-port-376255" [72272f12-0226-4c07-9867-6cedf46539a4] Running
I1121 14:30:07.995683 255774 system_pods.go:89] "storage-provisioner" [4fa1d228-0310-45d2-87b6-91ce085f1f58] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1121 14:30:07.995713 255774 retry.go:31] will retry after 466.383408ms: missing components: kube-dns
I1121 14:30:08.466385 255774 system_pods.go:86] 8 kube-system pods found
I1121 14:30:08.466414 255774 system_pods.go:89] "coredns-66bc5c9577-fr27b" [aecd7b98-657f-464e-9860-d060714bbc5d] Running
I1121 14:30:08.466419 255774 system_pods.go:89] "etcd-default-k8s-diff-port-376255" [b46a8392-a768-4a1b-9a89-b0c3c349dc99] Running
I1121 14:30:08.466423 255774 system_pods.go:89] "kindnet-cdzd4" [f954f962-f79a-49e5-8b79-5fbd3c544ffc] Running
I1121 14:30:08.466427 255774 system_pods.go:89] "kube-apiserver-default-k8s-diff-port-376255" [727fff75-5ef1-4665-a510-82662517dd6f] Running
I1121 14:30:08.466430 255774 system_pods.go:89] "kube-controller-manager-default-k8s-diff-port-376255" [d642ccae-4e43-4b4b-9d8d-51515a1aae9c] Running
I1121 14:30:08.466435 255774 system_pods.go:89] "kube-proxy-hdplf" [f4b8f54c-361f-4748-9f31-92ffb753f404] Running
I1121 14:30:08.466438 255774 system_pods.go:89] "kube-scheduler-default-k8s-diff-port-376255" [72272f12-0226-4c07-9867-6cedf46539a4] Running
I1121 14:30:08.466441 255774 system_pods.go:89] "storage-provisioner" [4fa1d228-0310-45d2-87b6-91ce085f1f58] Running
I1121 14:30:08.466448 255774 system_pods.go:126] duration metric: took 1.034639333s to wait for k8s-apps to be running ...
I1121 14:30:08.466454 255774 system_svc.go:44] waiting for kubelet service to be running ....
I1121 14:30:08.466495 255774 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1121 14:30:08.480058 255774 system_svc.go:56] duration metric: took 13.59071ms WaitForService to wait for kubelet
I1121 14:30:08.480087 255774 kubeadm.go:587] duration metric: took 12.867708638s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1121 14:30:08.480104 255774 node_conditions.go:102] verifying NodePressure condition ...
I1121 14:30:08.483054 255774 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I1121 14:30:08.483077 255774 node_conditions.go:123] node cpu capacity is 8
I1121 14:30:08.483089 255774 node_conditions.go:105] duration metric: took 2.980591ms to run NodePressure ...
I1121 14:30:08.483101 255774 start.go:242] waiting for startup goroutines ...
I1121 14:30:08.483107 255774 start.go:247] waiting for cluster config update ...
I1121 14:30:08.483116 255774 start.go:256] writing updated cluster config ...
I1121 14:30:08.483378 255774 ssh_runner.go:195] Run: rm -f paused
I1121 14:30:08.487457 255774 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1121 14:30:08.490869 255774 pod_ready.go:83] waiting for pod "coredns-66bc5c9577-fr27b" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:08.495613 255774 pod_ready.go:94] pod "coredns-66bc5c9577-fr27b" is "Ready"
I1121 14:30:08.495638 255774 pod_ready.go:86] duration metric: took 4.745112ms for pod "coredns-66bc5c9577-fr27b" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:08.498070 255774 pod_ready.go:83] waiting for pod "etcd-default-k8s-diff-port-376255" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:08.502098 255774 pod_ready.go:94] pod "etcd-default-k8s-diff-port-376255" is "Ready"
I1121 14:30:08.502122 255774 pod_ready.go:86] duration metric: took 4.029361ms for pod "etcd-default-k8s-diff-port-376255" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:08.504276 255774 pod_ready.go:83] waiting for pod "kube-apiserver-default-k8s-diff-port-376255" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:08.508229 255774 pod_ready.go:94] pod "kube-apiserver-default-k8s-diff-port-376255" is "Ready"
I1121 14:30:08.508250 255774 pod_ready.go:86] duration metric: took 3.957821ms for pod "kube-apiserver-default-k8s-diff-port-376255" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:08.510387 255774 pod_ready.go:83] waiting for pod "kube-controller-manager-default-k8s-diff-port-376255" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:08.891344 255774 pod_ready.go:94] pod "kube-controller-manager-default-k8s-diff-port-376255" is "Ready"
I1121 14:30:08.891369 255774 pod_ready.go:86] duration metric: took 380.959206ms for pod "kube-controller-manager-default-k8s-diff-port-376255" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:09.091636 255774 pod_ready.go:83] waiting for pod "kube-proxy-hdplf" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:09.492078 255774 pod_ready.go:94] pod "kube-proxy-hdplf" is "Ready"
I1121 14:30:09.492108 255774 pod_ready.go:86] duration metric: took 400.444722ms for pod "kube-proxy-hdplf" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:09.693278 255774 pod_ready.go:83] waiting for pod "kube-scheduler-default-k8s-diff-port-376255" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:10.092105 255774 pod_ready.go:94] pod "kube-scheduler-default-k8s-diff-port-376255" is "Ready"
I1121 14:30:10.092133 255774 pod_ready.go:86] duration metric: took 398.824976ms for pod "kube-scheduler-default-k8s-diff-port-376255" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:10.092146 255774 pod_ready.go:40] duration metric: took 1.604655578s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1121 14:30:10.138628 255774 start.go:628] kubectl: 1.34.2, cluster: 1.34.1 (minor skew: 0)
I1121 14:30:10.140593 255774 out.go:179] * Done! kubectl is now configured to use "default-k8s-diff-port-376255" cluster and "default" namespace by default
I1121 14:30:08.754284 213058 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": (2.986586875s)
W1121 14:30:08.754342 213058 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
Get "https://localhost:8443/api/v1/nodes?limit=500": dial tcp [::1]:8443: connect: connection refused - error from a previous attempt: read tcp [::1]:60538->[::1]:8443: read: connection reset by peer
output:
** stderr **
Get "https://localhost:8443/api/v1/nodes?limit=500": dial tcp [::1]:8443: connect: connection refused - error from a previous attempt: read tcp [::1]:60538->[::1]:8443: read: connection reset by peer
** /stderr **
I1121 14:30:08.754352 213058 logs.go:123] Gathering logs for kube-apiserver [934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780] ...
I1121 14:30:08.754366 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780"
I1121 14:30:08.789119 213058 logs.go:123] Gathering logs for kube-scheduler [e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6] ...
I1121 14:30:08.789149 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6"
I1121 14:30:08.842933 213058 logs.go:123] Gathering logs for kube-scheduler [f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545] ...
I1121 14:30:08.842974 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545"
I1121 14:30:08.880878 213058 logs.go:123] Gathering logs for kube-controller-manager [652f4807df85eaf29df01467f1035127421360ce721d4dee3abaffd4baf2fbcb] ...
I1121 14:30:08.880919 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 652f4807df85eaf29df01467f1035127421360ce721d4dee3abaffd4baf2fbcb"
I1121 14:30:08.910920 213058 logs.go:123] Gathering logs for kube-controller-manager [94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3] ...
I1121 14:30:08.910953 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3"
I1121 14:30:11.440020 213058 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1121 14:30:11.440496 213058 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1121 14:30:11.440556 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1121 14:30:11.440601 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1121 14:30:11.472645 213058 cri.go:89] found id: "56e8102371126ace3f42dda018be1e7af8b34b5b0c82b31bf229739d47944324"
I1121 14:30:11.472669 213058 cri.go:89] found id: "934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780"
I1121 14:30:11.472674 213058 cri.go:89] found id: ""
I1121 14:30:11.472683 213058 logs.go:282] 2 containers: [56e8102371126ace3f42dda018be1e7af8b34b5b0c82b31bf229739d47944324 934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780]
I1121 14:30:11.472748 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:11.478061 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:11.482946 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1121 14:30:11.483034 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1121 14:30:11.517693 213058 cri.go:89] found id: "4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359"
I1121 14:30:11.517722 213058 cri.go:89] found id: ""
I1121 14:30:11.517732 213058 logs.go:282] 1 containers: [4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359]
I1121 14:30:11.517797 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:11.523621 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1121 14:30:11.523699 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1121 14:30:11.559155 213058 cri.go:89] found id: ""
I1121 14:30:11.559194 213058 logs.go:282] 0 containers: []
W1121 14:30:11.559204 213058 logs.go:284] No container was found matching "coredns"
I1121 14:30:11.559212 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1121 14:30:11.559271 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1121 14:30:11.595093 213058 cri.go:89] found id: "e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6"
I1121 14:30:11.595127 213058 cri.go:89] found id: "f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545"
I1121 14:30:11.595133 213058 cri.go:89] found id: ""
I1121 14:30:11.595143 213058 logs.go:282] 2 containers: [e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6 f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545]
I1121 14:30:11.595194 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:11.600085 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:11.604973 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1121 14:30:11.605048 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1121 14:30:11.639606 213058 cri.go:89] found id: ""
I1121 14:30:11.639636 213058 logs.go:282] 0 containers: []
W1121 14:30:11.639647 213058 logs.go:284] No container was found matching "kube-proxy"
I1121 14:30:11.639653 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1121 14:30:11.639713 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1121 14:30:11.684373 213058 cri.go:89] found id: "652f4807df85eaf29df01467f1035127421360ce721d4dee3abaffd4baf2fbcb"
I1121 14:30:11.684400 213058 cri.go:89] found id: "94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3"
I1121 14:30:11.684405 213058 cri.go:89] found id: "56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463"
I1121 14:30:11.684410 213058 cri.go:89] found id: ""
I1121 14:30:11.684421 213058 logs.go:282] 3 containers: [652f4807df85eaf29df01467f1035127421360ce721d4dee3abaffd4baf2fbcb 94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3 56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463]
I1121 14:30:11.684482 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:11.689732 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:11.695253 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:11.701315 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1121 14:30:11.701388 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1121 14:30:11.732802 213058 cri.go:89] found id: ""
I1121 14:30:11.732831 213058 logs.go:282] 0 containers: []
W1121 14:30:11.732841 213058 logs.go:284] No container was found matching "kindnet"
I1121 14:30:11.732848 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1121 14:30:11.732907 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1121 14:30:11.761686 213058 cri.go:89] found id: ""
I1121 14:30:11.761717 213058 logs.go:282] 0 containers: []
W1121 14:30:11.761729 213058 logs.go:284] No container was found matching "storage-provisioner"
I1121 14:30:11.761741 213058 logs.go:123] Gathering logs for kube-scheduler [e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6] ...
I1121 14:30:11.761756 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6"
I1121 14:30:11.816634 213058 logs.go:123] Gathering logs for kube-controller-manager [652f4807df85eaf29df01467f1035127421360ce721d4dee3abaffd4baf2fbcb] ...
I1121 14:30:11.816670 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 652f4807df85eaf29df01467f1035127421360ce721d4dee3abaffd4baf2fbcb"
I1121 14:30:11.846024 213058 logs.go:123] Gathering logs for kube-controller-manager [94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3] ...
I1121 14:30:11.846055 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 94ee5c394341614224319acbb63aafbedcfdbe7f50d3f046a56ab246dc32ceb3"
I1121 14:30:11.876932 213058 logs.go:123] Gathering logs for kube-controller-manager [56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463] ...
I1121 14:30:11.876964 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463"
I1121 14:30:11.912984 213058 logs.go:123] Gathering logs for containerd ...
I1121 14:30:11.913018 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1121 14:30:11.965381 213058 logs.go:123] Gathering logs for container status ...
I1121 14:30:11.965423 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1121 14:30:11.997477 213058 logs.go:123] Gathering logs for dmesg ...
I1121 14:30:11.997509 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1121 14:30:12.011497 213058 logs.go:123] Gathering logs for describe nodes ...
I1121 14:30:12.011524 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1121 14:30:12.071024 213058 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1121 14:30:12.071049 213058 logs.go:123] Gathering logs for kube-apiserver [56e8102371126ace3f42dda018be1e7af8b34b5b0c82b31bf229739d47944324] ...
I1121 14:30:12.071065 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 56e8102371126ace3f42dda018be1e7af8b34b5b0c82b31bf229739d47944324"
I1121 14:30:12.106865 213058 logs.go:123] Gathering logs for kube-apiserver [934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780] ...
I1121 14:30:12.106898 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780"
I1121 14:30:12.141245 213058 logs.go:123] Gathering logs for kube-scheduler [f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545] ...
I1121 14:30:12.141276 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545"
I1121 14:30:12.176551 213058 logs.go:123] Gathering logs for kubelet ...
I1121 14:30:12.176600 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1121 14:30:12.268742 213058 logs.go:123] Gathering logs for etcd [4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359] ...
I1121 14:30:12.268780 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359"
W1121 14:30:10.897620 252125 node_ready.go:57] node "no-preload-921956" has "Ready":"False" status (will retry)
I1121 14:30:11.398100 252125 node_ready.go:49] node "no-preload-921956" is "Ready"
I1121 14:30:11.398128 252125 node_ready.go:38] duration metric: took 14.003530083s for node "no-preload-921956" to be "Ready" ...
I1121 14:30:11.398142 252125 api_server.go:52] waiting for apiserver process to appear ...
I1121 14:30:11.398195 252125 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1121 14:30:11.412043 252125 api_server.go:72] duration metric: took 14.35241025s to wait for apiserver process to appear ...
I1121 14:30:11.412070 252125 api_server.go:88] waiting for apiserver healthz status ...
I1121 14:30:11.412087 252125 api_server.go:253] Checking apiserver healthz at https://192.168.103.2:8443/healthz ...
I1121 14:30:11.417254 252125 api_server.go:279] https://192.168.103.2:8443/healthz returned 200:
ok
I1121 14:30:11.418517 252125 api_server.go:141] control plane version: v1.34.1
I1121 14:30:11.418570 252125 api_server.go:131] duration metric: took 6.492303ms to wait for apiserver health ...
I1121 14:30:11.418581 252125 system_pods.go:43] waiting for kube-system pods to appear ...
I1121 14:30:11.421927 252125 system_pods.go:59] 8 kube-system pods found
I1121 14:30:11.422024 252125 system_pods.go:61] "coredns-66bc5c9577-s4rzb" [4941c273-72bf-49af-ad72-793444a43d21] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1121 14:30:11.422034 252125 system_pods.go:61] "etcd-no-preload-921956" [2b973978-8ff2-488f-b54b-80bb44d4f320] Running
I1121 14:30:11.422047 252125 system_pods.go:61] "kindnet-kf24h" [c698f297-3ff4-4f90-a871-5c4c944b9e61] Running
I1121 14:30:11.422059 252125 system_pods.go:61] "kube-apiserver-no-preload-921956" [11865678-b4f0-4cb1-9f82-9c59edf0d6e6] Running
I1121 14:30:11.422069 252125 system_pods.go:61] "kube-controller-manager-no-preload-921956" [5740abab-80b7-4352-8d44-40c9ad7fc713] Running
I1121 14:30:11.422073 252125 system_pods.go:61] "kube-proxy-wmx7z" [7d5a84f9-144c-4920-a08d-478587a56498] Running
I1121 14:30:11.422077 252125 system_pods.go:61] "kube-scheduler-no-preload-921956" [a200f6cd-f579-45e7-9f94-080ca622a30b] Running
I1121 14:30:11.422082 252125 system_pods.go:61] "storage-provisioner" [75fb9c04-833c-4511-83c7-380f4848e49d] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1121 14:30:11.422094 252125 system_pods.go:74] duration metric: took 3.505153ms to wait for pod list to return data ...
I1121 14:30:11.422109 252125 default_sa.go:34] waiting for default service account to be created ...
I1121 14:30:11.424685 252125 default_sa.go:45] found service account: "default"
I1121 14:30:11.424710 252125 default_sa.go:55] duration metric: took 2.591611ms for default service account to be created ...
I1121 14:30:11.424722 252125 system_pods.go:116] waiting for k8s-apps to be running ...
I1121 14:30:11.427627 252125 system_pods.go:86] 8 kube-system pods found
I1121 14:30:11.427680 252125 system_pods.go:89] "coredns-66bc5c9577-s4rzb" [4941c273-72bf-49af-ad72-793444a43d21] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1121 14:30:11.427689 252125 system_pods.go:89] "etcd-no-preload-921956" [2b973978-8ff2-488f-b54b-80bb44d4f320] Running
I1121 14:30:11.427703 252125 system_pods.go:89] "kindnet-kf24h" [c698f297-3ff4-4f90-a871-5c4c944b9e61] Running
I1121 14:30:11.427713 252125 system_pods.go:89] "kube-apiserver-no-preload-921956" [11865678-b4f0-4cb1-9f82-9c59edf0d6e6] Running
I1121 14:30:11.427721 252125 system_pods.go:89] "kube-controller-manager-no-preload-921956" [5740abab-80b7-4352-8d44-40c9ad7fc713] Running
I1121 14:30:11.427726 252125 system_pods.go:89] "kube-proxy-wmx7z" [7d5a84f9-144c-4920-a08d-478587a56498] Running
I1121 14:30:11.427731 252125 system_pods.go:89] "kube-scheduler-no-preload-921956" [a200f6cd-f579-45e7-9f94-080ca622a30b] Running
I1121 14:30:11.427737 252125 system_pods.go:89] "storage-provisioner" [75fb9c04-833c-4511-83c7-380f4848e49d] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1121 14:30:11.427768 252125 retry.go:31] will retry after 234.428318ms: missing components: kube-dns
I1121 14:30:11.669788 252125 system_pods.go:86] 8 kube-system pods found
I1121 14:30:11.669831 252125 system_pods.go:89] "coredns-66bc5c9577-s4rzb" [4941c273-72bf-49af-ad72-793444a43d21] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1121 14:30:11.669840 252125 system_pods.go:89] "etcd-no-preload-921956" [2b973978-8ff2-488f-b54b-80bb44d4f320] Running
I1121 14:30:11.669850 252125 system_pods.go:89] "kindnet-kf24h" [c698f297-3ff4-4f90-a871-5c4c944b9e61] Running
I1121 14:30:11.669858 252125 system_pods.go:89] "kube-apiserver-no-preload-921956" [11865678-b4f0-4cb1-9f82-9c59edf0d6e6] Running
I1121 14:30:11.669865 252125 system_pods.go:89] "kube-controller-manager-no-preload-921956" [5740abab-80b7-4352-8d44-40c9ad7fc713] Running
I1121 14:30:11.669871 252125 system_pods.go:89] "kube-proxy-wmx7z" [7d5a84f9-144c-4920-a08d-478587a56498] Running
I1121 14:30:11.669877 252125 system_pods.go:89] "kube-scheduler-no-preload-921956" [a200f6cd-f579-45e7-9f94-080ca622a30b] Running
I1121 14:30:11.669893 252125 system_pods.go:89] "storage-provisioner" [75fb9c04-833c-4511-83c7-380f4848e49d] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1121 14:30:11.669919 252125 retry.go:31] will retry after 250.085803ms: missing components: kube-dns
I1121 14:30:11.924517 252125 system_pods.go:86] 8 kube-system pods found
I1121 14:30:11.924602 252125 system_pods.go:89] "coredns-66bc5c9577-s4rzb" [4941c273-72bf-49af-ad72-793444a43d21] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1121 14:30:11.924614 252125 system_pods.go:89] "etcd-no-preload-921956" [2b973978-8ff2-488f-b54b-80bb44d4f320] Running
I1121 14:30:11.924627 252125 system_pods.go:89] "kindnet-kf24h" [c698f297-3ff4-4f90-a871-5c4c944b9e61] Running
I1121 14:30:11.924633 252125 system_pods.go:89] "kube-apiserver-no-preload-921956" [11865678-b4f0-4cb1-9f82-9c59edf0d6e6] Running
I1121 14:30:11.924642 252125 system_pods.go:89] "kube-controller-manager-no-preload-921956" [5740abab-80b7-4352-8d44-40c9ad7fc713] Running
I1121 14:30:11.924647 252125 system_pods.go:89] "kube-proxy-wmx7z" [7d5a84f9-144c-4920-a08d-478587a56498] Running
I1121 14:30:11.924653 252125 system_pods.go:89] "kube-scheduler-no-preload-921956" [a200f6cd-f579-45e7-9f94-080ca622a30b] Running
I1121 14:30:11.924661 252125 system_pods.go:89] "storage-provisioner" [75fb9c04-833c-4511-83c7-380f4848e49d] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1121 14:30:11.924682 252125 retry.go:31] will retry after 441.862758ms: missing components: kube-dns
I1121 14:30:12.371065 252125 system_pods.go:86] 8 kube-system pods found
I1121 14:30:12.371110 252125 system_pods.go:89] "coredns-66bc5c9577-s4rzb" [4941c273-72bf-49af-ad72-793444a43d21] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1121 14:30:12.371122 252125 system_pods.go:89] "etcd-no-preload-921956" [2b973978-8ff2-488f-b54b-80bb44d4f320] Running
I1121 14:30:12.371131 252125 system_pods.go:89] "kindnet-kf24h" [c698f297-3ff4-4f90-a871-5c4c944b9e61] Running
I1121 14:30:12.371136 252125 system_pods.go:89] "kube-apiserver-no-preload-921956" [11865678-b4f0-4cb1-9f82-9c59edf0d6e6] Running
I1121 14:30:12.371142 252125 system_pods.go:89] "kube-controller-manager-no-preload-921956" [5740abab-80b7-4352-8d44-40c9ad7fc713] Running
I1121 14:30:12.371147 252125 system_pods.go:89] "kube-proxy-wmx7z" [7d5a84f9-144c-4920-a08d-478587a56498] Running
I1121 14:30:12.371158 252125 system_pods.go:89] "kube-scheduler-no-preload-921956" [a200f6cd-f579-45e7-9f94-080ca622a30b] Running
I1121 14:30:12.371170 252125 system_pods.go:89] "storage-provisioner" [75fb9c04-833c-4511-83c7-380f4848e49d] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1121 14:30:12.371189 252125 retry.go:31] will retry after 502.578888ms: missing components: kube-dns
I1121 14:30:12.879209 252125 system_pods.go:86] 8 kube-system pods found
I1121 14:30:12.879243 252125 system_pods.go:89] "coredns-66bc5c9577-s4rzb" [4941c273-72bf-49af-ad72-793444a43d21] Running
I1121 14:30:12.879249 252125 system_pods.go:89] "etcd-no-preload-921956" [2b973978-8ff2-488f-b54b-80bb44d4f320] Running
I1121 14:30:12.879253 252125 system_pods.go:89] "kindnet-kf24h" [c698f297-3ff4-4f90-a871-5c4c944b9e61] Running
I1121 14:30:12.879258 252125 system_pods.go:89] "kube-apiserver-no-preload-921956" [11865678-b4f0-4cb1-9f82-9c59edf0d6e6] Running
I1121 14:30:12.879268 252125 system_pods.go:89] "kube-controller-manager-no-preload-921956" [5740abab-80b7-4352-8d44-40c9ad7fc713] Running
I1121 14:30:12.879271 252125 system_pods.go:89] "kube-proxy-wmx7z" [7d5a84f9-144c-4920-a08d-478587a56498] Running
I1121 14:30:12.879275 252125 system_pods.go:89] "kube-scheduler-no-preload-921956" [a200f6cd-f579-45e7-9f94-080ca622a30b] Running
I1121 14:30:12.879278 252125 system_pods.go:89] "storage-provisioner" [75fb9c04-833c-4511-83c7-380f4848e49d] Running
I1121 14:30:12.879289 252125 system_pods.go:126] duration metric: took 1.454561179s to wait for k8s-apps to be running ...
I1121 14:30:12.879301 252125 system_svc.go:44] waiting for kubelet service to be running ....
I1121 14:30:12.879351 252125 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1121 14:30:12.894061 252125 system_svc.go:56] duration metric: took 14.74714ms WaitForService to wait for kubelet
I1121 14:30:12.894092 252125 kubeadm.go:587] duration metric: took 15.834465857s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1121 14:30:12.894115 252125 node_conditions.go:102] verifying NodePressure condition ...
I1121 14:30:12.897599 252125 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I1121 14:30:12.897630 252125 node_conditions.go:123] node cpu capacity is 8
I1121 14:30:12.897641 252125 node_conditions.go:105] duration metric: took 3.520753ms to run NodePressure ...
I1121 14:30:12.897652 252125 start.go:242] waiting for startup goroutines ...
I1121 14:30:12.897659 252125 start.go:247] waiting for cluster config update ...
I1121 14:30:12.897669 252125 start.go:256] writing updated cluster config ...
I1121 14:30:12.897983 252125 ssh_runner.go:195] Run: rm -f paused
I1121 14:30:12.902897 252125 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1121 14:30:12.906562 252125 pod_ready.go:83] waiting for pod "coredns-66bc5c9577-s4rzb" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:12.912263 252125 pod_ready.go:94] pod "coredns-66bc5c9577-s4rzb" is "Ready"
I1121 14:30:12.912286 252125 pod_ready.go:86] duration metric: took 5.702456ms for pod "coredns-66bc5c9577-s4rzb" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:12.915190 252125 pod_ready.go:83] waiting for pod "etcd-no-preload-921956" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:12.919870 252125 pod_ready.go:94] pod "etcd-no-preload-921956" is "Ready"
I1121 14:30:12.919896 252125 pod_ready.go:86] duration metric: took 4.68423ms for pod "etcd-no-preload-921956" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:12.921926 252125 pod_ready.go:83] waiting for pod "kube-apiserver-no-preload-921956" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:12.925984 252125 pod_ready.go:94] pod "kube-apiserver-no-preload-921956" is "Ready"
I1121 14:30:12.926012 252125 pod_ready.go:86] duration metric: took 4.065762ms for pod "kube-apiserver-no-preload-921956" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:12.928283 252125 pod_ready.go:83] waiting for pod "kube-controller-manager-no-preload-921956" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:13.307608 252125 pod_ready.go:94] pod "kube-controller-manager-no-preload-921956" is "Ready"
I1121 14:30:13.307639 252125 pod_ready.go:86] duration metric: took 379.335151ms for pod "kube-controller-manager-no-preload-921956" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:13.508229 252125 pod_ready.go:83] waiting for pod "kube-proxy-wmx7z" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:13.907070 252125 pod_ready.go:94] pod "kube-proxy-wmx7z" is "Ready"
I1121 14:30:13.907101 252125 pod_ready.go:86] duration metric: took 398.843128ms for pod "kube-proxy-wmx7z" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:14.108040 252125 pod_ready.go:83] waiting for pod "kube-scheduler-no-preload-921956" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:14.507264 252125 pod_ready.go:94] pod "kube-scheduler-no-preload-921956" is "Ready"
I1121 14:30:14.507293 252125 pod_ready.go:86] duration metric: took 399.219492ms for pod "kube-scheduler-no-preload-921956" in "kube-system" namespace to be "Ready" or be gone ...
I1121 14:30:14.507307 252125 pod_ready.go:40] duration metric: took 1.604362709s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1121 14:30:14.554506 252125 start.go:628] kubectl: 1.34.2, cluster: 1.34.1 (minor skew: 0)
I1121 14:30:14.556366 252125 out.go:179] * Done! kubectl is now configured to use "no-preload-921956" cluster and "default" namespace by default
I1121 14:30:14.802507 213058 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1121 14:30:14.803048 213058 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1121 14:30:14.803100 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1121 14:30:14.803156 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1121 14:30:14.832438 213058 cri.go:89] found id: "56e8102371126ace3f42dda018be1e7af8b34b5b0c82b31bf229739d47944324"
I1121 14:30:14.832464 213058 cri.go:89] found id: "934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780"
I1121 14:30:14.832469 213058 cri.go:89] found id: ""
I1121 14:30:14.832479 213058 logs.go:282] 2 containers: [56e8102371126ace3f42dda018be1e7af8b34b5b0c82b31bf229739d47944324 934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780]
I1121 14:30:14.832560 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:14.836869 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:14.840970 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1121 14:30:14.841027 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1121 14:30:14.869276 213058 cri.go:89] found id: "4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359"
I1121 14:30:14.869297 213058 cri.go:89] found id: ""
I1121 14:30:14.869306 213058 logs.go:282] 1 containers: [4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359]
I1121 14:30:14.869364 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:14.873530 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1121 14:30:14.873616 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1121 14:30:14.902293 213058 cri.go:89] found id: ""
I1121 14:30:14.902325 213058 logs.go:282] 0 containers: []
W1121 14:30:14.902336 213058 logs.go:284] No container was found matching "coredns"
I1121 14:30:14.902343 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1121 14:30:14.902396 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1121 14:30:14.931422 213058 cri.go:89] found id: "e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6"
I1121 14:30:14.931444 213058 cri.go:89] found id: "f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545"
I1121 14:30:14.931448 213058 cri.go:89] found id: ""
I1121 14:30:14.931455 213058 logs.go:282] 2 containers: [e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6 f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545]
I1121 14:30:14.931507 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:14.936188 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:14.940673 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1121 14:30:14.940742 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1121 14:30:14.969277 213058 cri.go:89] found id: ""
I1121 14:30:14.969308 213058 logs.go:282] 0 containers: []
W1121 14:30:14.969320 213058 logs.go:284] No container was found matching "kube-proxy"
I1121 14:30:14.969328 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1121 14:30:14.969386 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1121 14:30:14.999162 213058 cri.go:89] found id: "652f4807df85eaf29df01467f1035127421360ce721d4dee3abaffd4baf2fbcb"
I1121 14:30:14.999190 213058 cri.go:89] found id: "56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463"
I1121 14:30:14.999195 213058 cri.go:89] found id: ""
I1121 14:30:14.999209 213058 logs.go:282] 2 containers: [652f4807df85eaf29df01467f1035127421360ce721d4dee3abaffd4baf2fbcb 56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463]
I1121 14:30:14.999275 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:15.003627 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:15.008044 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1121 14:30:15.008149 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1121 14:30:15.036025 213058 cri.go:89] found id: ""
I1121 14:30:15.036050 213058 logs.go:282] 0 containers: []
W1121 14:30:15.036061 213058 logs.go:284] No container was found matching "kindnet"
I1121 14:30:15.036069 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1121 14:30:15.036123 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1121 14:30:15.064814 213058 cri.go:89] found id: ""
I1121 14:30:15.064840 213058 logs.go:282] 0 containers: []
W1121 14:30:15.064851 213058 logs.go:284] No container was found matching "storage-provisioner"
I1121 14:30:15.064863 213058 logs.go:123] Gathering logs for kube-apiserver [934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780] ...
I1121 14:30:15.064877 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780"
I1121 14:30:15.105369 213058 logs.go:123] Gathering logs for kube-scheduler [f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545] ...
I1121 14:30:15.105412 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545"
I1121 14:30:15.145479 213058 logs.go:123] Gathering logs for kube-controller-manager [56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463] ...
I1121 14:30:15.145521 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463"
I1121 14:30:15.186460 213058 logs.go:123] Gathering logs for containerd ...
I1121 14:30:15.186498 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1121 14:30:15.233156 213058 logs.go:123] Gathering logs for kubelet ...
I1121 14:30:15.233196 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1121 14:30:15.328776 213058 logs.go:123] Gathering logs for dmesg ...
I1121 14:30:15.328824 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1121 14:30:15.343510 213058 logs.go:123] Gathering logs for kube-apiserver [56e8102371126ace3f42dda018be1e7af8b34b5b0c82b31bf229739d47944324] ...
I1121 14:30:15.343556 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 56e8102371126ace3f42dda018be1e7af8b34b5b0c82b31bf229739d47944324"
I1121 14:30:15.375919 213058 logs.go:123] Gathering logs for etcd [4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359] ...
I1121 14:30:15.375959 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359"
I1121 14:30:15.412267 213058 logs.go:123] Gathering logs for kube-scheduler [e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6] ...
I1121 14:30:15.412310 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6"
I1121 14:30:15.467388 213058 logs.go:123] Gathering logs for kube-controller-manager [652f4807df85eaf29df01467f1035127421360ce721d4dee3abaffd4baf2fbcb] ...
I1121 14:30:15.467422 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 652f4807df85eaf29df01467f1035127421360ce721d4dee3abaffd4baf2fbcb"
I1121 14:30:15.495400 213058 logs.go:123] Gathering logs for container status ...
I1121 14:30:15.495451 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1121 14:30:15.527880 213058 logs.go:123] Gathering logs for describe nodes ...
I1121 14:30:15.527906 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1121 14:30:15.589380 213058 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1121 14:30:18.090626 213058 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1121 14:30:18.091055 213058 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1121 14:30:18.091106 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1121 14:30:18.091154 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1121 14:30:18.119750 213058 cri.go:89] found id: "56e8102371126ace3f42dda018be1e7af8b34b5b0c82b31bf229739d47944324"
I1121 14:30:18.119777 213058 cri.go:89] found id: "934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780"
I1121 14:30:18.119781 213058 cri.go:89] found id: ""
I1121 14:30:18.119788 213058 logs.go:282] 2 containers: [56e8102371126ace3f42dda018be1e7af8b34b5b0c82b31bf229739d47944324 934eed7bbf3dc6a22575be8055cd940b96038e22e5cd6f3463961a46d6046780]
I1121 14:30:18.119846 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:18.124441 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:18.128481 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1121 14:30:18.128574 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1121 14:30:18.155968 213058 cri.go:89] found id: "4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359"
I1121 14:30:18.155990 213058 cri.go:89] found id: ""
I1121 14:30:18.156000 213058 logs.go:282] 1 containers: [4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359]
I1121 14:30:18.156056 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:18.160457 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1121 14:30:18.160529 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1121 14:30:18.191869 213058 cri.go:89] found id: ""
I1121 14:30:18.191899 213058 logs.go:282] 0 containers: []
W1121 14:30:18.191909 213058 logs.go:284] No container was found matching "coredns"
I1121 14:30:18.191916 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1121 14:30:18.191990 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1121 14:30:18.222614 213058 cri.go:89] found id: "e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6"
I1121 14:30:18.222639 213058 cri.go:89] found id: "f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545"
I1121 14:30:18.222644 213058 cri.go:89] found id: ""
I1121 14:30:18.222653 213058 logs.go:282] 2 containers: [e6dfb6e1dc1c8265272d63c384327daa0fd5fbe86ca50bd0d4f8752e8874a0b6 f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545]
I1121 14:30:18.222710 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:18.227248 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:18.231976 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1121 14:30:18.232054 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1121 14:30:18.261651 213058 cri.go:89] found id: ""
I1121 14:30:18.261686 213058 logs.go:282] 0 containers: []
W1121 14:30:18.261696 213058 logs.go:284] No container was found matching "kube-proxy"
I1121 14:30:18.261703 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1121 14:30:18.261756 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1121 14:30:18.293248 213058 cri.go:89] found id: "652f4807df85eaf29df01467f1035127421360ce721d4dee3abaffd4baf2fbcb"
I1121 14:30:18.293277 213058 cri.go:89] found id: "56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463"
I1121 14:30:18.293283 213058 cri.go:89] found id: ""
I1121 14:30:18.293291 213058 logs.go:282] 2 containers: [652f4807df85eaf29df01467f1035127421360ce721d4dee3abaffd4baf2fbcb 56b18d01a7baccdb5c947bc18623de36abd1dd8bc833918c4928f4b6da860463]
I1121 14:30:18.293360 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:18.297988 213058 ssh_runner.go:195] Run: which crictl
I1121 14:30:18.302375 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1121 14:30:18.302444 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1121 14:30:18.331900 213058 cri.go:89] found id: ""
I1121 14:30:18.331976 213058 logs.go:282] 0 containers: []
W1121 14:30:18.331989 213058 logs.go:284] No container was found matching "kindnet"
I1121 14:30:18.331997 213058 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1121 14:30:18.332053 213058 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1121 14:30:18.362314 213058 cri.go:89] found id: ""
I1121 14:30:18.362341 213058 logs.go:282] 0 containers: []
W1121 14:30:18.362351 213058 logs.go:284] No container was found matching "storage-provisioner"
I1121 14:30:18.362363 213058 logs.go:123] Gathering logs for kube-scheduler [f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545] ...
I1121 14:30:18.362378 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 f5bffdee5fed58984f6a49db6828b64640859bea1305268fef6a66c2fda74545"
I1121 14:30:18.401362 213058 logs.go:123] Gathering logs for containerd ...
I1121 14:30:18.401403 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1121 14:30:18.453554 213058 logs.go:123] Gathering logs for dmesg ...
I1121 14:30:18.453597 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1121 14:30:18.470719 213058 logs.go:123] Gathering logs for describe nodes ...
I1121 14:30:18.470750 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1121 14:30:18.535220 213058 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1121 14:30:18.535241 213058 logs.go:123] Gathering logs for kube-apiserver [56e8102371126ace3f42dda018be1e7af8b34b5b0c82b31bf229739d47944324] ...
I1121 14:30:18.535255 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 56e8102371126ace3f42dda018be1e7af8b34b5b0c82b31bf229739d47944324"
I1121 14:30:18.572460 213058 logs.go:123] Gathering logs for etcd [4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359] ...
I1121 14:30:18.572490 213058 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 4be4eebca5559f67c3b255127a96109d74ccb373ff9909925db2fa4458e85359"
==> container status <==
CONTAINER           IMAGE               CREATED             STATE     NAME                        ATTEMPT   POD ID              POD                                               NAMESPACE
b902d4d95366e       56cc512116c8f       9 seconds ago       Running   busybox                     0         650f980a2b9de       busybox                                           default
4cd21f3197431       6e38f40d628db       15 seconds ago      Running   storage-provisioner         0         23e45253f8c7e       storage-provisioner                               kube-system
5c05a4ce99693       ead0a4a53df89       15 seconds ago      Running   coredns                     0         4a38fce5ce541       coredns-5dd5756b68-vst4c                          kube-system
14f62b42937d6       409467f978b4a       26 seconds ago      Running   kindnet-cni                 0         2189620d082f5       kindnet-f6t7s                                     kube-system
7b9fdeac6c297       ea1030da44aa1       29 seconds ago      Running   kube-proxy                  0         7e0d6db9e6b3d       kube-proxy-wsp2w                                  kube-system
2ff2d15ad456d       f6f496300a2ae       48 seconds ago      Running   kube-scheduler              0         a2abbb0781499       kube-scheduler-old-k8s-version-012258             kube-system
bff5755d3bb4c       bb5e0dde9054c       48 seconds ago      Running   kube-apiserver              0         0f35f911732de       kube-apiserver-old-k8s-version-012258             kube-system
24c3a525c2057       73deb9a3f7025       48 seconds ago      Running   etcd                        0         11bd8f3a7d6a7       etcd-old-k8s-version-012258                       kube-system
9694941d50234       4be79c38a4bab       48 seconds ago      Running   kube-controller-manager     0         45f5f9128f983       kube-controller-manager-old-k8s-version-012258    kube-system
==> containerd <==
Nov 21 14:30:05 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:05.663617176Z" level=info msg="StartContainer for \"5c05a4ce996931fe774ecca66b33620ebb8a09a835d63b1f0ddd04105345bb76\""
Nov 21 14:30:05 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:05.663619446Z" level=info msg="Container 4cd21f31974314e5db6d58ee50bbd67f0daf675c91355ac568f2d0140f7a8d6c: CDI devices from CRI Config.CDIDevices: []"
Nov 21 14:30:05 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:05.664751507Z" level=info msg="connecting to shim 5c05a4ce996931fe774ecca66b33620ebb8a09a835d63b1f0ddd04105345bb76" address="unix:///run/containerd/s/0b88234bafabade7aa89e6626d296420e30066b3991abfec21350310268aa8a7" protocol=ttrpc version=3
Nov 21 14:30:05 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:05.672254961Z" level=info msg="CreateContainer within sandbox \"23e45253f8c7ee6d14427e06305531cf9d976c8c976bd1a48cedecbea7976313\" for &ContainerMetadata{Name:storage-provisioner,Attempt:0,} returns container id \"4cd21f31974314e5db6d58ee50bbd67f0daf675c91355ac568f2d0140f7a8d6c\""
Nov 21 14:30:05 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:05.673493529Z" level=info msg="StartContainer for \"4cd21f31974314e5db6d58ee50bbd67f0daf675c91355ac568f2d0140f7a8d6c\""
Nov 21 14:30:05 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:05.674511601Z" level=info msg="connecting to shim 4cd21f31974314e5db6d58ee50bbd67f0daf675c91355ac568f2d0140f7a8d6c" address="unix:///run/containerd/s/a82bd5a517bceb0823436c092fd804897bb31601e146a9022325dd22f0adc41d" protocol=ttrpc version=3
Nov 21 14:30:05 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:05.728082486Z" level=info msg="StartContainer for \"4cd21f31974314e5db6d58ee50bbd67f0daf675c91355ac568f2d0140f7a8d6c\" returns successfully"
Nov 21 14:30:05 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:05.728959460Z" level=info msg="StartContainer for \"5c05a4ce996931fe774ecca66b33620ebb8a09a835d63b1f0ddd04105345bb76\" returns successfully"
Nov 21 14:30:08 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:08.528101810Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:fa895e52-0bff-4604-8b62-fd0f087015e8,Namespace:default,Attempt:0,}"
Nov 21 14:30:08 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:08.569589613Z" level=info msg="connecting to shim 650f980a2b9de14dfd5f63378bb97f102c6ac2132a9ada4c16a5ef068e7d2a2c" address="unix:///run/containerd/s/5e291cbce6d45d78977b32eb821eca28abc28581b57d5fa47a45bc5da629cfec" namespace=k8s.io protocol=ttrpc version=3
Nov 21 14:30:08 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:08.641364674Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:fa895e52-0bff-4604-8b62-fd0f087015e8,Namespace:default,Attempt:0,} returns sandbox id \"650f980a2b9de14dfd5f63378bb97f102c6ac2132a9ada4c16a5ef068e7d2a2c\""
Nov 21 14:30:08 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:08.643152152Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\""
Nov 21 14:30:10 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:10.895297688Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 21 14:30:10 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:10.896188926Z" level=info msg="stop pulling image gcr.io/k8s-minikube/busybox:1.28.4-glibc: active requests=0, bytes read=2396646"
Nov 21 14:30:10 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:10.897638365Z" level=info msg="ImageCreate event name:\"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 21 14:30:10 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:10.900612481Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 21 14:30:10 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:10.901224670Z" level=info msg="Pulled image \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" with image id \"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\", repo tag \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\", repo digest \"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\", size \"2395207\" in 2.258026607s"
Nov 21 14:30:10 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:10.901267593Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" returns image reference \"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\""
Nov 21 14:30:10 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:10.903245567Z" level=info msg="CreateContainer within sandbox \"650f980a2b9de14dfd5f63378bb97f102c6ac2132a9ada4c16a5ef068e7d2a2c\" for container &ContainerMetadata{Name:busybox,Attempt:0,}"
Nov 21 14:30:10 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:10.911518272Z" level=info msg="Container b902d4d95366e27e951b3537262d21dd82f809e7ad84dd34083f4c621ca4b23b: CDI devices from CRI Config.CDIDevices: []"
Nov 21 14:30:10 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:10.918169889Z" level=info msg="CreateContainer within sandbox \"650f980a2b9de14dfd5f63378bb97f102c6ac2132a9ada4c16a5ef068e7d2a2c\" for &ContainerMetadata{Name:busybox,Attempt:0,} returns container id \"b902d4d95366e27e951b3537262d21dd82f809e7ad84dd34083f4c621ca4b23b\""
Nov 21 14:30:10 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:10.918839732Z" level=info msg="StartContainer for \"b902d4d95366e27e951b3537262d21dd82f809e7ad84dd34083f4c621ca4b23b\""
Nov 21 14:30:10 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:10.919846340Z" level=info msg="connecting to shim b902d4d95366e27e951b3537262d21dd82f809e7ad84dd34083f4c621ca4b23b" address="unix:///run/containerd/s/5e291cbce6d45d78977b32eb821eca28abc28581b57d5fa47a45bc5da629cfec" protocol=ttrpc version=3
Nov 21 14:30:10 old-k8s-version-012258 containerd[665]: time="2025-11-21T14:30:10.971722510Z" level=info msg="StartContainer for \"b902d4d95366e27e951b3537262d21dd82f809e7ad84dd34083f4c621ca4b23b\" returns successfully"
Nov 21 14:30:17 old-k8s-version-012258 containerd[665]: E1121 14:30:17.320736 665 websocket.go:100] "Unhandled Error" err="unable to upgrade websocket connection: websocket server finished before becoming ready" logger="UnhandledError"
==> coredns [5c05a4ce996931fe774ecca66b33620ebb8a09a835d63b1f0ddd04105345bb76] <==
.:53
[INFO] plugin/reload: Running configuration SHA512 = 4c7f44b73086be760ec9e64204f63c5cc5a952c8c1c55ba0b41d8fc3315ce3c7d0259d04847cb8b4561043d4549603f3bccfd9b397eeb814eef159d244d26f39
CoreDNS-1.10.1
linux/amd64, go1.20, 055b2c3
[INFO] 127.0.0.1:46429 - 55004 "HINFO IN 8589807954474471726.703758692042272696. udp 56 false 512" NXDOMAIN qr,rd,ra 131 0.027956792s
==> describe nodes <==
Name:               old-k8s-version-012258
Roles:              control-plane
Labels:             beta.kubernetes.io/arch=amd64
                    beta.kubernetes.io/os=linux
                    kubernetes.io/arch=amd64
                    kubernetes.io/hostname=old-k8s-version-012258
                    kubernetes.io/os=linux
                    minikube.k8s.io/commit=29e0798733fefbdc471fd2bbb38f6a7ae2a26162
                    minikube.k8s.io/name=old-k8s-version-012258
                    minikube.k8s.io/primary=true
                    minikube.k8s.io/updated_at=2025_11_21T14_29_38_0700
                    minikube.k8s.io/version=v1.37.0
                    node-role.kubernetes.io/control-plane=
                    node.kubernetes.io/exclude-from-external-load-balancers=
Annotations:        kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock
                    node.alpha.kubernetes.io/ttl: 0
                    volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp:  Fri, 21 Nov 2025 14:29:34 +0000
Taints:             <none>
Unschedulable:      false
Lease:
  HolderIdentity:  old-k8s-version-012258
  AcquireTime:     <unset>
  RenewTime:       Fri, 21 Nov 2025 14:30:17 +0000
Conditions:
  Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                      Message
  ----             ------  -----------------                 ------------------                ------                      -------
  MemoryPressure   False   Fri, 21 Nov 2025 14:30:07 +0000   Fri, 21 Nov 2025 14:29:33 +0000   KubeletHasSufficientMemory  kubelet has sufficient memory available
  DiskPressure     False   Fri, 21 Nov 2025 14:30:07 +0000   Fri, 21 Nov 2025 14:29:33 +0000   KubeletHasNoDiskPressure    kubelet has no disk pressure
  PIDPressure      False   Fri, 21 Nov 2025 14:30:07 +0000   Fri, 21 Nov 2025 14:29:33 +0000   KubeletHasSufficientPID     kubelet has sufficient PID available
  Ready            True    Fri, 21 Nov 2025 14:30:07 +0000   Fri, 21 Nov 2025 14:30:05 +0000   KubeletReady                kubelet is posting ready status
Addresses:
  InternalIP:  192.168.94.2
  Hostname:    old-k8s-version-012258
Capacity:
  cpu:                8
  ephemeral-storage:  304681132Ki
  hugepages-1Gi:      0
  hugepages-2Mi:      0
  memory:             32863352Ki
  pods:               110
Allocatable:
  cpu:                8
  ephemeral-storage:  304681132Ki
  hugepages-1Gi:      0
  hugepages-2Mi:      0
  memory:             32863352Ki
  pods:               110
System Info:
  Machine ID:                 cf10fb2f940d419c1d138723691cfee8
  System UUID:                b90c39b5-fac8-48f3-bfec-9ba818fb6bc5
  Boot ID:                    f900700b-0668-4d24-87ff-85e15fbda365
  Kernel Version:             6.8.0-1044-gcp
  OS Image:                   Debian GNU/Linux 12 (bookworm)
  Operating System:           linux
  Architecture:               amd64
  Container Runtime Version:  containerd://2.1.5
  Kubelet Version:            v1.28.0
  Kube-Proxy Version:         v1.28.0
PodCIDR:                      10.244.0.0/24
PodCIDRs:                     10.244.0.0/24
Non-terminated Pods: (9 in total)
  Namespace    Name                                             CPU Requests  CPU Limits  Memory Requests  Memory Limits  Age
  ---------    ----                                             ------------  ----------  ---------------  -------------  ---
  default      busybox                                          0 (0%)        0 (0%)      0 (0%)           0 (0%)         12s
  kube-system  coredns-5dd5756b68-vst4c                         100m (1%)     0 (0%)      70Mi (0%)        170Mi (0%)     30s
  kube-system  etcd-old-k8s-version-012258                      100m (1%)     0 (0%)      100Mi (0%)       0 (0%)         45s
  kube-system  kindnet-f6t7s                                    100m (1%)     100m (1%)   50Mi (0%)        50Mi (0%)      30s
  kube-system  kube-apiserver-old-k8s-version-012258            250m (3%)     0 (0%)      0 (0%)           0 (0%)         43s
  kube-system  kube-controller-manager-old-k8s-version-012258   200m (2%)     0 (0%)      0 (0%)           0 (0%)         43s
  kube-system  kube-proxy-wsp2w                                 0 (0%)        0 (0%)      0 (0%)           0 (0%)         30s
  kube-system  kube-scheduler-old-k8s-version-012258            100m (1%)     0 (0%)      0 (0%)           0 (0%)         43s
  kube-system  storage-provisioner                              0 (0%)        0 (0%)      0 (0%)           0 (0%)         30s
Allocated resources:
  (Total limits may be over 100 percent, i.e., overcommitted.)
  Resource           Requests    Limits
  --------           --------    ------
  cpu                850m (10%)  100m (1%)
  memory             220Mi (0%)  220Mi (0%)
  ephemeral-storage  0 (0%)      0 (0%)
  hugepages-1Gi      0 (0%)      0 (0%)
  hugepages-2Mi      0 (0%)      0 (0%)
Events:
  Type    Reason                   Age                From             Message
  ----    ------                   ---                ----             -------
  Normal  Starting                 29s                kube-proxy
  Normal  Starting                 50s                kubelet          Starting kubelet.
  Normal  NodeHasSufficientMemory  49s (x8 over 50s)  kubelet          Node old-k8s-version-012258 status is now: NodeHasSufficientMemory
  Normal  NodeHasNoDiskPressure    49s (x8 over 50s)  kubelet          Node old-k8s-version-012258 status is now: NodeHasNoDiskPressure
  Normal  NodeHasSufficientPID     49s (x7 over 50s)  kubelet          Node old-k8s-version-012258 status is now: NodeHasSufficientPID
  Normal  NodeAllocatableEnforced  49s                kubelet          Updated Node Allocatable limit across pods
  Normal  Starting                 43s                kubelet          Starting kubelet.
  Normal  NodeAllocatableEnforced  43s                kubelet          Updated Node Allocatable limit across pods
  Normal  NodeHasSufficientMemory  43s                kubelet          Node old-k8s-version-012258 status is now: NodeHasSufficientMemory
  Normal  NodeHasNoDiskPressure    43s                kubelet          Node old-k8s-version-012258 status is now: NodeHasNoDiskPressure
  Normal  NodeHasSufficientPID     43s                kubelet          Node old-k8s-version-012258 status is now: NodeHasSufficientPID
  Normal  RegisteredNode           31s                node-controller  Node old-k8s-version-012258 event: Registered Node old-k8s-version-012258 in Controller
  Normal  NodeReady                15s                kubelet          Node old-k8s-version-012258 status is now: NodeReady
==> dmesg <==
[Nov21 13:17] MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.
[ +0.001887] TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.
[ +0.001001] MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.
[ +0.086016] acpi PNP0A03:00: fail to add MMCONFIG information, can't access extended configuration space under this bridge
[ +0.440508] i8042: Warning: Keylock active
[ +0.011202] device-mapper: core: CONFIG_IMA_DISABLE_HTABLE is disabled. Duplicate IMA measurements will not be recorded in the IMA log.
[ +0.526419] block sda: the capability attribute has been deprecated.
[ +0.095215] systemd[1]: Configuration file /run/systemd/system/netplan-ovs-cleanup.service is marked world-inaccessible. This has no effect as configuration data is accessible via APIs without restrictions. Proceeding anyway.
[ +0.027093] systemd[1]: /lib/systemd/system/snapd.service:23: Unknown key name 'RestartMode' in section 'Service', ignoring.
[ +5.485024] kauditd_printk_skb: 47 callbacks suppressed
==> etcd [24c3a525c2057be14d63a0b83d320542988e06c148db3abcea70288b84ad9d55] <==
{"level":"info","ts":"2025-11-21T14:29:32.241252Z","caller":"membership/cluster.go:421","msg":"added member","cluster-id":"da400bbece288f5a","local-member-id":"dfc97eb0aae75b33","added-peer-id":"dfc97eb0aae75b33","added-peer-peer-urls":["https://192.168.94.2:2380"]}
{"level":"info","ts":"2025-11-21T14:29:32.243038Z","caller":"embed/etcd.go:726","msg":"starting with client TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/server.crt, key = /var/lib/minikube/certs/etcd/server.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
{"level":"info","ts":"2025-11-21T14:29:32.243254Z","caller":"embed/etcd.go:597","msg":"serving peer traffic","address":"192.168.94.2:2380"}
{"level":"info","ts":"2025-11-21T14:29:32.243303Z","caller":"embed/etcd.go:569","msg":"cmux::serve","address":"192.168.94.2:2380"}
{"level":"info","ts":"2025-11-21T14:29:32.24334Z","caller":"embed/etcd.go:278","msg":"now serving peer/client/metrics","local-member-id":"dfc97eb0aae75b33","initial-advertise-peer-urls":["https://192.168.94.2:2380"],"listen-peer-urls":["https://192.168.94.2:2380"],"advertise-client-urls":["https://192.168.94.2:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.94.2:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
{"level":"info","ts":"2025-11-21T14:29:32.24338Z","caller":"embed/etcd.go:855","msg":"serving metrics","address":"http://127.0.0.1:2381"}
{"level":"info","ts":"2025-11-21T14:29:32.527604Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"dfc97eb0aae75b33 is starting a new election at term 1"}
{"level":"info","ts":"2025-11-21T14:29:32.527651Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"dfc97eb0aae75b33 became pre-candidate at term 1"}
{"level":"info","ts":"2025-11-21T14:29:32.527692Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"dfc97eb0aae75b33 received MsgPreVoteResp from dfc97eb0aae75b33 at term 1"}
{"level":"info","ts":"2025-11-21T14:29:32.527708Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"dfc97eb0aae75b33 became candidate at term 2"}
{"level":"info","ts":"2025-11-21T14:29:32.527717Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"dfc97eb0aae75b33 received MsgVoteResp from dfc97eb0aae75b33 at term 2"}
{"level":"info","ts":"2025-11-21T14:29:32.527728Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"dfc97eb0aae75b33 became leader at term 2"}
{"level":"info","ts":"2025-11-21T14:29:32.527737Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: dfc97eb0aae75b33 elected leader dfc97eb0aae75b33 at term 2"}
{"level":"info","ts":"2025-11-21T14:29:32.529559Z","caller":"etcdserver/server.go:2062","msg":"published local member to cluster through raft","local-member-id":"dfc97eb0aae75b33","local-member-attributes":"{Name:old-k8s-version-012258 ClientURLs:[https://192.168.94.2:2379]}","request-path":"/0/members/dfc97eb0aae75b33/attributes","cluster-id":"da400bbece288f5a","publish-timeout":"7s"}
{"level":"info","ts":"2025-11-21T14:29:32.529578Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-21T14:29:32.529669Z","caller":"etcdserver/server.go:2571","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-21T14:29:32.529972Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2025-11-21T14:29:32.529994Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2025-11-21T14:29:32.529757Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-21T14:29:32.5309Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2025-11-21T14:29:32.531625Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"da400bbece288f5a","local-member-id":"dfc97eb0aae75b33","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-21T14:29:32.53516Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-21T14:29:32.535207Z","caller":"etcdserver/server.go:2595","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-21T14:29:32.536282Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.94.2:2379"}
{"level":"info","ts":"2025-11-21T14:29:35.645599Z","caller":"traceutil/trace.go:171","msg":"trace[1619369888] transaction","detail":"{read_only:false; response_revision:181; number_of_response:1; }","duration":"103.859179ms","start":"2025-11-21T14:29:35.541719Z","end":"2025-11-21T14:29:35.645578Z","steps":["trace[1619369888] 'process raft request' (duration: 101.685301ms)"],"step_count":1}
==> kernel <==
14:30:20 up 1:12, 0 user, load average: 4.09, 3.08, 1.94
Linux old-k8s-version-012258 6.8.0-1044-gcp #47~22.04.1-Ubuntu SMP Thu Oct 23 21:07:54 UTC 2025 x86_64 GNU/Linux
PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"
==> kindnet [14f62b42937d63a9d982189e10059fb863ccdf5ca3eedc2cdab43a2e258708b6] <==
I1121 14:29:54.836873 1 main.go:109] connected to apiserver: https://10.96.0.1:443
I1121 14:29:54.837124 1 main.go:139] hostIP = 192.168.94.2
podIP = 192.168.94.2
I1121 14:29:54.837288 1 main.go:148] setting mtu 1500 for CNI
I1121 14:29:54.837307 1 main.go:178] kindnetd IP family: "ipv4"
I1121 14:29:54.837325 1 main.go:182] noMask IPv4 subnets: [10.244.0.0/16]
time="2025-11-21T14:29:55Z" level=info msg="Created plugin 10-kube-network-policies (kindnetd, handles RunPodSandbox,RemovePodSandbox)"
I1121 14:29:55.132056 1 controller.go:377] "Starting controller" name="kube-network-policies"
I1121 14:29:55.132129 1 controller.go:381] "Waiting for informer caches to sync"
I1121 14:29:55.132143 1 shared_informer.go:350] "Waiting for caches to sync" controller="kube-network-policies"
I1121 14:29:55.132319 1 controller.go:390] nri plugin exited: failed to connect to NRI service: dial unix /var/run/nri/nri.sock: connect: no such file or directory
I1121 14:29:55.432449 1 shared_informer.go:357] "Caches are synced" controller="kube-network-policies"
I1121 14:29:55.432473 1 metrics.go:72] Registering metrics
I1121 14:29:55.432525 1 controller.go:711] "Syncing nftables rules"
I1121 14:30:05.138150 1 main.go:297] Handling node with IPs: map[192.168.94.2:{}]
I1121 14:30:05.138210 1 main.go:301] handling current node
I1121 14:30:15.134126 1 main.go:297] Handling node with IPs: map[192.168.94.2:{}]
I1121 14:30:15.134169 1 main.go:301] handling current node
==> kube-apiserver [bff5755d3bb4c01170cea10eea2a0bd7eb5e4e85eff679e4fd11f262f20d8b28] <==
I1121 14:29:34.045351 1 cache.go:39] Caches are synced for AvailableConditionController controller
I1121 14:29:34.047124 1 shared_informer.go:318] Caches are synced for cluster_authentication_trust_controller
I1121 14:29:34.047217 1 shared_informer.go:318] Caches are synced for configmaps
I1121 14:29:34.051166 1 controller.go:624] quota admission added evaluator for: namespaces
I1121 14:29:34.059678 1 shared_informer.go:318] Caches are synced for crd-autoregister
I1121 14:29:34.059713 1 aggregator.go:166] initial CRD sync complete...
I1121 14:29:34.059721 1 autoregister_controller.go:141] Starting autoregister controller
I1121 14:29:34.059728 1 cache.go:32] Waiting for caches to sync for autoregister controller
I1121 14:29:34.059737 1 cache.go:39] Caches are synced for autoregister controller
I1121 14:29:34.239983 1 controller.go:624] quota admission added evaluator for: leases.coordination.k8s.io
I1121 14:29:34.956388 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I1121 14:29:34.961744 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I1121 14:29:34.961779 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I1121 14:29:35.529678 1 controller.go:624] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1121 14:29:35.676651 1 controller.go:624] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1121 14:29:35.776358 1 alloc.go:330] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W1121 14:29:35.783426 1 lease.go:263] Resetting endpoints for master service "kubernetes" to [192.168.94.2]
I1121 14:29:35.785070 1 controller.go:624] quota admission added evaluator for: endpoints
I1121 14:29:35.792737 1 controller.go:624] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1121 14:29:35.992086 1 controller.go:624] quota admission added evaluator for: serviceaccounts
I1121 14:29:37.085397 1 controller.go:624] quota admission added evaluator for: deployments.apps
I1121 14:29:37.099935 1 alloc.go:330] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I1121 14:29:37.111942 1 controller.go:624] quota admission added evaluator for: daemonsets.apps
I1121 14:29:50.620131 1 controller.go:624] quota admission added evaluator for: replicasets.apps
I1121 14:29:50.819999 1 controller.go:624] quota admission added evaluator for: controllerrevisions.apps
==> kube-controller-manager [9694941d5023471382cb75dbe0e35927477b046c67f0406d94b0c2eab9737245] <==
I1121 14:29:49.846641 1 shared_informer.go:318] Caches are synced for disruption
I1121 14:29:49.855897 1 shared_informer.go:318] Caches are synced for stateful set
I1121 14:29:49.881551 1 shared_informer.go:311] Waiting for caches to sync for garbage collector
I1121 14:29:49.969509 1 shared_informer.go:318] Caches are synced for attach detach
I1121 14:29:50.014167 1 shared_informer.go:318] Caches are synced for resource quota
I1121 14:29:50.025976 1 shared_informer.go:318] Caches are synced for resource quota
I1121 14:29:50.366198 1 shared_informer.go:318] Caches are synced for garbage collector
I1121 14:29:50.366669 1 garbagecollector.go:166] "All resource monitors have synced. Proceeding to collect garbage"
I1121 14:29:50.381693 1 shared_informer.go:318] Caches are synced for garbage collector
I1121 14:29:50.624660 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set coredns-5dd5756b68 to 2"
I1121 14:29:50.704235 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set coredns-5dd5756b68 to 1 from 2"
I1121 14:29:50.830312 1 event.go:307] "Event occurred" object="kube-system/kube-proxy" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-wsp2w"
I1121 14:29:50.831838 1 event.go:307] "Event occurred" object="kube-system/kindnet" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kindnet-f6t7s"
I1121 14:29:50.927521 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-vst4c"
I1121 14:29:50.936234 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-qv6fz"
I1121 14:29:50.964100 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="339.351723ms"
I1121 14:29:50.978176 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: coredns-5dd5756b68-qv6fz"
I1121 14:29:50.986743 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="22.057827ms"
I1121 14:29:50.996010 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="8.927032ms"
I1121 14:29:50.996568 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="343.659µs"
I1121 14:30:05.215933 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="100.246µs"
I1121 14:30:05.230917 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="161.827µs"
I1121 14:30:06.296502 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="6.965394ms"
I1121 14:30:06.296638 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="91.82µs"
I1121 14:30:09.770369 1 node_lifecycle_controller.go:1048] "Controller detected that some Nodes are Ready. Exiting master disruption mode"
==> kube-proxy [7b9fdeac6c297da9e16ba05abceeee4a77258137fd28986a17f946713c8ad0fe] <==
I1121 14:29:51.457956 1 server_others.go:69] "Using iptables proxy"
I1121 14:29:51.467641 1 node.go:141] Successfully retrieved node IP: 192.168.94.2
I1121 14:29:51.489328 1 server.go:632] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I1121 14:29:51.492051 1 server_others.go:152] "Using iptables Proxier"
I1121 14:29:51.492086 1 server_others.go:421] "Detect-local-mode set to ClusterCIDR, but no cluster CIDR for family" ipFamily="IPv6"
I1121 14:29:51.492094 1 server_others.go:438] "Defaulting to no-op detect-local"
I1121 14:29:51.492128 1 proxier.go:251] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
I1121 14:29:51.492424 1 server.go:846] "Version info" version="v1.28.0"
I1121 14:29:51.492443 1 server.go:848] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1121 14:29:51.493149 1 config.go:97] "Starting endpoint slice config controller"
I1121 14:29:51.493193 1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
I1121 14:29:51.493154 1 config.go:188] "Starting service config controller"
I1121 14:29:51.493237 1 shared_informer.go:311] Waiting for caches to sync for service config
I1121 14:29:51.493237 1 config.go:315] "Starting node config controller"
I1121 14:29:51.493252 1 shared_informer.go:311] Waiting for caches to sync for node config
I1121 14:29:51.593782 1 shared_informer.go:318] Caches are synced for service config
I1121 14:29:51.593822 1 shared_informer.go:318] Caches are synced for node config
I1121 14:29:51.593799 1 shared_informer.go:318] Caches are synced for endpoint slice config
==> kube-scheduler [2ff2d15ad456d7eabe7dc6efd47603a67afa696fd1091b577b9633b6669bd9ec] <==
W1121 14:29:34.007803 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E1121 14:29:34.007838 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
W1121 14:29:34.007899 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E1121 14:29:34.007919 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
W1121 14:29:34.904012 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E1121 14:29:34.904113 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
W1121 14:29:34.906819 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E1121 14:29:34.906855 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
W1121 14:29:34.982047 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E1121 14:29:34.982173 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
W1121 14:29:35.046771 1 reflector.go:535] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E1121 14:29:35.046802 1 reflector.go:147] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
W1121 14:29:35.065222 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E1121 14:29:35.065262 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
W1121 14:29:35.119288 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E1121 14:29:35.119329 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
W1121 14:29:35.148021 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E1121 14:29:35.148079 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
W1121 14:29:35.156816 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E1121 14:29:35.156866 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
W1121 14:29:35.323566 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E1121 14:29:35.323609 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
W1121 14:29:35.347343 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E1121 14:29:35.347400 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
I1121 14:29:38.002740 1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Nov 21 14:29:49 old-k8s-version-012258 kubelet[1516]: I1121 14:29:49.923571 1516 kubelet_network.go:61] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24"
Nov 21 14:29:50 old-k8s-version-012258 kubelet[1516]: I1121 14:29:50.836162 1516 topology_manager.go:215] "Topology Admit Handler" podUID="bc079c02-40ff-4f10-947b-76f1e9784572" podNamespace="kube-system" podName="kube-proxy-wsp2w"
Nov 21 14:29:50 old-k8s-version-012258 kubelet[1516]: I1121 14:29:50.839382 1516 topology_manager.go:215] "Topology Admit Handler" podUID="bd28a6b5-0214-42be-8883-1adf1217761c" podNamespace="kube-system" podName="kindnet-f6t7s"
Nov 21 14:29:50 old-k8s-version-012258 kubelet[1516]: I1121 14:29:50.946858 1516 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/bc079c02-40ff-4f10-947b-76f1e9784572-xtables-lock\") pod \"kube-proxy-wsp2w\" (UID: \"bc079c02-40ff-4f10-947b-76f1e9784572\") " pod="kube-system/kube-proxy-wsp2w"
Nov 21 14:29:50 old-k8s-version-012258 kubelet[1516]: I1121 14:29:50.948665 1516 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-cfg\" (UniqueName: \"kubernetes.io/host-path/bd28a6b5-0214-42be-8883-1adf1217761c-cni-cfg\") pod \"kindnet-f6t7s\" (UID: \"bd28a6b5-0214-42be-8883-1adf1217761c\") " pod="kube-system/kindnet-f6t7s"
Nov 21 14:29:50 old-k8s-version-012258 kubelet[1516]: I1121 14:29:50.949046 1516 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/bd28a6b5-0214-42be-8883-1adf1217761c-xtables-lock\") pod \"kindnet-f6t7s\" (UID: \"bd28a6b5-0214-42be-8883-1adf1217761c\") " pod="kube-system/kindnet-f6t7s"
Nov 21 14:29:50 old-k8s-version-012258 kubelet[1516]: I1121 14:29:50.949101 1516 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vgrts\" (UniqueName: \"kubernetes.io/projected/bc079c02-40ff-4f10-947b-76f1e9784572-kube-api-access-vgrts\") pod \"kube-proxy-wsp2w\" (UID: \"bc079c02-40ff-4f10-947b-76f1e9784572\") " pod="kube-system/kube-proxy-wsp2w"
Nov 21 14:29:50 old-k8s-version-012258 kubelet[1516]: I1121 14:29:50.950051 1516 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/bd28a6b5-0214-42be-8883-1adf1217761c-lib-modules\") pod \"kindnet-f6t7s\" (UID: \"bd28a6b5-0214-42be-8883-1adf1217761c\") " pod="kube-system/kindnet-f6t7s"
Nov 21 14:29:50 old-k8s-version-012258 kubelet[1516]: I1121 14:29:50.950176 1516 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jcpxl\" (UniqueName: \"kubernetes.io/projected/bd28a6b5-0214-42be-8883-1adf1217761c-kube-api-access-jcpxl\") pod \"kindnet-f6t7s\" (UID: \"bd28a6b5-0214-42be-8883-1adf1217761c\") " pod="kube-system/kindnet-f6t7s"
Nov 21 14:29:50 old-k8s-version-012258 kubelet[1516]: I1121 14:29:50.950220 1516 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/bc079c02-40ff-4f10-947b-76f1e9784572-kube-proxy\") pod \"kube-proxy-wsp2w\" (UID: \"bc079c02-40ff-4f10-947b-76f1e9784572\") " pod="kube-system/kube-proxy-wsp2w"
Nov 21 14:29:50 old-k8s-version-012258 kubelet[1516]: I1121 14:29:50.950255 1516 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/bc079c02-40ff-4f10-947b-76f1e9784572-lib-modules\") pod \"kube-proxy-wsp2w\" (UID: \"bc079c02-40ff-4f10-947b-76f1e9784572\") " pod="kube-system/kube-proxy-wsp2w"
Nov 21 14:29:55 old-k8s-version-012258 kubelet[1516]: I1121 14:29:55.257777 1516 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kube-proxy-wsp2w" podStartSLOduration=5.257722111 podCreationTimestamp="2025-11-21 14:29:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:29:52.247909441 +0000 UTC m=+15.198590387" watchObservedRunningTime="2025-11-21 14:29:55.257722111 +0000 UTC m=+18.208403071"
Nov 21 14:29:55 old-k8s-version-012258 kubelet[1516]: I1121 14:29:55.257917 1516 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kindnet-f6t7s" podStartSLOduration=2.158617096 podCreationTimestamp="2025-11-21 14:29:50 +0000 UTC" firstStartedPulling="2025-11-21 14:29:51.458699826 +0000 UTC m=+14.409380763" lastFinishedPulling="2025-11-21 14:29:54.557970689 +0000 UTC m=+17.508651626" observedRunningTime="2025-11-21 14:29:55.257276178 +0000 UTC m=+18.207957124" watchObservedRunningTime="2025-11-21 14:29:55.257887959 +0000 UTC m=+18.208568906"
Nov 21 14:30:05 old-k8s-version-012258 kubelet[1516]: I1121 14:30:05.191422 1516 kubelet_node_status.go:493] "Fast updating node status as it just became ready"
Nov 21 14:30:05 old-k8s-version-012258 kubelet[1516]: I1121 14:30:05.216103 1516 topology_manager.go:215] "Topology Admit Handler" podUID="3ca4df79-d875-498c-91b8-059d4f975bd0" podNamespace="kube-system" podName="coredns-5dd5756b68-vst4c"
Nov 21 14:30:05 old-k8s-version-012258 kubelet[1516]: I1121 14:30:05.222388 1516 topology_manager.go:215] "Topology Admit Handler" podUID="4195d236-52f6-4bfd-b47a-9cd7cd89bedd" podNamespace="kube-system" podName="storage-provisioner"
Nov 21 14:30:05 old-k8s-version-012258 kubelet[1516]: I1121 14:30:05.242068 1516 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2cp9f\" (UniqueName: \"kubernetes.io/projected/3ca4df79-d875-498c-91b8-059d4f975bd0-kube-api-access-2cp9f\") pod \"coredns-5dd5756b68-vst4c\" (UID: \"3ca4df79-d875-498c-91b8-059d4f975bd0\") " pod="kube-system/coredns-5dd5756b68-vst4c"
Nov 21 14:30:05 old-k8s-version-012258 kubelet[1516]: I1121 14:30:05.242125 1516 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-69bsj\" (UniqueName: \"kubernetes.io/projected/4195d236-52f6-4bfd-b47a-9cd7cd89bedd-kube-api-access-69bsj\") pod \"storage-provisioner\" (UID: \"4195d236-52f6-4bfd-b47a-9cd7cd89bedd\") " pod="kube-system/storage-provisioner"
Nov 21 14:30:05 old-k8s-version-012258 kubelet[1516]: I1121 14:30:05.242163 1516 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3ca4df79-d875-498c-91b8-059d4f975bd0-config-volume\") pod \"coredns-5dd5756b68-vst4c\" (UID: \"3ca4df79-d875-498c-91b8-059d4f975bd0\") " pod="kube-system/coredns-5dd5756b68-vst4c"
Nov 21 14:30:05 old-k8s-version-012258 kubelet[1516]: I1121 14:30:05.242194 1516 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/4195d236-52f6-4bfd-b47a-9cd7cd89bedd-tmp\") pod \"storage-provisioner\" (UID: \"4195d236-52f6-4bfd-b47a-9cd7cd89bedd\") " pod="kube-system/storage-provisioner"
Nov 21 14:30:06 old-k8s-version-012258 kubelet[1516]: I1121 14:30:06.278995 1516 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=16.278943202 podCreationTimestamp="2025-11-21 14:29:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:30:06.278908115 +0000 UTC m=+29.229589065" watchObservedRunningTime="2025-11-21 14:30:06.278943202 +0000 UTC m=+29.229624148"
Nov 21 14:30:06 old-k8s-version-012258 kubelet[1516]: I1121 14:30:06.289341 1516 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/coredns-5dd5756b68-vst4c" podStartSLOduration=16.289291859 podCreationTimestamp="2025-11-21 14:29:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:30:06.28907602 +0000 UTC m=+29.239756965" watchObservedRunningTime="2025-11-21 14:30:06.289291859 +0000 UTC m=+29.239972805"
Nov 21 14:30:08 old-k8s-version-012258 kubelet[1516]: I1121 14:30:08.218808 1516 topology_manager.go:215] "Topology Admit Handler" podUID="fa895e52-0bff-4604-8b62-fd0f087015e8" podNamespace="default" podName="busybox"
Nov 21 14:30:08 old-k8s-version-012258 kubelet[1516]: I1121 14:30:08.263005 1516 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cbpfl\" (UniqueName: \"kubernetes.io/projected/fa895e52-0bff-4604-8b62-fd0f087015e8-kube-api-access-cbpfl\") pod \"busybox\" (UID: \"fa895e52-0bff-4604-8b62-fd0f087015e8\") " pod="default/busybox"
Nov 21 14:30:11 old-k8s-version-012258 kubelet[1516]: I1121 14:30:11.294015 1516 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="default/busybox" podStartSLOduration=1.035211506 podCreationTimestamp="2025-11-21 14:30:08 +0000 UTC" firstStartedPulling="2025-11-21 14:30:08.642848367 +0000 UTC m=+31.593529296" lastFinishedPulling="2025-11-21 14:30:10.901611757 +0000 UTC m=+33.852292703" observedRunningTime="2025-11-21 14:30:11.293488867 +0000 UTC m=+34.244169813" watchObservedRunningTime="2025-11-21 14:30:11.293974913 +0000 UTC m=+34.244655858"
==> storage-provisioner [4cd21f31974314e5db6d58ee50bbd67f0daf675c91355ac568f2d0140f7a8d6c] <==
I1121 14:30:05.736193 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1121 14:30:05.746379 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1121 14:30:05.746443 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I1121 14:30:05.754349 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I1121 14:30:05.754427 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"2ece7dbe-e611-46b3-879d-c0179ba2fde1", APIVersion:"v1", ResourceVersion:"432", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' old-k8s-version-012258_d783fa48-77b0-4408-a80f-68458be19abb became leader
I1121 14:30:05.754523 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_old-k8s-version-012258_d783fa48-77b0-4408-a80f-68458be19abb!
I1121 14:30:05.855459 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_old-k8s-version-012258_d783fa48-77b0-4408-a80f-68458be19abb!
-- /stdout --
helpers_test.go:262: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p old-k8s-version-012258 -n old-k8s-version-012258
helpers_test.go:269: (dbg) Run: kubectl --context old-k8s-version-012258 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:293: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: end of post-mortem logs <<<
helpers_test.go:294: ---------------------/post-mortem---------------------------------
--- FAIL: TestStartStop/group/old-k8s-version/serial/DeployApp (13.97s)